query            string   (lengths 7 to 3.85k)
document         string   (lengths 11 to 430k)
metadata         dict
negatives        sequence (lengths 0 to 101)
negative_scores  sequence (lengths 0 to 101)
document_score   string   (lengths 3 to 10)
document_rank    string   (102 classes)
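Each row pairs a natural-language query with one positive document (a code snippet), a list of mined negative snippets, per-negative similarity scores, plus the positive's own score and rank. A minimal sketch of reading one row, assuming the data is stored as JSON lines on disk (the file name is a placeholder, not something this dump specifies):

import json

# Placeholder path; the actual storage location is not given in this dump.
with open("triplets.jsonl") as f:
    row = json.loads(f.readline())

query = row["query"]              # e.g. a Go doc comment
positive = row["document"]        # the snippet the query describes
negatives = row["negatives"]      # up to 101 non-matching snippets
scores = row["negative_scores"]   # one similarity score per negative
assert len(negatives) == len(scores)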
WriteBytesToFile writes a []byte to a specified file on the EC2 instance
func (ins *EC2RemoteClient) WriteBytesToFile(source []byte, destination string) error {
	err := ins.cmdClient.WriteBytesToFile(source, destination)
	return err
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func WriteBytes(ctx context.Context, data []byte, filename string) error {\n\tif strings.HasPrefix(filename, \"gs://\") {\n\t\treturn writeGCSObject(ctx, data, filename)\n\t}\n\treturn ioutil.WriteFile(filename, data, os.ModePerm)\n}", "func SaveBytesToFile(content []byte, outputFile string, overwrite bool) error {\n\texists, err := FileExists(outputFile)\n\tif err != nil {\n\t\treturn errors.Wrap(err, fmt.Sprintf(\"error while checking if the output file %q already exists\", outputFile))\n\t}\n\tif exists && !overwrite {\n\t\treturn fmt.Errorf(\"cannot persist the data since the output file %q already exists\", outputFile)\n\t}\n\tparentFolder := filepath.Dir(outputFile)\n\texists, err = DirExists(parentFolder)\n\tif err != nil {\n\t\treturn errors.Wrap(err, fmt.Sprintf(\"error while checking if the parent folder of %q exists\", outputFile))\n\t}\n\tif !exists {\n\t\terr = os.MkdirAll(parentFolder, DefaultPermission)\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, fmt.Sprintf(\"failed to create tree structure to %q to store the data\", parentFolder))\n\t\t}\n\t}\n\terr = ioutil.WriteFile(outputFile, content, DefaultPermission)\n\tif err != nil {\n\t\treturn errors.Wrap(err, fmt.Sprintf(\"error while saving the data to file %q\", outputFile))\n\t}\n\treturn nil\n}", "func FileWrite(f *os.File, b []byte) (int, error)", "func WriteBytes(file *os.File, bytes []byte, particularOffset bool, addr int64) {\n\tfmt.Printf(\"%04X\\n\", addr)\n\tvar jmpFileLoc int64\n\tif particularOffset {\n\t\toriginalOffset, _ := file.Seek(0, 1)\n\t\tjmpFileLoc = originalOffset\n\t\tfile.Seek(addr, 0)\n\t}\n\tbytesWritten, err := file.Write(bytes)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tlog.Printf(\"Wrote %d bytes.\\n\", bytesWritten)\n\tif particularOffset {\n\t\tfile.Seek(jmpFileLoc, 0)\n\t}\n}", "func (sshConfig *SSHConfig) CopyBytesToFile(contentBytes []byte, remotePath string, permissions string) (err error) {\n\tbyteReader := bytes.NewReader(contentBytes)\n\terr = sshConfig.Copy(byteReader, remotePath, permissions, int64(len(contentBytes)))\n\treturn\n}", "func WriteStringToFile(filename string, c []uint8) error {\n\tf, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0755)\n\tCheck(err)\n\tfile, err := f.WriteString(string(c))\n\tCheck(err)\n\tf.Sync()\n\n\tfmt.Printf(\"wrote %d bytes\\n\", file)\n\treturn err\n}", "func BytesToFile(path, name string, data []byte) error {\n\tfilename := filepath.Join(path, name)\n\t_, err := os.Stat(filename)\n\tif err == nil {\n\t\treturn nil\n\t}\n\n\t// Create kubeconfig\n\tf, err := os.Create(filename)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer f.Close()\n\n\t_, err = f.Write(data)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func WriteToFile(fileName string, bytes []byte) error {\n\tf, err := os.Create(fileName)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer func() error {\n\t\terr := f.Close()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t}()\n\n\t_, errorWrite := f.Write(bytes)\n\tif errorWrite != nil {\n\t\treturn errorWrite\n\t}\n\n\treturn nil\n}", "func WriteBinaryFile(filePath string, contents []byte) error {\n _ = EnsureDir(filepath.Dir(filePath))\n return ioutil.WriteFile(filePath, contents, CreateModePerm)\n}", "func FileWriteAt(f *os.File, b []byte, off int64) (int, error)", "func (fw *FileWrapper) WriteBytes(b []byte) (int, error) {\n\t_, err := fw.SeekRel(0)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tn, err := fw.Write(b)\n\tif err == nil {\n\t\tfw.pos += int64(n)\n\t}\n\n\treturn n, 
err\n}", "func writeKeyToFile(keyBytes []byte, saveFileTo string) error {\n\terr := ioutil.WriteFile(saveFileTo, keyBytes, 0666)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func WriteBWByte(ByteSlice [][]byte, fileName string) {\n\tnewfile, err := os.Create(fileName)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer newfile.Close()\n\n\tfor i := 0; i < len(ByteSlice); i++ {\n\t\tnewfile.Write(ByteSlice[i])\n\t\t// newfile.WriteString(\"\\n\")\n\t\tnewfile.Sync()\n\t}\n}", "func (c *digisparkI2cConnection) WriteBytes(buf []byte) error {\n\tc.mtx.Lock()\n\tdefer c.mtx.Unlock()\n\n\tif len(buf) > 32 {\n\t\tbuf = buf[:32]\n\t}\n\n\treturn c.writeAndCheckCount(buf, true)\n}", "func WriteToFile(file *os.File, content string, cursor int64) error {\n\t_, err := file.WriteAt([]byte(content), cursor)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func WriteBinaryFile(filename string, content string) (err error) {\n\tf, err := os.Create(filename)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer f.Close()\n\t_, err = f.WriteString(content)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func writeFile(contents []byte, filename string) {\n\terr := ioutil.WriteFile(filename, contents, 0644)\n\tcheckIfError(err)\n}", "func (e *EndToEndTest) WriteFile(repo string, volume string, filename string, content string) error {\n\tmountpoint, err := e.GetVolumePath(repo, volume)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpath := fmt.Sprintf(\"%s/%s\", mountpoint, filename)\n\treturn exec.Command(\"docker\", \"exec\", e.GetContainer(\"server\"), \"sh\", \"-c\",\n\t\tfmt.Sprintf(\"echo -n \\\"%s\\\" > %s\", content, path)).Run()\n}", "func WriteFileByByte(path string, data []byte, fileMod fs.FileMode, coverage bool) error {\n\tif !coverage {\n\t\texists, err := PathExists(path)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif exists {\n\t\t\treturn fmt.Errorf(\"not coverage, which path exist %v\", path)\n\t\t}\n\t}\n\tparentPath := filepath.Dir(path)\n\tif !PathExistsFast(parentPath) {\n\t\terr := os.MkdirAll(parentPath, fileMod)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"can not WriteFileByByte at new dir at mode: %v , at parent path: %v\", fileMod, parentPath)\n\t\t}\n\t}\n\terr := os.WriteFile(path, data, fileMod)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"write data at path: %v, err: %v\", path, err)\n\t}\n\treturn nil\n}", "func WriteToFile(output string, b bytes.Buffer) error {\n\tf, err := os.Create(output)\n\tb.WriteTo(f)\n\treturn err\n}", "func (c *Client) WriteBytes(ctx context.Context, name string, data []byte) error {\n\treturn c.WriteChunked(ctx, name, chunker.NewFromBlob(data, int(c.ChunkMaxSize)))\n}", "func (v *Vfs) WriteFile(filename string, bytes []byte) error {\n\tif len(bytes) == 0 {\n\t\treturn nil\n\t}\n\n\tvfs := (*C.sqlite3_vfs)(unsafe.Pointer(v))\n\n\tcfilename := C.CString(filename)\n\tdefer C.free(unsafe.Pointer(cfilename))\n\n\tbuf := (*C.uint8_t)(unsafe.Pointer(&bytes[0]))\n\tn := C.size_t(len(bytes))\n\n\trc := C.dqlite_file_write(vfs.zName, cfilename, buf, n)\n\tif rc != 0 {\n\t\treturn Error{Code: int(rc & 0xff)}\n\t}\n\n\treturn nil\n}", "func writeStreamToFile(b *[]byte, path string) int {\n\tf, err := os.Create(path)\n\tlogError(err)\n\tdefer f.Close()\n\n\tbytesWritten, err := f.Write(*b)\n\n\tlogError(err)\n\n\treturn bytesWritten\n}", "func (b *i2cBus) WriteBytes(addr byte, value []byte) error {\n\tb.mu.Lock()\n\tdefer b.mu.Unlock()\n\n\tif err := b.init(); err != nil {\n\t\treturn errors.New(\"could not initialise:\" + 
err.Error())\n\t}\n\n\tif err := b.setAddress(addr); err != nil {\n\t\treturn errors.New(\"could not set adress:\" + err.Error())\n\t}\n\n\toutbuf := value\n\n\thdrp := (*reflect.SliceHeader)(unsafe.Pointer(&outbuf))\n\n\tvar message i2c_msg\n\tmessage.addr = uint16(addr)\n\tmessage.flags = 0\n\tmessage.len = uint16(len(outbuf))\n\tmessage.buf = uintptr(unsafe.Pointer(&hdrp.Data))\n\n\tvar packets i2c_rdwr_ioctl_data\n\n\tpackets.msgs = uintptr(unsafe.Pointer(&message))\n\tpackets.nmsg = 1\n\n\tif _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, b.file.Fd(), rdrwCmd, uintptr(unsafe.Pointer(&packets))); errno != 0 {\n\t\treturn syscall.Errno(errno)\n\t}\n\n\treturn nil\n}", "func (l *LinuxFileSystem) WriteToFile(filepath string, data []byte) error {\n\tlog.Debugf(\"Writing to file: path='%s', data='%s'\", filepath, data)\n\tf, err := os.OpenFile(filepath, os.O_WRONLY, 0755)\n\tif err != nil {\n\t\treturn err\n\t}\n\tn, err := f.Write(data)\n\tif err == nil && n < len(data) {\n\t\terr = io.ErrShortWrite\n\t}\n\tif err1 := f.Close(); err == nil {\n\t\terr = err1\n\t}\n\treturn err\n}", "func (c *Action) WriteBytes(bytes []byte) error {\n\t_, err := c.ResponseWriter.Write(bytes)\n\tif err != nil {\n\t\tc.App.Server.Logger.Println(\"Error during write: \", err)\n\t}\n\treturn err\n}", "func writeToFile(output string, data []byte) error {\n\toutFile, err := os.Create(output)\n\tif err != nil {\n\t\treturn err\n\t}\n\tbytesOut, err := outFile.Write(data)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfmt.Fprintf(os.Stderr, \"%d bytes written successfully to `%s`\\n\", bytesOut, output)\n\terr = outFile.Close()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func writeFile(path string, content []byte) error {\n\treturn ioutil.WriteFile(path, content, 0644)\n}", "func i2cWriteBytes(buf []byte) (err error) {\n\ttime.Sleep(1 * time.Millisecond) // By design, must not send more than once every 1Ms\n\treg := make([]byte, 1)\n\treg[0] = byte(len(buf))\n\treg = append(reg, buf...)\n\treturn openI2CPort.device.Tx(reg, nil)\n}", "func BytesToNewFile(fileName string, data []byte) (*os.File, error) {\n\t//Creates File\n\tnewFile, err := os.Create(fileName)\n\tif err != nil {\n\t\treturn newFile, err\n\t}\n\n\tdefer newFile.Close()\n\t//Write to file\n\tnewFile.Write(data)\n\tnewFile.Sync()\n\n\treturn newFile, nil\n}", "func FileToBytes(path, name string) ([]byte, error) {\n\tfilename := filepath.Join(path, name)\n\tfile, err := os.Open(filename)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer file.Close()\n\n\tstats, err := file.Stat()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdata := make([]byte, stats.Size())\n\n\t_, err = file.Read(data)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn data, nil\n}", "func WriteBytes(buffer []byte, offset int, value []byte) {\n copy(buffer[offset:offset + len(value)], value)\n}", "func writeFile(path string, content []byte) error {\n\tf, err := os.OpenFile(path, os.O_WRONLY, 0755) // TODO mirror permissions\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer f.Close()\n\tf.Write(content)\n\treturn nil\n}", "func FilePutContentWithByte(file string, content []byte) (int, error) {\n\tfs, e := os.Create(file)\n\tif e != nil {\n\t\treturn 0, e\n\t}\n\tdefer fs.Close()\n\treturn fs.Write(content)\n}", "func writeToFile(name string, data []byte) {\n\tf, err := os.Create(name)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer f.Close()\n\n\t_, err = f.Write(data)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}", "func writeFile(file string, content string) 
{\n d1 := []byte(content)\n err := ioutil.WriteFile(file, d1, 0644)\n if err!=nil {\n os.Stderr.WriteString(err.Error() + \"\\n\")\n }\n}", "func WriteToFile(filename, content string) {\n\tf, err := os.Create(filename)\n\tMustCheck(err)\n\tdefer CloseFile(f)\n\t_, err = f.WriteString(content)\n\tMustCheck(err)\n}", "func createFile(bytes []byte, filepath string) error {\n\terr := ioutil.WriteFile(filepath, bytes, 0644)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func storeToFile(buf bytes.Buffer, outFileName string) error {\n\n\tfile, err := os.Create(outFileName)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdefer file.Close()\n\n\tnum, err := file.Write(buf.Bytes())\n\n\tif err != nil {\n\t\tfmt.Println(\"Failed to save \" + outFileName + \" error:\" + err.Error())\n\t\treturn err\n\t}\n\n\tfmt.Println(strconv.Itoa(num) + \" bytes has been saved to \" + outFileName)\n\treturn err\n}", "func FileToBytes(path string) []byte {\n\tdata, err := ioutil.ReadFile(path)\n\tif err != nil {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"path\": path,\n\t\t}).Fatal(err.Error())\n\t}\n\treturn data\n}", "func writeFile(fileName, data string) error {\n\tf, err := os.Create(fileName)\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = f.WriteString(data)\n\tif err != nil {\n\t\treturn err\n\t}\n\tf.Sync()\n\treturn nil\n}", "func write_file(outFile, contents string) error {\n\n\terr := ioutil.WriteFile(outFile, []byte(contents), 0644)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn nil\n}", "func writeFileViaBufio() {\n\tprintTraceAndGap()\n\n\tvar dataToWrite []byte\n\tdataToWrite = readFile()\n\n\tfileHandle, errOpen := os.Open(fileToWrite)\n\tif checkError(errOpen) {\n\t\tfmt.Println(\"Error writing to file: \", errOpen)\n\t\tos.Exit(2)\n\t}\n\n\twriter := bufio.NewWriter(fileHandle)\n\t_, errWrite := writer.Write(dataToWrite)\n\n\tif checkError(errWrite) {\n\t\tfmt.Println(\"Error writing to file: \", errOpen)\n\t\tos.Exit(2)\n\t}\n}", "func (b *i2cBus) WriteByte(addr, value byte) error {\n\tb.mu.Lock()\n\tdefer b.mu.Unlock()\n\n\tif err := b.init(); err != nil {\n\t\treturn err\n\t}\n\n\tif err := b.setAddress(addr); err != nil {\n\t\treturn err\n\t}\n\n\tn, err := b.file.Write([]byte{value})\n\n\tif n != 1 {\n\t\terr = fmt.Errorf(\"i2c: Unexpected number (%v) of bytes written in WriteByte\", n)\n\t}\n\n\treturn err\n}", "func (s storage) WriteFile(path string, data []byte, perm os.FileMode) error {\n\treturn ioutil.WriteFile(path, data, perm)\n}", "func WriteToFile(f string ,d []byte) {\n err := ioutil.WriteFile(f, d, 0644)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n}", "func (sm SectorMap) WriteFile(diskbytes []byte, file byte, contents []byte, overwrite bool) (bool, error) {\n\tsectorsNeeded := (len(contents) + 255) / 256\n\tcts := make([]byte, 256*sectorsNeeded)\n\tcopy(cts, contents)\n\texisting := len(sm.SectorsForFile(file))\n\texisted := existing > 0\n\tfree := sm.FreeSectors() + existing\n\tif free < sectorsNeeded {\n\t\treturn existed, errors.OutOfSpacef(\"file %d requires %d sectors, but only %d are available\", file, sectorsNeeded, free)\n\t}\n\tif existed {\n\t\tif !overwrite {\n\t\t\treturn existed, errors.FileExistsf(\"file %d already exists\", file)\n\t\t}\n\t\tsm.Delete(file)\n\t}\n\n\ti := 0\nOUTER:\n\tfor track := byte(0); track < disk.FloppyTracks; track++ {\n\t\tfor sector := byte(0); sector < disk.FloppySectors; sector++ {\n\t\t\tif sm.FileForSector(track, sector) == FileFree {\n\t\t\t\tif err := disk.WriteSector(diskbytes, track, sector, 
cts[i*256:(i+1)*256]); err != nil {\n\t\t\t\t\treturn existed, err\n\t\t\t\t}\n\t\t\t\tif err := sm.SetFileForSector(track, sector, file); err != nil {\n\t\t\t\t\treturn existed, err\n\t\t\t\t}\n\t\t\t\ti++\n\t\t\t\tif i == sectorsNeeded {\n\t\t\t\t\tbreak OUTER\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tif err := sm.Persist(diskbytes); err != nil {\n\t\treturn existed, err\n\t}\n\treturn existed, nil\n}", "func writeFile(store driver.StorageDriver, filepath string, buf []byte) (int, error) {\n\tvar n int\n\n\tif stwr, err := store.Writer(context.Background(), filepath, false); err == nil {\n\t\tdefer stwr.Close()\n\n\t\tif n, err = stwr.Write(buf); err != nil {\n\t\t\treturn -1, err\n\t\t}\n\n\t\tif err := stwr.Commit(); err != nil {\n\t\t\treturn -1, err\n\t\t}\n\t} else {\n\t\treturn -1, err\n\t}\n\n\treturn n, nil\n}", "func WriteNewDataOnFile(path string, data []byte)(err error){\n err = ioutil.WriteFile(path, data, 0644)\n if err != nil {logs.Error(\"Error WriteNewData\"); return err}\n \n return nil\n}", "func PutBytes(ctx context.Context, bs Blobstore, key string, data []byte) (string, error) {\n\treader := bytes.NewReader(data)\n\treturn bs.Put(ctx, key, reader)\n}", "func FileToBytes(filePath string) ([]byte, error) {\n\tbytes, err := ioutil.ReadFile(filePath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn bytes, nil\n}", "func (sshConfig *SSHConfig) CopyBytesToFileAndVerify(contentBytes []byte, remotePath string, permissions string) (err error) {\n\terr = sshConfig.CopyBytesToFile(contentBytes, remotePath, permissions)\n\tif err == nil {\n\t\tif contents, err1 := sshConfig.ReadBytesFromFile(remotePath); err1 == nil {\n\t\t\tresult := bytes.Compare(contents, contentBytes)\n\t\t\tif result != 0 {\n\t\t\t\terr = errors.New(\"File did not transfer successfully\")\n\t\t\t\treturn\n\t\t\t}\n\t\t} else {\n\t\t\terr = errors.New(\"Couldn't read written file to verify contents\")\n\t\t}\n\t}\n\treturn\n}", "func WriteToFile(file string, line string) error{\n\tos.MkdirAll(path.Dir(file), 0777)\n\n\tf, err:= os.OpenFile(file, os.O_RDWR | os.O_CREATE | os.O_APPEND, 0777)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"WARN: Open file %s failed, %s\\n\", file, err)\n\t\treturn err\n\t}\n\tdefer f.Close()\n\n\tencoder := mahonia.NewEncoder(\"gbk\")\n\twriter := bufio.NewWriter(encoder.NewWriter(f))\n\twriter.WriteString(line + \"\\n\")\n\twriter.Flush()\n\t//io.Copy(writer, strings.NewReader(line))\n\treturn nil\n}", "func WriteToFile(fileName, filedata string){\r\n\terr := ioutil.WriteFile(fileName, []byte(filedata), 0644)\r\n\t\tif err != nil {\r\n\t\t\tlog.Fatal(err)\r\n\t\t}\r\n\t\treturn\r\n}", "func (fs FileSystem) WriteFile(filename string, data []byte, perm os.FileMode) error {\n\tfs.testDriver()\n\tfilename = filepath.FromSlash(filename)\n\treturn fs.drv.WriteFile(filename, data, perm)\n}", "func writeFile(fileName string, data []byte) error {\n\tfile, err := os.Create(fileName)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"File creation failed: %v\", err)\n\t}\n\tdefer file.Close()\n\t_, err = file.Write(data)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error while writting response to file: %v\", err)\n\t}\n\treturn nil\n}", "func (c1 *Custom6) Write(t *storage.TempFile, data []byte) (numBytes int, err error) {\n\tpaddedBytes := storage.PostFill(data, 8*1024, byte('f'))\n\tif numBytes, err = t.File.Write(paddedBytes); err != nil {\n\t\tfmt.Printf(\"Error in writing byte array to file %s. 
Finished writing %d bytes\", err, numBytes)\n\t\treturn numBytes, err\n\t}\n\treturn numBytes, nil\n}", "func WriteToFile(filename string, data string) error {\n\tfile, err := os.Create(filename)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer file.Close()\n\n\t_, err = io.WriteString(file, data)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn file.Sync()\n}", "func (c *EzClient) PostFileBytes(url, filepath, formName string, contents []byte, params map[string]string) (int, error, string) {\n\t// Setup body\n\tvar body bytes.Buffer\n\twriter := multipart.NewWriter(&body)\n\n\t// Create file form\n\tpart, err := writer.CreateFormFile(formName, filepath)\n\tif err != nil {\n\t\treturn 0, err, \"\"\n\t}\n\n\tpart.Write(contents)\n\n\t// Add additional params\n\tif params != nil {\n\t\tfor key, value := range params {\n\t\t\tfw, _ := writer.CreateFormField(key)\n\t\t\tfw.Write([]byte(value))\n\t\t}\n\t}\n\twriter.Close()\n\n\treq, err := http.NewRequest(\"POST\", url, &body)\n\treq.Header.Add(\"Content-Type\", writer.FormDataContentType())\n\treq.Close = true\n\n\t// Post the file\n\tresp, err := c.Client.Do(req)\n\tdefer resp.Body.Close()\n\tb, _ := ioutil.ReadAll(resp.Body)\n\n\t// Return status and any error code\n\tif resp.StatusCode > 299 {\n\t\treturn resp.StatusCode, errors.New(string(b)), \"\"\n\t}\n\n\treturn resp.StatusCode, nil, string(b)\n}", "func WriteFile(file string) {\n\tlog.Printf(\"Writing file: %s\", file)\n\n\tfileBytes := []byte(\"foo bar\")\n\n\tif err := ioutil.WriteFile(file, fileBytes, 0644); err != nil {\n\t\tlog.Printf(\"Writing to file failed with error: %v\", err)\n\t\treturn\n\t}\n\tlog.Printf(\"Wrote file: %s\", file)\n}", "func putBytes(log log.T, byteArray []byte, offsetStart int, offsetEnd int, inputBytes []byte) (err error) {\n\tbyteArrayLength := len(byteArray)\n\tif offsetStart > byteArrayLength-1 || offsetEnd > byteArrayLength-1 || offsetStart > offsetEnd || offsetStart < 0 {\n\t\tlog.Error(\"putBytes failed: Offset is invalid.\")\n\t\treturn errors.New(\"Offset is outside the byte array.\")\n\t}\n\n\tif offsetEnd-offsetStart+1 != len(inputBytes) {\n\t\tlog.Error(\"putBytes failed: Not enough space to save the bytes.\")\n\t\treturn errors.New(\"Not enough space to save the bytes.\")\n\t}\n\n\tcopy(byteArray[offsetStart:offsetEnd+1], inputBytes)\n\treturn nil\n}", "func WriteFile(fsys fs.FS, filename string, contents []byte) error {\n\tf, err := Create(fsys, filename)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer f.Close()\n\tif _, err := f.Write(contents); err != nil {\n\t\treturn err\n\t}\n\treturn f.Close()\n}", "func (w *Writer) WriteBytes(v []byte) error {\n\t_, err := w.out.Write(v)\n\treturn err\n}", "func (c1 *Custom1) Write(t *storage.TempFile, data []byte) (numBytes int, err error) {\n\tif numBytes, err = t.File.Write(data); err != nil {\n\t\tfmt.Printf(\"Error in writing byte array to file %s. 
Finished writing %d bytes\", err, numBytes)\n\t\treturn numBytes, err\n\t}\n\treturn numBytes, nil\n}", "func (key *PrivateKey) WriteToFile(filename string) (err error) {\n\tkeyOut, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed to open %s for writing: %s\", filename, err)\n\t}\n\tif err := pem.Encode(keyOut, key.pemBlock()); err != nil {\n\t\treturn fmt.Errorf(\"Unable to PEM encode private key: %s\", err)\n\t}\n\tif err := keyOut.Close(); err != nil {\n\t\tlogger.Printf(\"Unable to close file: %v\", err)\n\t}\n\treturn\n}", "func WriteToFile(fileName string, data string) error {\n\tfile, err := os.Create(fileName)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer file.Close()\n\n\t_, err = io.WriteString(file, data)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn file.Sync()\n}", "func (w *Writer) WriteBytes(data []byte) {\n\t// check length\n\tw.checkLength(len(data))\n\n\tcopy(w.buffer[w.index:], data)\n\tw.index += len(data)\n}", "func (h *fs) WriteFile(filename string, data []byte) error {\n\treturn ioutil.WriteFile(filename, data, 0700)\n}", "func writeToFile(file *os.File, data uint32, offset int) {\n\tbuffer := make([]byte, UINT32_LENGTH)\n\tbinary.LittleEndian.PutUint32(buffer, data)\n\tfile.WriteAt(buffer, int64(offset))\n}", "func WriteBytes(w io.Writer, data []byte) error {\n\tsize := uint64(len(data))\n\terr := WriteUint64(w, size)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn WriteFixedBytes(w, data)\n}", "func (writer *Writer) WriteBytes(bytes []byte) {\n\twriter.buf.Write(bytes)\n}", "func WriteToFile(p string, ref name.Reference, img v1.Image, opts ...WriteOption) error {\n\tw, err := os.Create(p)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer w.Close()\n\n\treturn Write(ref, img, w, opts...)\n}", "func (s *Scenario) Write(file string, b []byte) error {\n\treturn ioutil.WriteFile(file, b, 0644)\n}", "func writeToFile(fileLocation string, prometheusData []byte) error {\n\tpathToFile := strings.TrimSuffix(fileLocation, fileLocation[strings.LastIndex(fileLocation, \"/\"):])\n\tif _, err := os.Stat(pathToFile); os.IsNotExist(err) {\n\t\tlog.Fatalln(\"file location is not accessible\")\n\t}\n\n\treturn ioutil.WriteFile(fileLocation, prometheusData, 0644)\n}", "func WriteFile(t *testing.T, path string, data []byte) {\n\texpandedPath := ExpandFilepath(t, path)\n\tif err := ioutil.WriteFile(expandedPath, data, 0755); err != nil {\n\t\tt.Fatalf(\"failed to data to %#v: %v\", path, err)\n\t}\n}", "func (f *SyncFromServerFile) WriteFile(ctx context.Context, fi *dokan.FileInfo, bs []byte, offset int64) (n int, err error) {\n\tf.folder.fs.logEnter(ctx, \"SyncFromServerFile Write\")\n\tdefer func() { f.folder.reportErr(ctx, libkbfs.WriteMode, err) }()\n\tif len(bs) == 0 {\n\t\treturn 0, nil\n\t}\n\tfolderBranch := f.folder.getFolderBranch()\n\tif folderBranch == (data.FolderBranch{}) {\n\t\t// Nothing to do.\n\t\treturn len(bs), nil\n\t}\n\terr = f.folder.fs.config.KBFSOps().SyncFromServer(\n\t\tctx, folderBranch, nil)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tf.folder.fs.NotificationGroupWait()\n\treturn len(bs), nil\n}", "func (gps *Device) WriteBytes(bytes []byte) {\n\tif gps.uart != nil {\n\t\tgps.uart.Write(bytes)\n\t} else {\n\t\tgps.bus.Tx(gps.address, []byte{}, bytes)\n\t}\n}", "func (bf NaiveBloomFilter) Write(filename string) error {\n\tvar err error\n\tf, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0777)\n\tdefer f.Close()\n\tif err != nil {\n\t\treturn err\n\t}\n\tvar 
b bytes.Buffer\n\tfor i := 0; i < int(bf.size); i++ {\n\t\tbin := fmt.Sprintf(\"%d\", bf.bv[i])\n\t\tb.Write([]byte(bin))\n\t}\n\tbv := b.Bytes()\n\tlog.Println(\"Writing byte vector to file...\")\n\tf.Write(bv)\n\tf.Close()\n\tlog.Printf(\"Successfully wrote bytevector to file: %s\\n\", filename)\n\treturn nil\n}", "func (fsOnDisk) WriteFile(name string, c []byte) error {\n\treturn errors.Wrap(os.WriteFile(name, c, 0666)) //nolint:gosec\n}", "func WriteFile(input []byte, file string, overwrite bool) error {\r\n\texists, err := PathExists(file)\r\n\t// Check access.\r\n\tif err != nil {\r\n\t\treturn fmt.Errorf(\"filehelper.WriteFile: check access - %s\", err.Error())\r\n\t}\r\n\t// Check if exists and if overwrite is set.\r\n\tif exists && !overwrite {\r\n\t\treturn fmt.Errorf(\"filehelper.WriteFile: %s exists and overwrite is not set\", file)\r\n\t}\r\n\t// Open/Create file.\r\n\tf, err := os.Create(file)\r\n\tif err != nil {\r\n\t\treturn fmt.Errorf(\"filehelper.WriteFile: create file - %s\", err.Error())\r\n\t}\r\n\tdefer f.Close()\r\n\t// Write to file.\r\n\tn, err := f.Write(input)\r\n\tif err != nil {\r\n\t\treturn fmt.Errorf(\"filehelper.WriteFile: write to file - %s\", err.Error())\r\n\t}\r\n\t// Check if all input was written.\r\n\tif n != len(input) {\r\n\t\treturn fmt.Errorf(\"filehelper.WriteFile: only %d bytes written out of %d\", n, len(input))\r\n\t}\r\n\treturn nil\r\n}", "func (i *ImageBuf) WriteFile(filepath, fileformat string) error {\n\tret := i.WriteFileProgress(filepath, fileformat, nil)\n\truntime.KeepAlive(i)\n\treturn ret\n}", "func (c *keyPair) WriteFile(certPath, keyPath, pwd string) error {\n\tbytesPEM, pkBytesPEM, err := c.EncodeToMemory(pwd)\n\tif err != nil {\n\t\treturn trace.Wrap(err)\n\t}\n\n\terr = os.WriteFile(certPath, bytesPEM, perms)\n\tif err != nil {\n\t\treturn trace.Wrap(err)\n\t}\n\n\terr = os.WriteFile(keyPath, pkBytesPEM, perms)\n\tif err != nil {\n\t\treturn trace.Wrap(err)\n\t}\n\n\treturn nil\n}", "func writeToFile(fileName string, content string) error {\n\n\tlog.Printf(\"Writing '%s' into file %s\", content, fileName)\n\n\t// try opening the file\n\tf, err := os.OpenFile(fileName, os.O_WRONLY, os.ModePerm)\n\tif err != nil {\n\t\tlog.Printf(\"Error by opening %s: %v\", fileName, err)\n\t\treturn err\n\t}\n\tdefer f.Close()\n\n\t// write to file\n\t_, err = f.Write([]byte(content))\n\tif err != nil {\n\t\tlog.Printf(\"Error by writing to %s: %v\", fileName, err)\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func FileAppendContentWithByte(file string, content []byte) (int, error) {\n\n\tf, err := os.OpenFile(file, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0666)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tdefer f.Close()\n\treturn f.Write(content)\n}", "func (dm *dataManager) writeByte(address uint, b byte) (err ProcessException) {\n\tdata := []byte{b}\n\n\terr = dm.process.WriteBytes(address, data)\n\n\treturn\n}", "func writeToDisk(data []byte, directory, filename string) {\n\tos.MkdirAll(directory, os.ModePerm)\n\tpath := directory + filename\n\tf, err := os.Create(path)\n\tcommon.CheckError(err)\n\tdefer f.Close()\n\t_, err = f.Write(data)\n\tcommon.CheckRead(err)\n\tf.Sync()\n}", "func WriteFile(input interface{}, filename string) error {\n\tvar (\n\t\tbytes []byte\n\t\terr error\n\t)\n\tif bytes, err = Write(input); err != nil {\n\t\treturn err\n\t}\n\treturn ioutil.WriteFile(filename, bytes, 0666)\n}", "func (s *AzureBlobStorage) WriteFile(ctx context.Context, name string, data []byte) error {\n\tclient := 
s.containerClient.NewBlockBlobClient(s.withPrefix(name))\n\t// the encryption scope/key and the access tier can not be both in the HTTP headers\n\toptions := &blockblob.UploadBufferOptions{\n\t\tCPKScopeInfo: s.cpkScope,\n\t\tCPKInfo: s.cpkInfo,\n\t}\n\n\tif len(s.accessTier) > 0 {\n\t\toptions.AccessTier = &s.accessTier\n\t}\n\t_, err := client.UploadBuffer(ctx, data, options)\n\tif err != nil {\n\t\treturn errors.Annotatef(err, \"Failed to write azure blob file, file info: bucket(container)='%s', key='%s'\", s.options.Bucket, s.withPrefix(name))\n\t}\n\treturn nil\n}", "func (r *RealRunner) WriteFile(path, contents string) error {\n\t// Note: os.Create uses mode=0666.\n\treturn ioutil.WriteFile(path, []byte(contents), 0666)\n}", "func (s *Statistic) SaveToDiskFile(outPath string) error {\n\toutMap := map[string]interface{}{\n\t\t\"PeerId\": s.PeerId,\n\t\t\"NumBlockSend\": s.NumBlockSend,\n\t\t\"NumBlockRecv\":s.NumBlockRecv,\n\t\t\"NumDupBlock\": s.NumDupBlock,\n\t}\n\tjs := utils.Map2json(outMap)\n\t_, err := utils.WriteBytes(outPath, []byte(js))\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (w *Writer) WriteBytes(bytes []byte) {\n\tsetWriterRef(w, nil, nil)\n\twriteBytes(w, bytes)\n}", "func FileWriteString(f *os.File, s string) (int, error)", "func WriteTextFile(filePath, contents string) error {\n return WriteBinaryFile(filePath, []byte(contents))\n}", "func WriteByte(buffer []byte, offset int, value byte) {\n buffer[offset] = value\n}", "func WriteFile(filename string, data []byte, perm os.FileMode) error {\n\tf, err := FS.OpenFile(CTX, filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm)\n\tif err != nil {\n\t\treturn err\n\t}\n\tn, err := f.Write(data)\n\tif err == nil && n < len(data) {\n\t\terr = io.ErrShortWrite\n\t}\n\tif err1 := f.Close(); err == nil {\n\t\terr = err1\n\t}\n\treturn err\n}", "func WriteFile(filename string, data []byte, perm os.FileMode) error {\n\tf, err := FS.OpenFile(CTX, filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm)\n\tif err != nil {\n\t\treturn err\n\t}\n\tn, err := f.Write(data)\n\tif err == nil && n < len(data) {\n\t\terr = io.ErrShortWrite\n\t}\n\tif err1 := f.Close(); err == nil {\n\t\terr = err1\n\t}\n\treturn err\n}", "func WriteFile(filename string, data []byte, perm os.FileMode) error {\n\tf, err := FS.OpenFile(CTX, filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm)\n\tif err != nil {\n\t\treturn err\n\t}\n\tn, err := f.Write(data)\n\tif err == nil && n < len(data) {\n\t\terr = io.ErrShortWrite\n\t}\n\tif err1 := f.Close(); err == nil {\n\t\terr = err1\n\t}\n\treturn err\n}", "func WriteFile(filename string, data []byte, perm os.FileMode) error {\n\tf, err := FS.OpenFile(CTX, filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm)\n\tif err != nil {\n\t\treturn err\n\t}\n\tn, err := f.Write(data)\n\tif err == nil && n < len(data) {\n\t\terr = io.ErrShortWrite\n\t}\n\tif err1 := f.Close(); err == nil {\n\t\terr = err1\n\t}\n\treturn err\n}", "func WriteFile(filename string, data []byte, perm os.FileMode) error {\n\tf, err := FS.OpenFile(CTX, filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm)\n\tif err != nil {\n\t\treturn err\n\t}\n\tn, err := f.Write(data)\n\tif err == nil && n < len(data) {\n\t\terr = io.ErrShortWrite\n\t}\n\tif err1 := f.Close(); err == nil {\n\t\terr = err1\n\t}\n\treturn err\n}", "func (s Seed) WriteToFile(path string, overwrite bool) error {\n\t// If file does not exist or overwrite is true\n\tif _, err := os.Stat(path); os.IsNotExist(err) || overwrite {\n\t\treturn ioutil.WriteFile(path, 
s.EncodeHex(), 0600)\n\t}\n\n\treturn fmt.Errorf(\"file %s exists. No ovewrite action will be performed\", path)\n}" ]
[ "0.70434415", "0.6866407", "0.6832017", "0.68032885", "0.66986525", "0.6693543", "0.65425426", "0.64673275", "0.6426073", "0.6332181", "0.6302297", "0.6260653", "0.6243535", "0.6230753", "0.61927813", "0.6178638", "0.61687887", "0.61561227", "0.61376625", "0.61336935", "0.61236864", "0.61047894", "0.609229", "0.60854006", "0.5996963", "0.59904313", "0.59699196", "0.59442025", "0.59140444", "0.5908726", "0.5895567", "0.587794", "0.5844304", "0.5841857", "0.58347714", "0.58105993", "0.5792668", "0.57900476", "0.5752492", "0.5734166", "0.57271236", "0.5716593", "0.57163364", "0.5713512", "0.56984496", "0.5681783", "0.5677291", "0.56661505", "0.5644483", "0.5629296", "0.56214094", "0.56141883", "0.56021106", "0.55968654", "0.55919653", "0.55885935", "0.55778384", "0.5577176", "0.5572538", "0.5571391", "0.5570429", "0.5556461", "0.5553488", "0.5545993", "0.55451626", "0.55365604", "0.55341685", "0.5526017", "0.5522357", "0.5520886", "0.5518181", "0.55163413", "0.5508283", "0.5506208", "0.550216", "0.54971564", "0.54933655", "0.54750806", "0.5473083", "0.54607934", "0.54490167", "0.5444774", "0.5426858", "0.5418737", "0.54171413", "0.54007405", "0.539285", "0.5390258", "0.5381947", "0.5379492", "0.5379404", "0.5379367", "0.5376953", "0.53741944", "0.53669554", "0.53669554", "0.53669554", "0.53669554", "0.53669554", "0.5362873" ]
0.80710757
0
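The metadata field in each row marks the objective as a (query, document, negatives) triplet, i.e. the row is meant to be consumed contrastively: pull the query's embedding toward its positive document and away from the negatives. A hedged sketch of the usual InfoNCE-style loss over one such triplet (PyTorch is an assumption here, not something the dataset prescribes):

import torch
import torch.nn.functional as F

def triplet_infonce(q_emb, pos_emb, neg_embs, temperature=0.05):
    # q_emb: (d,), pos_emb: (d,), neg_embs: (n, d) -> scalar loss.
    # The positive document is placed at index 0 of the candidate list.
    candidates = torch.cat([pos_emb.unsqueeze(0), neg_embs], dim=0)
    sims = F.cosine_similarity(q_emb.unsqueeze(0), candidates) / temperature
    target = torch.zeros(1, dtype=torch.long)  # correct class = the positive
    return F.cross_entropy(sims.unsqueeze(0), target)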
Deprecated: Use SecretVersion_State.Descriptor instead.
func (SecretVersion_State) EnumDescriptor() ([]byte, []int) {
	return file_google_cloud_secrets_v1beta1_resources_proto_rawDescGZIP(), []int{1, 0}
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (*SecretVersion) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_secrets_v1beta1_resources_proto_rawDescGZIP(), []int{1}\n}", "func (*Secret) Descriptor() ([]byte, []int) {\n\treturn file_pkg_flow_grpc_secrets_proto_rawDescGZIP(), []int{0}\n}", "func (*SetStateSpec) Descriptor() ([]byte, []int) {\n\treturn file_org_apache_beam_model_pipeline_v1_beam_runner_api_proto_rawDescGZIP(), []int{16}\n}", "func (Instance_State) EnumDescriptor() ([]byte, []int) {\n\treturn file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDescGZIP(), []int{2, 0}\n}", "func (InstanceConfig_State) EnumDescriptor() ([]byte, []int) {\n\treturn file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDescGZIP(), []int{1, 1}\n}", "func (*SetStateRequest) Descriptor() ([]byte, []int) {\n\treturn file_vm_vm_proto_rawDescGZIP(), []int{3}\n}", "func (*SecretEdge) Descriptor() ([]byte, []int) {\n\treturn file_pkg_flow_grpc_secrets_proto_rawDescGZIP(), []int{1}\n}", "func (*StateVersionInfo) Descriptor() ([]byte, []int) {\n\treturn file_metastateService_proto_rawDescGZIP(), []int{7}\n}", "func (*Secret) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_secrets_v1beta1_resources_proto_rawDescGZIP(), []int{0}\n}", "func (*SetSecretRequest) Descriptor() ([]byte, []int) {\n\treturn file_pkg_flow_grpc_secrets_proto_rawDescGZIP(), []int{5}\n}", "func (*SetSecretResponse) Descriptor() ([]byte, []int) {\n\treturn file_pkg_flow_grpc_secrets_proto_rawDescGZIP(), []int{6}\n}", "func (*Secrets) Descriptor() ([]byte, []int) {\n\treturn file_pkg_flow_grpc_secrets_proto_rawDescGZIP(), []int{2}\n}", "func (*DeleteSecretRequest) Descriptor() ([]byte, []int) {\n\treturn file_pkg_flow_grpc_secrets_proto_rawDescGZIP(), []int{7}\n}", "func (*State) Descriptor() ([]byte, []int) {\n\treturn file_api_protoc_entity_state_message_proto_rawDescGZIP(), []int{0}\n}", "func (*SetStateResponse) Descriptor() ([]byte, []int) {\n\treturn file_vm_vm_proto_rawDescGZIP(), []int{4}\n}", "func (InstanceStatus_InstanceState) EnumDescriptor() ([]byte, []int) {\n\treturn file_cloudprovider_externalgrpc_protos_externalgrpc_proto_rawDescGZIP(), []int{29, 0}\n}", "func (*StateSpec) Descriptor() ([]byte, []int) {\n\treturn file_org_apache_beam_model_pipeline_v1_beam_runner_api_proto_rawDescGZIP(), []int{9}\n}", "func (*SecretPayload) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_secrets_v1beta1_resources_proto_rawDescGZIP(), []int{3}\n}", "func (*Secret) Descriptor() ([]byte, []int) {\n\treturn file_yandex_cloud_datatransfer_v1_endpoint_common_proto_rawDescGZIP(), []int{1}\n}", "func (StateOptions_StateConsistency) EnumDescriptor() ([]byte, []int) {\n\treturn file_runtime_proto_rawDescGZIP(), []int{37, 1}\n}", "func (*GenesisResponse_AppState_Version) Descriptor() ([]byte, []int) {\n\treturn file_resources_proto_rawDescGZIP(), []int{28, 1, 12}\n}", "func (Artifact_State) EnumDescriptor() ([]byte, []int) {\n\treturn file_ml_metadata_proto_metadata_store_proto_rawDescGZIP(), []int{2, 0}\n}", "func (*GetStateRequest) Descriptor() ([]byte, []int) {\n\treturn file_runtime_proto_rawDescGZIP(), []int{27}\n}", "func (*SecretsRequest) Descriptor() ([]byte, []int) {\n\treturn file_pkg_flow_grpc_secrets_proto_rawDescGZIP(), []int{3}\n}", "func (*DeviceStateGetReply_DeviceState) Descriptor() ([]byte, []int) {\n\treturn file_api_worker_v1_device_state_proto_rawDescGZIP(), []int{7, 0}\n}", "func (CommonMetadata_State) EnumDescriptor() ([]byte, []int) {\n\treturn 
file_google_datastore_admin_v1beta1_datastore_admin_proto_rawDescGZIP(), []int{0, 0}\n}", "func (Friend_State) EnumDescriptor() ([]byte, []int) {\n\treturn file_api_proto_rawDescGZIP(), []int{37, 0}\n}", "func (*GetStateSummaryRequest) Descriptor() ([]byte, []int) {\n\treturn file_vm_vm_proto_rawDescGZIP(), []int{43}\n}", "func (*DeleteStateRequest) Descriptor() ([]byte, []int) {\n\treturn file_runtime_proto_rawDescGZIP(), []int{32}\n}", "func (Execution_State) EnumDescriptor() ([]byte, []int) {\n\treturn file_ml_metadata_proto_metadata_store_proto_rawDescGZIP(), []int{5, 0}\n}", "func (State) EnumDescriptor() ([]byte, []int) {\n\treturn file_vm_vm_proto_rawDescGZIP(), []int{0}\n}", "func (*SecretsResponse) Descriptor() ([]byte, []int) {\n\treturn file_pkg_flow_grpc_secrets_proto_rawDescGZIP(), []int{4}\n}", "func (PortStimulus_State) EnumDescriptor() ([]byte, []int) {\n\treturn file_testvector_tv_proto_rawDescGZIP(), []int{5, 0}\n}", "func (*GetStateSummaryResponse) Descriptor() ([]byte, []int) {\n\treturn file_vm_vm_proto_rawDescGZIP(), []int{44}\n}", "func (*StateOptions) Descriptor() ([]byte, []int) {\n\treturn file_runtime_proto_rawDescGZIP(), []int{37}\n}", "func (*DeviceStateGetRequest) Descriptor() ([]byte, []int) {\n\treturn file_api_worker_v1_device_state_proto_rawDescGZIP(), []int{6}\n}", "func (SilenceRequest_SilenceState) EnumDescriptor() ([]byte, []int) {\n\treturn file_githubcard_proto_rawDescGZIP(), []int{5, 0}\n}", "func (Todo_State) EnumDescriptor() ([]byte, []int) {\n\treturn file_todo_proto_rawDescGZIP(), []int{0, 0}\n}", "func (*UpdateStateRequest) Descriptor() ([]byte, []int) {\n\treturn file_grpc_user_proto_rawDescGZIP(), []int{22}\n}", "func (*GetStateResponse) Descriptor() ([]byte, []int) {\n\treturn file_runtime_proto_rawDescGZIP(), []int{31}\n}", "func (x *fastReflection_GenesisState) Descriptor() protoreflect.MessageDescriptor {\n\treturn md_GenesisState\n}", "func (x *fastReflection_GenesisState) Descriptor() protoreflect.MessageDescriptor {\n\treturn md_GenesisState\n}", "func (FeaturestoreMonitoringConfig_ImportFeaturesAnalysis_State) EnumDescriptor() ([]byte, []int) {\n\treturn file_google_cloud_aiplatform_v1_featurestore_monitoring_proto_rawDescGZIP(), []int{0, 1, 0}\n}", "func (Environment_State) EnumDescriptor() ([]byte, []int) {\n\treturn file_google_cloud_dialogflow_v2_environment_proto_rawDescGZIP(), []int{0, 0}\n}", "func (*S2C_TokenUpdate) Descriptor() ([]byte, []int) {\n\treturn file_global_token_proto_rawDescGZIP(), []int{3}\n}", "func (*Deprecation) Descriptor() ([]byte, []int) {\n\treturn file_external_cfgmgmt_response_nodes_proto_rawDescGZIP(), []int{8}\n}", "func (*StateRequest) Descriptor() ([]byte, []int) {\n\treturn file_github_com_containerd_containerd_runtime_v1_shim_v1_shim_proto_rawDescGZIP(), []int{7}\n}", "func (GroupUserList_GroupUser_State) EnumDescriptor() ([]byte, []int) {\n\treturn file_api_proto_rawDescGZIP(), []int{43, 0, 0}\n}", "func (*GenesisResponse_AppState_UpdateVote) Descriptor() ([]byte, []int) {\n\treturn file_resources_proto_rawDescGZIP(), []int{28, 1, 11}\n}", "func (PolicyState) EnumDescriptor() ([]byte, []int) {\n\treturn file_management_proto_rawDescGZIP(), []int{9}\n}", "func (PolicyState) EnumDescriptor() ([]byte, []int) {\n\treturn file_management_proto_rawDescGZIP(), []int{7}\n}", "func (OnSellState) EnumDescriptor() ([]byte, []int) {\n\treturn file_api_proto_global_Global_proto_rawDescGZIP(), []int{16}\n}", "func (DeviceState) EnumDescriptor() ([]byte, []int) {\n\treturn file_src_nap_nap_proto_rawDescGZIP(), 
[]int{2}\n}", "func (*StateResponse) Descriptor() ([]byte, []int) {\n\treturn file_github_com_containerd_containerd_runtime_v1_shim_v1_shim_proto_rawDescGZIP(), []int{8}\n}", "func (*PasswordLockoutPolicyUpdate) Descriptor() ([]byte, []int) {\n\treturn file_management_proto_rawDescGZIP(), []int{46}\n}", "func (SegmentState) EnumDescriptor() ([]byte, []int) {\n\treturn file_go_chromium_org_luci_analysis_internal_changepoints_proto_output_buffer_proto_rawDescGZIP(), []int{0}\n}", "func (*ClientSecret) Descriptor() ([]byte, []int) {\n\treturn file_management_proto_rawDescGZIP(), []int{100}\n}", "func (*ClientSecret) Descriptor() ([]byte, []int) {\n\treturn file_management_proto_rawDescGZIP(), []int{103}\n}", "func (*DeviceStateRefreshRequest) Descriptor() ([]byte, []int) {\n\treturn file_api_worker_v1_device_state_proto_rawDescGZIP(), []int{4}\n}", "func (StateOptions_StateConcurrency) EnumDescriptor() ([]byte, []int) {\n\treturn file_runtime_proto_rawDescGZIP(), []int{37, 0}\n}", "func (*UpdateStateResponse) Descriptor() ([]byte, []int) {\n\treturn file_proto_micro_mall_logistics_proto_logistics_business_logistics_business_proto_rawDescGZIP(), []int{2}\n}", "func (State) EnumDescriptor() ([]byte, []int) {\n\treturn file_pkg_broker_config_targets_proto_rawDescGZIP(), []int{0}\n}", "func (Execution_State) EnumDescriptor() ([]byte, []int) {\n\treturn file_google_cloud_notebooks_v1_execution_proto_rawDescGZIP(), []int{1, 0}\n}", "func (*MapStateSpec) Descriptor() ([]byte, []int) {\n\treturn file_org_apache_beam_model_pipeline_v1_beam_runner_api_proto_rawDescGZIP(), []int{14}\n}", "func (Federation_State) EnumDescriptor() ([]byte, []int) {\n\treturn file_google_cloud_metastore_v1_metastore_federation_proto_rawDescGZIP(), []int{0, 0}\n}", "func (*UpdateStateRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_micro_mall_logistics_proto_logistics_business_logistics_business_proto_rawDescGZIP(), []int{1}\n}", "func (DatabaseInstance_SqlInstanceState) EnumDescriptor() ([]byte, []int) {\n\treturn file_google_cloud_sql_v1beta4_cloud_sql_resources_proto_rawDescGZIP(), []int{14, 0}\n}", "func (*CNETMsg_SignonState) Descriptor() ([]byte, []int) {\n\treturn file_artifact_networkbasetypes_proto_rawDescGZIP(), []int{11}\n}", "func (*CNETMsg_SignonState) Descriptor() ([]byte, []int) {\n\treturn file_csgo_netmessages_proto_rawDescGZIP(), []int{6}\n}", "func (*State) Descriptor() ([]byte, []int) {\n\treturn file_orc8r_protos_mconfig_mconfigs_proto_rawDescGZIP(), []int{8}\n}", "func (OperationMetadata_State) EnumDescriptor() ([]byte, []int) {\n\treturn file_google_cloud_datacatalog_lineage_v1_lineage_proto_rawDescGZIP(), []int{5, 0}\n}", "func (CMsgArcanaVotes_VotingState) EnumDescriptor() ([]byte, []int) {\n\treturn file_dota_gcmessages_client_proto_rawDescGZIP(), []int{191, 0}\n}", "func (*UeContext) Descriptor() ([]byte, []int) {\n\treturn file_lte_protos_oai_mme_nas_state_proto_rawDescGZIP(), []int{5}\n}", "func (*DeviceStateGetReply) Descriptor() ([]byte, []int) {\n\treturn file_api_worker_v1_device_state_proto_rawDescGZIP(), []int{7}\n}", "func (TargetState_Status) EnumDescriptor() ([]byte, []int) {\n\treturn file_yandex_cloud_loadbalancer_v1_network_load_balancer_proto_rawDescGZIP(), []int{3, 0}\n}", "func (*TargetState) Descriptor() ([]byte, []int) {\n\treturn file_yandex_cloud_loadbalancer_v1_network_load_balancer_proto_rawDescGZIP(), []int{3}\n}", "func (*StateDetail) Descriptor() ([]byte, []int) {\n\treturn file_response_proto_rawDescGZIP(), []int{8}\n}", "func (*CNETMsg_SignonState) 
Descriptor() ([]byte, []int) {\n\treturn file_netmessages_proto_rawDescGZIP(), []int{6}\n}", "func (*ReadModifyWriteStateSpec) Descriptor() ([]byte, []int) {\n\treturn file_org_apache_beam_model_pipeline_v1_beam_runner_api_proto_rawDescGZIP(), []int{10}\n}", "func (*UpdateTokenRequest) Descriptor() ([]byte, []int) {\n\treturn file_access_service_token_proto_rawDescGZIP(), []int{2}\n}", "func (ConstantSampler_ConstantDecision) EnumDescriptor() ([]byte, []int) {\n\treturn file_opencensus_proto_trace_v1_trace_config_proto_rawDescGZIP(), []int{2, 0}\n}", "func (*MigrationStateEvent) Descriptor() ([]byte, []int) {\n\treturn file_google_datastore_admin_v1_migration_proto_rawDescGZIP(), []int{0}\n}", "func (*MembershipState) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_gkehub_v1_membership_proto_rawDescGZIP(), []int{7}\n}", "func (*PasswordComplexityPolicyUpdate) Descriptor() ([]byte, []int) {\n\treturn file_management_proto_rawDescGZIP(), []int{38}\n}", "func (CBroadcast_BroadcastViewerState_Notification_EViewerState) EnumDescriptor() ([]byte, []int) {\n\treturn file_steammessages_broadcast_steamclient_proto_rawDescGZIP(), []int{64, 0}\n}", "func (*StreamingStateChange) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_video_livestream_logging_v1_logs_proto_rawDescGZIP(), []int{1}\n}", "func (UserGroupList_UserGroup_State) EnumDescriptor() ([]byte, []int) {\n\treturn file_api_proto_rawDescGZIP(), []int{91, 0, 0}\n}", "func (State) EnumDescriptor() ([]byte, []int) {\n\treturn file_determined_trial_v1_trial_proto_rawDescGZIP(), []int{0}\n}", "func (StandardPTransforms_DeprecatedPrimitives) EnumDescriptor() ([]byte, []int) {\n\treturn file_org_apache_beam_model_pipeline_v1_beam_runner_api_proto_rawDescGZIP(), []int{4, 1}\n}", "func (ConstantSampler_ConstantDecision) EnumDescriptor() ([]byte, []int) {\n\treturn file_github_com_solo_io_gloo_projects_gloo_api_external_envoy_config_trace_v3_opencensus_proto_rawDescGZIP(), []int{3, 0}\n}", "func (*CredentialsKVProto) Descriptor() ([]byte, []int) {\n\treturn file_Security_proto_rawDescGZIP(), []int{1}\n}", "func (ReferenceTransactionHookRequest_State) EnumDescriptor() ([]byte, []int) {\n\treturn file_hook_proto_rawDescGZIP(), []int{6, 0}\n}", "func (NodeState) EnumDescriptor() ([]byte, []int) {\n\treturn file_protobuf_clusrun_proto_rawDescGZIP(), []int{0}\n}", "func (x *fastReflection_GenesisState) Get(descriptor protoreflect.FieldDescriptor) protoreflect.Value {\n\tswitch descriptor.FullName() {\n\tcase \"cosmos.distribution.v1beta1.GenesisState.params\":\n\t\tvalue := x.Params\n\t\treturn protoreflect.ValueOfMessage(value.ProtoReflect())\n\tcase \"cosmos.distribution.v1beta1.GenesisState.fee_pool\":\n\t\tvalue := x.FeePool\n\t\treturn protoreflect.ValueOfMessage(value.ProtoReflect())\n\tcase \"cosmos.distribution.v1beta1.GenesisState.delegator_withdraw_infos\":\n\t\tif len(x.DelegatorWithdrawInfos) == 0 {\n\t\t\treturn protoreflect.ValueOfList(&_GenesisState_3_list{})\n\t\t}\n\t\tlistValue := &_GenesisState_3_list{list: &x.DelegatorWithdrawInfos}\n\t\treturn protoreflect.ValueOfList(listValue)\n\tcase \"cosmos.distribution.v1beta1.GenesisState.previous_proposer\":\n\t\tvalue := x.PreviousProposer\n\t\treturn protoreflect.ValueOfString(value)\n\tcase \"cosmos.distribution.v1beta1.GenesisState.outstanding_rewards\":\n\t\tif len(x.OutstandingRewards) == 0 {\n\t\t\treturn protoreflect.ValueOfList(&_GenesisState_5_list{})\n\t\t}\n\t\tlistValue := &_GenesisState_5_list{list: &x.OutstandingRewards}\n\t\treturn 
protoreflect.ValueOfList(listValue)\n\tcase \"cosmos.distribution.v1beta1.GenesisState.validator_accumulated_commissions\":\n\t\tif len(x.ValidatorAccumulatedCommissions) == 0 {\n\t\t\treturn protoreflect.ValueOfList(&_GenesisState_6_list{})\n\t\t}\n\t\tlistValue := &_GenesisState_6_list{list: &x.ValidatorAccumulatedCommissions}\n\t\treturn protoreflect.ValueOfList(listValue)\n\tcase \"cosmos.distribution.v1beta1.GenesisState.validator_historical_rewards\":\n\t\tif len(x.ValidatorHistoricalRewards) == 0 {\n\t\t\treturn protoreflect.ValueOfList(&_GenesisState_7_list{})\n\t\t}\n\t\tlistValue := &_GenesisState_7_list{list: &x.ValidatorHistoricalRewards}\n\t\treturn protoreflect.ValueOfList(listValue)\n\tcase \"cosmos.distribution.v1beta1.GenesisState.validator_current_rewards\":\n\t\tif len(x.ValidatorCurrentRewards) == 0 {\n\t\t\treturn protoreflect.ValueOfList(&_GenesisState_8_list{})\n\t\t}\n\t\tlistValue := &_GenesisState_8_list{list: &x.ValidatorCurrentRewards}\n\t\treturn protoreflect.ValueOfList(listValue)\n\tcase \"cosmos.distribution.v1beta1.GenesisState.delegator_starting_infos\":\n\t\tif len(x.DelegatorStartingInfos) == 0 {\n\t\t\treturn protoreflect.ValueOfList(&_GenesisState_9_list{})\n\t\t}\n\t\tlistValue := &_GenesisState_9_list{list: &x.DelegatorStartingInfos}\n\t\treturn protoreflect.ValueOfList(listValue)\n\tcase \"cosmos.distribution.v1beta1.GenesisState.validator_slash_events\":\n\t\tif len(x.ValidatorSlashEvents) == 0 {\n\t\t\treturn protoreflect.ValueOfList(&_GenesisState_10_list{})\n\t\t}\n\t\tlistValue := &_GenesisState_10_list{list: &x.ValidatorSlashEvents}\n\t\treturn protoreflect.ValueOfList(listValue)\n\tdefault:\n\t\tif descriptor.IsExtension() {\n\t\t\tpanic(fmt.Errorf(\"proto3 declared messages do not support extensions: cosmos.distribution.v1beta1.GenesisState\"))\n\t\t}\n\t\tpanic(fmt.Errorf(\"message cosmos.distribution.v1beta1.GenesisState does not contain field %s\", descriptor.FullName()))\n\t}\n}", "func (*AlertingCondition_State) Descriptor() ([]byte, []int) {\n\treturn edgelq_monitoring_proto_v3_alerting_condition_proto_rawDescGZIP(), []int{0, 1}\n}", "func (*OrderedListStateSpec) Descriptor() ([]byte, []int) {\n\treturn file_org_apache_beam_model_pipeline_v1_beam_runner_api_proto_rawDescGZIP(), []int{12}\n}", "func (Issue_IssueState) EnumDescriptor() ([]byte, []int) {\n\treturn file_githubcard_proto_rawDescGZIP(), []int{3, 0}\n}", "func (PeerState) EnumDescriptor() ([]byte, []int) {\n\treturn file_raft_serverpb_raft_serverpb_proto_rawDescGZIP(), []int{0}\n}", "func (Run_State) EnumDescriptor() ([]byte, []int) {\n\treturn file_google_cloud_datacatalog_lineage_v1_lineage_proto_rawDescGZIP(), []int{1, 0}\n}", "func (AppState) EnumDescriptor() ([]byte, []int) {\n\treturn file_management_proto_rawDescGZIP(), []int{18}\n}" ]
[ "0.64241785", "0.62223697", "0.62026495", "0.61575454", "0.6137586", "0.60295725", "0.60001373", "0.5997433", "0.5975869", "0.5968872", "0.59638256", "0.59485865", "0.5922023", "0.58976424", "0.58952713", "0.5827793", "0.5794404", "0.5776734", "0.57455724", "0.57246864", "0.5697943", "0.5647782", "0.56453514", "0.56443983", "0.5637351", "0.5632583", "0.5619575", "0.5614686", "0.5610116", "0.56057423", "0.55976087", "0.5588728", "0.55830145", "0.557704", "0.5572252", "0.55682904", "0.5563745", "0.55430233", "0.5529006", "0.55284375", "0.5522907", "0.5522907", "0.55189866", "0.5518332", "0.55141157", "0.5512986", "0.55115634", "0.5510624", "0.5508735", "0.55059564", "0.5498005", "0.5467834", "0.54606384", "0.54598373", "0.54520285", "0.5451584", "0.54490995", "0.5444143", "0.5442326", "0.54270846", "0.54253864", "0.5410951", "0.54103374", "0.54100776", "0.54057455", "0.5405277", "0.5404772", "0.5401212", "0.5400798", "0.5390612", "0.5389606", "0.53857464", "0.5385634", "0.53827524", "0.53611445", "0.5360505", "0.5348279", "0.53291935", "0.53223425", "0.5321443", "0.5315661", "0.53116786", "0.5310177", "0.53049546", "0.5304266", "0.5303202", "0.5301991", "0.53004766", "0.5294301", "0.5283835", "0.52834284", "0.5282333", "0.5280235", "0.52771604", "0.52759683", "0.52742535", "0.5271433", "0.5271166", "0.52656716", "0.52619314" ]
0.69412255
0
Deprecated: Use Secret.ProtoReflect.Descriptor instead.
func (*Secret) Descriptor() ([]byte, []int) {
	return file_google_cloud_secrets_v1beta1_resources_proto_rawDescGZIP(), []int{0}
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (*CredentialsKVProto) Descriptor() ([]byte, []int) {\n\treturn file_Security_proto_rawDescGZIP(), []int{1}\n}", "func (*TokenProto) Descriptor() ([]byte, []int) {\n\treturn file_Security_proto_rawDescGZIP(), []int{0}\n}", "func (*Secret) Descriptor() ([]byte, []int) {\n\treturn file_pkg_flow_grpc_secrets_proto_rawDescGZIP(), []int{0}\n}", "func (*SecretPayload) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_secrets_v1beta1_resources_proto_rawDescGZIP(), []int{3}\n}", "func (*DeleteSecretRequest) Descriptor() ([]byte, []int) {\n\treturn file_pkg_flow_grpc_secrets_proto_rawDescGZIP(), []int{7}\n}", "func (*CredentialsProto) Descriptor() ([]byte, []int) {\n\treturn file_Security_proto_rawDescGZIP(), []int{2}\n}", "func (*Secret) Descriptor() ([]byte, []int) {\n\treturn file_yandex_cloud_datatransfer_v1_endpoint_common_proto_rawDescGZIP(), []int{1}\n}", "func (*SecretVersion) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_secrets_v1beta1_resources_proto_rawDescGZIP(), []int{1}\n}", "func (*SetSecretRequest) Descriptor() ([]byte, []int) {\n\treturn file_pkg_flow_grpc_secrets_proto_rawDescGZIP(), []int{5}\n}", "func (*SecretEdge) Descriptor() ([]byte, []int) {\n\treturn file_pkg_flow_grpc_secrets_proto_rawDescGZIP(), []int{1}\n}", "func (*GetDelegationTokenResponseProto) Descriptor() ([]byte, []int) {\n\treturn file_Security_proto_rawDescGZIP(), []int{4}\n}", "func (*PasswordComplexityPolicyUpdate) Descriptor() ([]byte, []int) {\n\treturn file_management_proto_rawDescGZIP(), []int{38}\n}", "func (*Secrets) Descriptor() ([]byte, []int) {\n\treturn file_pkg_flow_grpc_secrets_proto_rawDescGZIP(), []int{2}\n}", "func (*CancelDelegationTokenResponseProto) Descriptor() ([]byte, []int) {\n\treturn file_Security_proto_rawDescGZIP(), []int{8}\n}", "func (*SetSecretResponse) Descriptor() ([]byte, []int) {\n\treturn file_pkg_flow_grpc_secrets_proto_rawDescGZIP(), []int{6}\n}", "func (*GetDelegationTokenRequestProto) Descriptor() ([]byte, []int) {\n\treturn file_Security_proto_rawDescGZIP(), []int{3}\n}", "func (*Deprecation) Descriptor() ([]byte, []int) {\n\treturn file_external_cfgmgmt_response_nodes_proto_rawDescGZIP(), []int{8}\n}", "func (*ConstantSampler) Descriptor() ([]byte, []int) {\n\treturn file_github_com_solo_io_gloo_projects_gloo_api_external_envoy_config_trace_v3_opencensus_proto_rawDescGZIP(), []int{3}\n}", "func (*ClientSecret) Descriptor() ([]byte, []int) {\n\treturn file_management_proto_rawDescGZIP(), []int{100}\n}", "func (*AddPeerRequest) Descriptor() ([]byte, []int) {\n\treturn file_github_com_yahuizhan_dappley_metrics_go_api_rpc_pb_rpc_proto_rawDescGZIP(), []int{8}\n}", "func (*Embed) Descriptor() ([]byte, []int) {\n\treturn file_chat_v1_messages_proto_rawDescGZIP(), []int{2}\n}", "func (*ClientSecret) Descriptor() ([]byte, []int) {\n\treturn file_management_proto_rawDescGZIP(), []int{103}\n}", "func (*SecretsRequest) Descriptor() ([]byte, []int) {\n\treturn file_pkg_flow_grpc_secrets_proto_rawDescGZIP(), []int{3}\n}", "func (*RenewDelegationTokenResponseProto) Descriptor() ([]byte, []int) {\n\treturn file_Security_proto_rawDescGZIP(), []int{6}\n}", "func ProtoFromDescriptor(d protoreflect.Descriptor) proto.Message {\n\tswitch d := d.(type) {\n\tcase protoreflect.FileDescriptor:\n\t\treturn ProtoFromFileDescriptor(d)\n\tcase protoreflect.MessageDescriptor:\n\t\treturn ProtoFromMessageDescriptor(d)\n\tcase protoreflect.FieldDescriptor:\n\t\treturn ProtoFromFieldDescriptor(d)\n\tcase protoreflect.OneofDescriptor:\n\t\treturn 
ProtoFromOneofDescriptor(d)\n\tcase protoreflect.EnumDescriptor:\n\t\treturn ProtoFromEnumDescriptor(d)\n\tcase protoreflect.EnumValueDescriptor:\n\t\treturn ProtoFromEnumValueDescriptor(d)\n\tcase protoreflect.ServiceDescriptor:\n\t\treturn ProtoFromServiceDescriptor(d)\n\tcase protoreflect.MethodDescriptor:\n\t\treturn ProtoFromMethodDescriptor(d)\n\tdefault:\n\t\t// WTF??\n\t\tif res, ok := d.(DescriptorProtoWrapper); ok {\n\t\t\treturn res.AsProto()\n\t\t}\n\t\treturn nil\n\t}\n}", "func (*Instance) Descriptor() ([]byte, []int) {\n\treturn file_cloudprovider_externalgrpc_protos_externalgrpc_proto_rawDescGZIP(), []int{28}\n}", "func (*RateLimitingSampler) Descriptor() ([]byte, []int) {\n\treturn file_github_com_solo_io_gloo_projects_gloo_api_external_envoy_config_trace_v3_opencensus_proto_rawDescGZIP(), []int{4}\n}", "func (*PasswordComplexityPolicy) Descriptor() ([]byte, []int) {\n\treturn file_management_proto_rawDescGZIP(), []int{167}\n}", "func (*PasswordComplexityPolicy) Descriptor() ([]byte, []int) {\n\treturn file_management_proto_rawDescGZIP(), []int{36}\n}", "func (*SafetyFeedback) Descriptor() ([]byte, []int) {\n\treturn file_google_ai_generativelanguage_v1beta2_safety_proto_rawDescGZIP(), []int{1}\n}", "func (*EvictWritersResponseProto) Descriptor() ([]byte, []int) {\n\treturn file_ClientDatanodeProtocol_proto_rawDescGZIP(), []int{11}\n}", "func (*InputDisconnect) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_video_livestream_logging_v1_logs_proto_rawDescGZIP(), []int{10}\n}", "func (*PasswordLeakVerification) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_recaptchaenterprise_v1beta1_recaptchaenterprise_proto_rawDescGZIP(), []int{4}\n}", "func (*PrivateApiCF) Descriptor() ([]byte, []int) {\n\treturn file_pkg_kascfg_kascfg_proto_rawDescGZIP(), []int{24}\n}", "func (*CallCredentials) Descriptor() ([]byte, []int) {\n\treturn file_github_com_solo_io_gloo_projects_gloo_api_v1_ssl_ssl_proto_rawDescGZIP(), []int{4}\n}", "func (*Token) Descriptor() ([]byte, []int) {\n\treturn file_GrpcServices_auth_proto_rawDescGZIP(), []int{1}\n}", "func (*TraceProto) Descriptor() ([]byte, []int) {\n\treturn file_internal_tracing_extended_extended_trace_proto_rawDescGZIP(), []int{0}\n}", "func (*Disconnect) Descriptor() ([]byte, []int) {\n\treturn file_uni_proto_rawDescGZIP(), []int{11}\n}", "func (*DirectiveCreateValidator) Descriptor() ([]byte, []int) {\n\treturn file_Harmony_proto_rawDescGZIP(), []int{7}\n}", "func (*Token) Descriptor() ([]byte, []int) {\n\treturn file_grpcapi_sequencelabeler_proto_rawDescGZIP(), []int{1}\n}", "func ProtoFromFileDescriptor(d protoreflect.FileDescriptor) *descriptorpb.FileDescriptorProto {\n\tif imp, ok := d.(protoreflect.FileImport); ok {\n\t\td = imp.FileDescriptor\n\t}\n\ttype canProto interface {\n\t\tFileDescriptorProto() *descriptorpb.FileDescriptorProto\n\t}\n\tif res, ok := d.(canProto); ok {\n\t\treturn res.FileDescriptorProto()\n\t}\n\tif res, ok := d.(DescriptorProtoWrapper); ok {\n\t\tif fd, ok := res.AsProto().(*descriptorpb.FileDescriptorProto); ok {\n\t\t\treturn fd\n\t\t}\n\t}\n\treturn protodesc.ToFileDescriptorProto(d)\n}", "func (*CancelDelegationTokenRequestProto) Descriptor() ([]byte, []int) {\n\treturn file_Security_proto_rawDescGZIP(), []int{7}\n}", "func (*MyProto) Descriptor() ([]byte, []int) {\n\treturn file_my_proto_proto_rawDescGZIP(), []int{0}\n}", "func (StandardPTransforms_DeprecatedPrimitives) EnumDescriptor() ([]byte, []int) {\n\treturn 
file_org_apache_beam_model_pipeline_v1_beam_runner_api_proto_rawDescGZIP(), []int{4, 1}\n}", "func (*TokenWitness) Descriptor() ([]byte, []int) {\n\treturn file_witness_proto_rawDescGZIP(), []int{6}\n}", "func (*DirectiveUndelegate) Descriptor() ([]byte, []int) {\n\treturn file_Harmony_proto_rawDescGZIP(), []int{10}\n}", "func (*PasswordComplexityPolicyID) Descriptor() ([]byte, []int) {\n\treturn file_management_proto_rawDescGZIP(), []int{35}\n}", "func (*Decl) Descriptor() ([]byte, []int) {\n\treturn file_google_api_expr_v1alpha1_checked_proto_rawDescGZIP(), []int{2}\n}", "func (*TelemetryParams) Descriptor() ([]byte, []int) {\n\treturn file_protocol_rpc_rpc_proto_rawDescGZIP(), []int{62}\n}", "func (*Modifier) Descriptor() ([]byte, []int) {\n\treturn file_FillerGame_proto_rawDescGZIP(), []int{6}\n}", "func (*Embed_EmbedField) Descriptor() ([]byte, []int) {\n\treturn file_chat_v1_messages_proto_rawDescGZIP(), []int{2, 1}\n}", "func (x *fastReflection_LightClientAttackEvidence) Descriptor() protoreflect.MessageDescriptor {\n\treturn md_LightClientAttackEvidence\n}", "func (*Example) Descriptor() ([]byte, []int) {\n\treturn file_google_ai_generativelanguage_v1beta2_discuss_service_proto_rawDescGZIP(), []int{4}\n}", "func (*StandardProtocols) Descriptor() ([]byte, []int) {\n\treturn file_org_apache_beam_model_pipeline_v1_beam_runner_api_proto_rawDescGZIP(), []int{54}\n}", "func (*PasswordAgePolicyUpdate) Descriptor() ([]byte, []int) {\n\treturn file_management_proto_rawDescGZIP(), []int{42}\n}", "func (*CredentialAttribute) Descriptor() ([]byte, []int) {\n\treturn file_messages_proto_rawDescGZIP(), []int{9}\n}", "func (*PasswordComplexityPolicyRequest) Descriptor() ([]byte, []int) {\n\treturn file_management_proto_rawDescGZIP(), []int{168}\n}", "func (*SignatureInfo) Descriptor() ([]byte, []int) {\n\treturn file_proto_covidshieldv1_proto_rawDescGZIP(), []int{6}\n}", "func (*GetDatanodeInfoResponseProto) Descriptor() ([]byte, []int) {\n\treturn file_ClientDatanodeProtocol_proto_rawDescGZIP(), []int{13}\n}", "func (*AddPeerResponse) Descriptor() ([]byte, []int) {\n\treturn file_github_com_yahuizhan_dappley_metrics_go_api_rpc_pb_rpc_proto_rawDescGZIP(), []int{30}\n}", "func (*ResourceManifest) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_gkehub_v1_membership_proto_rawDescGZIP(), []int{4}\n}", "func (*AnalysisMessageWeakSchema) Descriptor() ([]byte, []int) {\n\treturn file_analysis_v1alpha1_message_proto_rawDescGZIP(), []int{1}\n}", "func ProtoFromMethodDescriptor(d protoreflect.MethodDescriptor) *descriptorpb.MethodDescriptorProto {\n\ttype canProto interface {\n\t\tMethodDescriptorProto() *descriptorpb.MethodDescriptorProto\n\t}\n\tif res, ok := d.(canProto); ok {\n\t\treturn res.MethodDescriptorProto()\n\t}\n\tif res, ok := d.(DescriptorProtoWrapper); ok {\n\t\tif md, ok := res.AsProto().(*descriptorpb.MethodDescriptorProto); ok {\n\t\t\treturn md\n\t\t}\n\t}\n\treturn protodesc.ToMethodDescriptorProto(d)\n}", "func (*ValidatorUpdate) Descriptor() ([]byte, []int) {\n\treturn file_tm_replay_proto_rawDescGZIP(), []int{9}\n}", "func (*Credential) Descriptor() ([]byte, []int) {\n\treturn file_messages_proto_rawDescGZIP(), []int{10}\n}", "func (*Metadata) Descriptor() ([]byte, []int) {\n\treturn file_authzed_api_v0_namespace_proto_rawDescGZIP(), []int{0}\n}", "func (*Module) Descriptor() ([]byte, []int) {\n\treturn file_google_devtools_cloudtrace_v2_trace_proto_rawDescGZIP(), []int{3}\n}", "func (*DeleteBrokerPasswordMetadata) Descriptor() ([]byte, []int) {\n\treturn 
file_yandex_cloud_iot_broker_v1_broker_service_proto_rawDescGZIP(), []int{20}\n}", "func (*GetPeerInfoRequest) Descriptor() ([]byte, []int) {\n\treturn file_github_com_yahuizhan_dappley_metrics_go_api_rpc_pb_rpc_proto_rawDescGZIP(), []int{6}\n}", "func (*Type) Descriptor() ([]byte, []int) {\n\treturn file_google_api_expr_v1alpha1_checked_proto_rawDescGZIP(), []int{1}\n}", "func (*Permission) Descriptor() ([]byte, []int) {\n\treturn file_protodef_user_user_proto_rawDescGZIP(), []int{18}\n}", "func (*ApiWarning) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_sql_v1_cloud_sql_resources_proto_rawDescGZIP(), []int{1}\n}", "func (*AddInstanceInstruction) Descriptor() ([]byte, []int) {\n\treturn file_proto_api_proto_rawDescGZIP(), []int{12}\n}", "func (x *fastReflection_Evidence) Descriptor() protoreflect.MessageDescriptor {\n\treturn md_Evidence\n}", "func (x *fastReflection_Metadata) Descriptor() protoreflect.MessageDescriptor {\n\treturn md_Metadata\n}", "func (*DirectiveEditValidator) Descriptor() ([]byte, []int) {\n\treturn file_Harmony_proto_rawDescGZIP(), []int{8}\n}", "func (*PatchCollectorsRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{161}\n}", "func (*PrivateVisibility) Descriptor() ([]byte, []int) {\n\treturn file_yandex_cloud_dns_v1_dns_zone_proto_rawDescGZIP(), []int{2}\n}", "func (*Credentials) Descriptor() ([]byte, []int) {\n\treturn file_fpc_fpc_proto_rawDescGZIP(), []int{3}\n}", "func (*NetProtoTalker) Descriptor() ([]byte, []int) {\n\treturn file_pkg_smgrpc_smgrpc_proto_rawDescGZIP(), []int{1}\n}", "func (*Token) Descriptor() ([]byte, []int) {\n\treturn file_proto_user_user_proto_rawDescGZIP(), []int{3}\n}", "func (*DisconnectRequest) Descriptor() ([]byte, []int) {\n\treturn file_authorization_proto_rawDescGZIP(), []int{1}\n}", "func (*RenewDelegationTokenRequestProto) Descriptor() ([]byte, []int) {\n\treturn file_Security_proto_rawDescGZIP(), []int{5}\n}", "func (*DeleteBrokerPasswordRequest) Descriptor() ([]byte, []int) {\n\treturn file_yandex_cloud_iot_broker_v1_broker_service_proto_rawDescGZIP(), []int{19}\n}", "func ToDescriptorProto(message protoreflect.MessageDescriptor) *descriptorpb.DescriptorProto {\n\tp := &descriptorpb.DescriptorProto{\n\t\tName: proto.String(string(message.Name())),\n\t\tOptions: proto.Clone(message.Options()).(*descriptorpb.MessageOptions),\n\t}\n\tfor i, fields := 0, message.Fields(); i < fields.Len(); i++ {\n\t\tp.Field = append(p.Field, ToFieldDescriptorProto(fields.Get(i)))\n\t}\n\tfor i, exts := 0, message.Extensions(); i < exts.Len(); i++ {\n\t\tp.Extension = append(p.Extension, ToFieldDescriptorProto(exts.Get(i)))\n\t}\n\tfor i, messages := 0, message.Messages(); i < messages.Len(); i++ {\n\t\tp.NestedType = append(p.NestedType, ToDescriptorProto(messages.Get(i)))\n\t}\n\tfor i, enums := 0, message.Enums(); i < enums.Len(); i++ {\n\t\tp.EnumType = append(p.EnumType, ToEnumDescriptorProto(enums.Get(i)))\n\t}\n\tfor i, xranges := 0, message.ExtensionRanges(); i < xranges.Len(); i++ {\n\t\txrange := xranges.Get(i)\n\t\tp.ExtensionRange = append(p.ExtensionRange, &descriptorpb.DescriptorProto_ExtensionRange{\n\t\t\tStart: proto.Int32(int32(xrange[0])),\n\t\t\tEnd: proto.Int32(int32(xrange[1])),\n\t\t\tOptions: proto.Clone(message.ExtensionRangeOptions(i)).(*descriptorpb.ExtensionRangeOptions),\n\t\t})\n\t}\n\tfor i, oneofs := 0, message.Oneofs(); i < oneofs.Len(); i++ {\n\t\tp.OneofDecl = append(p.OneofDecl, ToOneofDescriptorProto(oneofs.Get(i)))\n\t}\n\tfor i, ranges := 
0, message.ReservedRanges(); i < ranges.Len(); i++ {\n\t\trrange := ranges.Get(i)\n\t\tp.ReservedRange = append(p.ReservedRange, &descriptorpb.DescriptorProto_ReservedRange{\n\t\t\tStart: proto.Int32(int32(rrange[0])),\n\t\t\tEnd: proto.Int32(int32(rrange[1])),\n\t\t})\n\t}\n\tfor i, names := 0, message.ReservedNames(); i < names.Len(); i++ {\n\t\tp.ReservedName = append(p.ReservedName, string(names.Get(i)))\n\t}\n\treturn p\n}", "func (*Message12817) Descriptor() ([]byte, []int) {\n\treturn file_datasets_google_message4_benchmark_message4_2_proto_rawDescGZIP(), []int{22}\n}", "func (*SafetySetting) Descriptor() ([]byte, []int) {\n\treturn file_google_ai_generativelanguage_v1beta2_safety_proto_rawDescGZIP(), []int{3}\n}", "func (*LabelledPayload) Descriptor() ([]byte, []int) {\n\treturn file_org_apache_beam_model_pipeline_v1_beam_runner_api_proto_rawDescGZIP(), []int{59}\n}", "func (*AllowedValues) Descriptor() ([]byte, []int) {\n\treturn file_api_assessment_metric_proto_rawDescGZIP(), []int{3}\n}", "func (*TrustedDelegation) Descriptor() ([]byte, []int) {\n\treturn file_go_chromium_org_luci_swarming_proto_config_pools_proto_rawDescGZIP(), []int{3}\n}", "func (x *fastReflection_FlagOptions) Descriptor() protoreflect.MessageDescriptor {\n\treturn md_FlagOptions\n}", "func (*Message) Descriptor() ([]byte, []int) {\n\treturn file_google_ai_generativelanguage_v1beta2_discuss_service_proto_rawDescGZIP(), []int{2}\n}", "func (*WatchRequestTypeProto) Descriptor() ([]byte, []int) {\n\treturn file_raft_proto_rawDescGZIP(), []int{25}\n}", "func (*Permission) Descriptor() ([]byte, []int) {\n\treturn file_ocis_messages_settings_v0_settings_proto_rawDescGZIP(), []int{12}\n}", "func (*SecurityOperation) Descriptor() ([]byte, []int) {\n\treturn file_testvector_tv_proto_rawDescGZIP(), []int{9}\n}", "func (*Description) Descriptor() ([]byte, []int) {\n\treturn file_Harmony_proto_rawDescGZIP(), []int{4}\n}", "func (*GetBalancerBandwidthResponseProto) Descriptor() ([]byte, []int) {\n\treturn file_ClientDatanodeProtocol_proto_rawDescGZIP(), []int{19}\n}", "func (*Token) Descriptor() ([]byte, []int) {\n\treturn file_auth_proto_rawDescGZIP(), []int{0}\n}", "func (*NotAuthorized) Descriptor() ([]byte, []int) {\n\treturn file_example_proto_rawDescGZIP(), []int{1}\n}", "func (*Friend) Descriptor() ([]byte, []int) {\n\treturn file_api_proto_rawDescGZIP(), []int{37}\n}" ]
[ "0.6954283", "0.6922612", "0.68011224", "0.6779633", "0.67407244", "0.67103106", "0.662461", "0.6623578", "0.66090536", "0.65876913", "0.65496767", "0.6473732", "0.64469266", "0.6440782", "0.63975", "0.6394054", "0.637844", "0.6369081", "0.6352151", "0.6339204", "0.6335964", "0.63315195", "0.6317151", "0.631343", "0.6297556", "0.629451", "0.628857", "0.6282947", "0.62808776", "0.6280511", "0.6276647", "0.6256393", "0.6255041", "0.62527025", "0.62481385", "0.62452585", "0.62444663", "0.6243771", "0.62431204", "0.6235702", "0.6233696", "0.62331975", "0.6232073", "0.6231716", "0.62280583", "0.6219141", "0.62160164", "0.6215971", "0.6205661", "0.62017035", "0.62004864", "0.61987084", "0.61949337", "0.61942196", "0.61940396", "0.6187971", "0.6174953", "0.6174109", "0.6163081", "0.61628854", "0.6161089", "0.6157733", "0.61493576", "0.6146637", "0.61436945", "0.61405736", "0.61388445", "0.61322284", "0.6131777", "0.61294144", "0.61254805", "0.61252815", "0.6124606", "0.61204183", "0.61118144", "0.6108597", "0.6108502", "0.6108279", "0.61060035", "0.6105871", "0.61051327", "0.6104915", "0.6101097", "0.6100233", "0.6099437", "0.6099207", "0.6098413", "0.6094853", "0.609156", "0.6091535", "0.60915285", "0.6089391", "0.6089256", "0.6088218", "0.6087639", "0.6085341", "0.6084531", "0.6084079", "0.60817975", "0.6079479" ]
0.6606762
9
Deprecated: Use SecretVersion.ProtoReflect.Descriptor instead.
func (*SecretVersion) Descriptor() ([]byte, []int) { return file_google_cloud_secrets_v1beta1_resources_proto_rawDescGZIP(), []int{1} }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (*CredentialsKVProto) Descriptor() ([]byte, []int) {\n\treturn file_Security_proto_rawDescGZIP(), []int{1}\n}", "func (*SecretPayload) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_secrets_v1beta1_resources_proto_rawDescGZIP(), []int{3}\n}", "func (*TokenProto) Descriptor() ([]byte, []int) {\n\treturn file_Security_proto_rawDescGZIP(), []int{0}\n}", "func (*Secret) Descriptor() ([]byte, []int) {\n\treturn file_pkg_flow_grpc_secrets_proto_rawDescGZIP(), []int{0}\n}", "func (*DeleteSecretRequest) Descriptor() ([]byte, []int) {\n\treturn file_pkg_flow_grpc_secrets_proto_rawDescGZIP(), []int{7}\n}", "func (*Secret) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_secrets_v1beta1_resources_proto_rawDescGZIP(), []int{0}\n}", "func (*SecretEdge) Descriptor() ([]byte, []int) {\n\treturn file_pkg_flow_grpc_secrets_proto_rawDescGZIP(), []int{1}\n}", "func (*Secret) Descriptor() ([]byte, []int) {\n\treturn file_yandex_cloud_datatransfer_v1_endpoint_common_proto_rawDescGZIP(), []int{1}\n}", "func (*SetSecretRequest) Descriptor() ([]byte, []int) {\n\treturn file_pkg_flow_grpc_secrets_proto_rawDescGZIP(), []int{5}\n}", "func (*PasswordComplexityPolicyUpdate) Descriptor() ([]byte, []int) {\n\treturn file_management_proto_rawDescGZIP(), []int{38}\n}", "func (*CredentialsProto) Descriptor() ([]byte, []int) {\n\treturn file_Security_proto_rawDescGZIP(), []int{2}\n}", "func (*Deprecation) Descriptor() ([]byte, []int) {\n\treturn file_external_cfgmgmt_response_nodes_proto_rawDescGZIP(), []int{8}\n}", "func (*Secrets) Descriptor() ([]byte, []int) {\n\treturn file_pkg_flow_grpc_secrets_proto_rawDescGZIP(), []int{2}\n}", "func (*ClientSecret) Descriptor() ([]byte, []int) {\n\treturn file_management_proto_rawDescGZIP(), []int{100}\n}", "func (*ClientSecret) Descriptor() ([]byte, []int) {\n\treturn file_management_proto_rawDescGZIP(), []int{103}\n}", "func (*SetSecretResponse) Descriptor() ([]byte, []int) {\n\treturn file_pkg_flow_grpc_secrets_proto_rawDescGZIP(), []int{6}\n}", "func (*GetDelegationTokenResponseProto) Descriptor() ([]byte, []int) {\n\treturn file_Security_proto_rawDescGZIP(), []int{4}\n}", "func (*DeleteVersionRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_appengine_v1_appengine_proto_rawDescGZIP(), []int{14}\n}", "func (*GetVersionRequest) Descriptor() ([]byte, []int) {\n\treturn file_github_com_yahuizhan_dappley_metrics_go_api_rpc_pb_rpc_proto_rawDescGZIP(), []int{9}\n}", "func (*PasswordAgePolicyUpdate) Descriptor() ([]byte, []int) {\n\treturn file_management_proto_rawDescGZIP(), []int{42}\n}", "func (*Version) Descriptor() ([]byte, []int) {\n\treturn file_google_protobuf_compiler_plugin_proto_rawDescGZIP(), []int{0}\n}", "func (*SecretsRequest) Descriptor() ([]byte, []int) {\n\treturn file_pkg_flow_grpc_secrets_proto_rawDescGZIP(), []int{3}\n}", "func (*GetVersionRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_appengine_v1_appengine_proto_rawDescGZIP(), []int{11}\n}", "func (*CancelDelegationTokenResponseProto) Descriptor() ([]byte, []int) {\n\treturn file_Security_proto_rawDescGZIP(), []int{8}\n}", "func (*RenewDelegationTokenResponseProto) Descriptor() ([]byte, []int) {\n\treturn file_Security_proto_rawDescGZIP(), []int{6}\n}", "func (*ValidatorUpdate) Descriptor() ([]byte, []int) {\n\treturn file_tm_replay_proto_rawDescGZIP(), []int{9}\n}", "func (*Embed) Descriptor() ([]byte, []int) {\n\treturn file_chat_v1_messages_proto_rawDescGZIP(), []int{2}\n}", "func (*Instance) Descriptor() ([]byte, []int) {\n\treturn 
file_cloudprovider_externalgrpc_protos_externalgrpc_proto_rawDescGZIP(), []int{28}\n}", "func (*PasswordLeakVerification) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_recaptchaenterprise_v1beta1_recaptchaenterprise_proto_rawDescGZIP(), []int{4}\n}", "func (SecretVersion_State) EnumDescriptor() ([]byte, []int) {\n\treturn file_google_cloud_secrets_v1beta1_resources_proto_rawDescGZIP(), []int{1, 0}\n}", "func (*PrivateApiCF) Descriptor() ([]byte, []int) {\n\treturn file_pkg_kascfg_kascfg_proto_rawDescGZIP(), []int{24}\n}", "func (*ConstantSampler) Descriptor() ([]byte, []int) {\n\treturn file_github_com_solo_io_gloo_projects_gloo_api_external_envoy_config_trace_v3_opencensus_proto_rawDescGZIP(), []int{3}\n}", "func (StandardPTransforms_DeprecatedPrimitives) EnumDescriptor() ([]byte, []int) {\n\treturn file_org_apache_beam_model_pipeline_v1_beam_runner_api_proto_rawDescGZIP(), []int{4, 1}\n}", "func (*GetDelegationTokenRequestProto) Descriptor() ([]byte, []int) {\n\treturn file_Security_proto_rawDescGZIP(), []int{3}\n}", "func (*SignatureInfo) Descriptor() ([]byte, []int) {\n\treturn file_proto_covidshieldv1_proto_rawDescGZIP(), []int{6}\n}", "func (*SafetyFeedback) Descriptor() ([]byte, []int) {\n\treturn file_google_ai_generativelanguage_v1beta2_safety_proto_rawDescGZIP(), []int{1}\n}", "func (*Version) Descriptor() ([]byte, []int) {\n\treturn file_s3_proto_s3_proto_rawDescGZIP(), []int{53}\n}", "func (x *fastReflection_LightClientAttackEvidence) Descriptor() protoreflect.MessageDescriptor {\n\treturn md_LightClientAttackEvidence\n}", "func (*CreateVersionRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_appengine_v1_appengine_proto_rawDescGZIP(), []int{12}\n}", "func (*Token) Descriptor() ([]byte, []int) {\n\treturn file_GrpcServices_auth_proto_rawDescGZIP(), []int{1}\n}", "func (*ResourceManifest) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_gkehub_v1_membership_proto_rawDescGZIP(), []int{4}\n}", "func (*Token) Descriptor() ([]byte, []int) {\n\treturn file_grpcapi_sequencelabeler_proto_rawDescGZIP(), []int{1}\n}", "func (*ValidatorUpdates) Descriptor() ([]byte, []int) {\n\treturn file_core_abci_v1alpha1_abci_proto_rawDescGZIP(), []int{6}\n}", "func (*ApiWarning) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_sql_v1_cloud_sql_resources_proto_rawDescGZIP(), []int{1}\n}", "func (*VersionParams) Descriptor() ([]byte, []int) {\n\treturn file_tm_replay_proto_rawDescGZIP(), []int{18}\n}", "func (*ApiWarning) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_sql_v1beta4_cloud_sql_resources_proto_rawDescGZIP(), []int{1}\n}", "func (*VectorClock) Descriptor() ([]byte, []int) {\n\treturn file_pkg_proto_l3_proto_rawDescGZIP(), []int{3}\n}", "func (*AddPeerRequest) Descriptor() ([]byte, []int) {\n\treturn file_github_com_yahuizhan_dappley_metrics_go_api_rpc_pb_rpc_proto_rawDescGZIP(), []int{8}\n}", "func (*TokenWitness) Descriptor() ([]byte, []int) {\n\treturn file_witness_proto_rawDescGZIP(), []int{6}\n}", "func (*S2C_TokenUpdate) Descriptor() ([]byte, []int) {\n\treturn file_global_token_proto_rawDescGZIP(), []int{3}\n}", "func (*EvictWritersResponseProto) Descriptor() ([]byte, []int) {\n\treturn file_ClientDatanodeProtocol_proto_rawDescGZIP(), []int{11}\n}", "func (*UpdateTokenRequest) Descriptor() ([]byte, []int) {\n\treturn file_access_service_token_proto_rawDescGZIP(), []int{2}\n}", "func (*UpdateVersionRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_appengine_v1_appengine_proto_rawDescGZIP(), []int{13}\n}", "func 
(*InputDisconnect) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_video_livestream_logging_v1_logs_proto_rawDescGZIP(), []int{10}\n}", "func (*RateLimitingSampler) Descriptor() ([]byte, []int) {\n\treturn file_github_com_solo_io_gloo_projects_gloo_api_external_envoy_config_trace_v3_opencensus_proto_rawDescGZIP(), []int{4}\n}", "func (*Embed_EmbedField) Descriptor() ([]byte, []int) {\n\treturn file_chat_v1_messages_proto_rawDescGZIP(), []int{2, 1}\n}", "func (*UpgradeRuntimeRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_notebooks_v1_managed_service_proto_rawDescGZIP(), []int{9}\n}", "func (*PasswordComplexityPolicy) Descriptor() ([]byte, []int) {\n\treturn file_management_proto_rawDescGZIP(), []int{167}\n}", "func (*PasswordComplexityPolicy) Descriptor() ([]byte, []int) {\n\treturn file_management_proto_rawDescGZIP(), []int{36}\n}", "func (*Metadata) Descriptor() ([]byte, []int) {\n\treturn file_authzed_api_v0_namespace_proto_rawDescGZIP(), []int{0}\n}", "func (*StandardProtocols) Descriptor() ([]byte, []int) {\n\treturn file_org_apache_beam_model_pipeline_v1_beam_runner_api_proto_rawDescGZIP(), []int{54}\n}", "func (*Module) Descriptor() ([]byte, []int) {\n\treturn file_google_devtools_cloudtrace_v2_trace_proto_rawDescGZIP(), []int{3}\n}", "func (*PasswordComplexityPolicyID) Descriptor() ([]byte, []int) {\n\treturn file_management_proto_rawDescGZIP(), []int{35}\n}", "func (*Example) Descriptor() ([]byte, []int) {\n\treturn file_google_ai_generativelanguage_v1beta2_discuss_service_proto_rawDescGZIP(), []int{4}\n}", "func (*Message6024) Descriptor() ([]byte, []int) {\n\treturn file_datasets_google_message4_benchmark_message4_2_proto_rawDescGZIP(), []int{26}\n}", "func (*NewVersionRequest) Descriptor() ([]byte, []int) {\n\treturn file_versiontracker_proto_rawDescGZIP(), []int{1}\n}", "func (*DirectiveCreateValidator) Descriptor() ([]byte, []int) {\n\treturn file_Harmony_proto_rawDescGZIP(), []int{7}\n}", "func (*RenewDelegationTokenRequestProto) Descriptor() ([]byte, []int) {\n\treturn file_Security_proto_rawDescGZIP(), []int{5}\n}", "func (*SecretsResponse) Descriptor() ([]byte, []int) {\n\treturn file_pkg_flow_grpc_secrets_proto_rawDescGZIP(), []int{4}\n}", "func (*SafetySetting) Descriptor() ([]byte, []int) {\n\treturn file_google_ai_generativelanguage_v1beta2_safety_proto_rawDescGZIP(), []int{3}\n}", "func (*DeleteBrokerPasswordMetadata) Descriptor() ([]byte, []int) {\n\treturn file_yandex_cloud_iot_broker_v1_broker_service_proto_rawDescGZIP(), []int{20}\n}", "func (*CancelDelegationTokenRequestProto) Descriptor() ([]byte, []int) {\n\treturn file_Security_proto_rawDescGZIP(), []int{7}\n}", "func (*Modifier) Descriptor() ([]byte, []int) {\n\treturn file_FillerGame_proto_rawDescGZIP(), []int{6}\n}", "func (*CredentialAttribute) Descriptor() ([]byte, []int) {\n\treturn file_messages_proto_rawDescGZIP(), []int{9}\n}", "func (*PatchCollectorsRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{161}\n}", "func (*DirectiveUndelegate) Descriptor() ([]byte, []int) {\n\treturn file_Harmony_proto_rawDescGZIP(), []int{10}\n}", "func (*Message12817) Descriptor() ([]byte, []int) {\n\treturn file_datasets_google_message4_benchmark_message4_2_proto_rawDescGZIP(), []int{22}\n}", "func (*Message12796) Descriptor() ([]byte, []int) {\n\treturn file_datasets_google_message4_benchmark_message4_2_proto_rawDescGZIP(), []int{1}\n}", "func (*SecurityOperation) Descriptor() ([]byte, []int) {\n\treturn 
file_testvector_tv_proto_rawDescGZIP(), []int{9}\n}", "func (*AnalysisMessageWeakSchema) Descriptor() ([]byte, []int) {\n\treturn file_analysis_v1alpha1_message_proto_rawDescGZIP(), []int{1}\n}", "func (*CallCredentials) Descriptor() ([]byte, []int) {\n\treturn file_github_com_solo_io_gloo_projects_gloo_api_v1_ssl_ssl_proto_rawDescGZIP(), []int{4}\n}", "func (*PasswordLockoutPolicyUpdate) Descriptor() ([]byte, []int) {\n\treturn file_management_proto_rawDescGZIP(), []int{46}\n}", "func (*MetadataUpdateEventProto) Descriptor() ([]byte, []int) {\n\treturn file_inotify_proto_rawDescGZIP(), []int{7}\n}", "func (*GetDatanodeInfoResponseProto) Descriptor() ([]byte, []int) {\n\treturn file_ClientDatanodeProtocol_proto_rawDescGZIP(), []int{13}\n}", "func (*CEmbeddedClient_Token) Descriptor() ([]byte, []int) {\n\treturn file_steammessages_useraccount_steamclient_proto_rawDescGZIP(), []int{21}\n}", "func (*Disconnect) Descriptor() ([]byte, []int) {\n\treturn file_uni_proto_rawDescGZIP(), []int{11}\n}", "func (*GetVersionResponse) Descriptor() ([]byte, []int) {\n\treturn file_github_com_yahuizhan_dappley_metrics_go_api_rpc_pb_rpc_proto_rawDescGZIP(), []int{31}\n}", "func (*Message) Descriptor() ([]byte, []int) {\n\treturn file_google_ai_generativelanguage_v1beta2_discuss_service_proto_rawDescGZIP(), []int{2}\n}", "func (*Metadata) Descriptor() ([]byte, []int) {\n\treturn file_helm_api_proto_rawDescGZIP(), []int{18}\n}", "func (*Meta) Descriptor() ([]byte, []int) {\n\treturn file_token_balance_proto_rawDescGZIP(), []int{13}\n}", "func (*TelemetryParams) Descriptor() ([]byte, []int) {\n\treturn file_protocol_rpc_rpc_proto_rawDescGZIP(), []int{62}\n}", "func (*DirectiveEditValidator) Descriptor() ([]byte, []int) {\n\treturn file_Harmony_proto_rawDescGZIP(), []int{8}\n}", "func (*TelemetryRequest) Descriptor() ([]byte, []int) {\n\treturn file_interservice_license_control_license_control_proto_rawDescGZIP(), []int{11}\n}", "func (*Message12818) Descriptor() ([]byte, []int) {\n\treturn file_datasets_google_message4_benchmark_message4_2_proto_rawDescGZIP(), []int{5}\n}", "func (*PasswordComplexityPolicyRequest) Descriptor() ([]byte, []int) {\n\treturn file_management_proto_rawDescGZIP(), []int{168}\n}", "func (*Credential) Descriptor() ([]byte, []int) {\n\treturn file_messages_proto_rawDescGZIP(), []int{10}\n}", "func (*AddPeerResponse) Descriptor() ([]byte, []int) {\n\treturn file_github_com_yahuizhan_dappley_metrics_go_api_rpc_pb_rpc_proto_rawDescGZIP(), []int{30}\n}", "func (*TraceProto) Descriptor() ([]byte, []int) {\n\treturn file_internal_tracing_extended_extended_trace_proto_rawDescGZIP(), []int{0}\n}", "func (*APILevel) Descriptor() ([]byte, []int) {\n\treturn file_Notify_proto_rawDescGZIP(), []int{4}\n}", "func (*PlanChange_Removed) Descriptor() ([]byte, []int) {\n\treturn edgelq_limits_proto_v1alpha2_plan_change_proto_rawDescGZIP(), []int{0, 3}\n}" ]
[ "0.6847484", "0.6715793", "0.6695759", "0.66937315", "0.6623736", "0.6599705", "0.65578085", "0.65053517", "0.6492708", "0.6484372", "0.6433935", "0.64155066", "0.64061314", "0.63588876", "0.6358342", "0.6349531", "0.6346874", "0.6325663", "0.6314042", "0.63127357", "0.62962794", "0.626823", "0.6257753", "0.6240876", "0.62117845", "0.6183939", "0.61817163", "0.617898", "0.6178261", "0.6156915", "0.61559814", "0.614803", "0.61475116", "0.6145641", "0.61432093", "0.6142404", "0.6126474", "0.61264163", "0.6122969", "0.6113694", "0.61131626", "0.6112892", "0.6108695", "0.61075044", "0.6100521", "0.60925233", "0.6086644", "0.6086411", "0.6084048", "0.60830826", "0.60810345", "0.6076692", "0.6076144", "0.60739565", "0.60637957", "0.6059202", "0.60484326", "0.6042784", "0.6042563", "0.6042411", "0.6039404", "0.60362184", "0.603425", "0.6032253", "0.60313517", "0.60238385", "0.6021025", "0.6019295", "0.60184026", "0.6016691", "0.6013362", "0.601269", "0.6012488", "0.60118055", "0.600417", "0.6002938", "0.5999367", "0.59985965", "0.59976304", "0.59971976", "0.5994568", "0.5993836", "0.59937674", "0.598756", "0.59867126", "0.5985614", "0.5984473", "0.5975949", "0.5975421", "0.5975373", "0.59708923", "0.5968679", "0.5968474", "0.59655684", "0.59653986", "0.5963498", "0.5960049", "0.5959125", "0.59590465", "0.59586734" ]
0.705888
0
Deprecated: Use Replication.ProtoReflect.Descriptor instead.
func (*Replication) Descriptor() ([]byte, []int) { return file_google_cloud_secrets_v1beta1_resources_proto_rawDescGZIP(), []int{2} }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (*Deprecation) Descriptor() ([]byte, []int) {\n\treturn file_external_cfgmgmt_response_nodes_proto_rawDescGZIP(), []int{8}\n}", "func (*PatchCollectorsRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{161}\n}", "func (*Listen) Descriptor() ([]byte, []int) {\n\treturn file_pkg_smgrpc_smgrpc_proto_rawDescGZIP(), []int{4}\n}", "func (*Persistent) Descriptor() ([]byte, []int) {\n\treturn file_msgs_msgs_proto_rawDescGZIP(), []int{2}\n}", "func (*TraceProto) Descriptor() ([]byte, []int) {\n\treturn file_internal_tracing_extended_extended_trace_proto_rawDescGZIP(), []int{0}\n}", "func (*InstallSnapshotRequestProto_NotificationProto) Descriptor() ([]byte, []int) {\n\treturn file_raft_proto_rawDescGZIP(), []int{19, 1}\n}", "func (*StaleReadRequestTypeProto) Descriptor() ([]byte, []int) {\n\treturn file_raft_proto_rawDescGZIP(), []int{24}\n}", "func (*AddPeerRequest) Descriptor() ([]byte, []int) {\n\treturn file_github_com_yahuizhan_dappley_metrics_go_api_rpc_pb_rpc_proto_rawDescGZIP(), []int{8}\n}", "func (*GetReplicaVisibleLengthResponseProto) Descriptor() ([]byte, []int) {\n\treturn file_ClientDatanodeProtocol_proto_rawDescGZIP(), []int{1}\n}", "func ProtoFromDescriptor(d protoreflect.Descriptor) proto.Message {\n\tswitch d := d.(type) {\n\tcase protoreflect.FileDescriptor:\n\t\treturn ProtoFromFileDescriptor(d)\n\tcase protoreflect.MessageDescriptor:\n\t\treturn ProtoFromMessageDescriptor(d)\n\tcase protoreflect.FieldDescriptor:\n\t\treturn ProtoFromFieldDescriptor(d)\n\tcase protoreflect.OneofDescriptor:\n\t\treturn ProtoFromOneofDescriptor(d)\n\tcase protoreflect.EnumDescriptor:\n\t\treturn ProtoFromEnumDescriptor(d)\n\tcase protoreflect.EnumValueDescriptor:\n\t\treturn ProtoFromEnumValueDescriptor(d)\n\tcase protoreflect.ServiceDescriptor:\n\t\treturn ProtoFromServiceDescriptor(d)\n\tcase protoreflect.MethodDescriptor:\n\t\treturn ProtoFromMethodDescriptor(d)\n\tdefault:\n\t\t// WTF??\n\t\tif res, ok := d.(DescriptorProtoWrapper); ok {\n\t\t\treturn res.AsProto()\n\t\t}\n\t\treturn nil\n\t}\n}", "func (*LabelDescriptor) Descriptor() ([]byte, []int) {\n\treturn edgelq_logging_proto_v1alpha2_common_proto_rawDescGZIP(), []int{0}\n}", "func (*NetProtoTalker) Descriptor() ([]byte, []int) {\n\treturn file_pkg_smgrpc_smgrpc_proto_rawDescGZIP(), []int{1}\n}", "func (*Message) Descriptor() ([]byte, []int) {\n\treturn file_google_ai_generativelanguage_v1beta2_discuss_service_proto_rawDescGZIP(), []int{2}\n}", "func (*QueryPlanStatusResponseProto) Descriptor() ([]byte, []int) {\n\treturn file_ClientDatanodeProtocol_proto_rawDescGZIP(), []int{25}\n}", "func (*GetDatanodeInfoResponseProto) Descriptor() ([]byte, []int) {\n\treturn file_ClientDatanodeProtocol_proto_rawDescGZIP(), []int{13}\n}", "func (*GetDelegationTokenResponseProto) Descriptor() ([]byte, []int) {\n\treturn file_Security_proto_rawDescGZIP(), []int{4}\n}", "func (*Instance) Descriptor() ([]byte, []int) {\n\treturn file_cloudprovider_externalgrpc_protos_externalgrpc_proto_rawDescGZIP(), []int{28}\n}", "func (*Message6024) Descriptor() ([]byte, []int) {\n\treturn file_datasets_google_message4_benchmark_message4_2_proto_rawDescGZIP(), []int{26}\n}", "func (*PatchAnnotationsRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{4}\n}", "func (*StandardProtocols) Descriptor() ([]byte, []int) {\n\treturn file_org_apache_beam_model_pipeline_v1_beam_runner_api_proto_rawDescGZIP(), []int{54}\n}", "func (*DiagOperation) 
Descriptor() ([]byte, []int) {\n\treturn file_testvector_tv_proto_rawDescGZIP(), []int{10}\n}", "func (*Message12818) Descriptor() ([]byte, []int) {\n\treturn file_datasets_google_message4_benchmark_message4_2_proto_rawDescGZIP(), []int{5}\n}", "func (*MyProto) Descriptor() ([]byte, []int) {\n\treturn file_my_proto_proto_rawDescGZIP(), []int{0}\n}", "func (*TLS) Descriptor() ([]byte, []int) {\n\treturn file_observer_observer_proto_rawDescGZIP(), []int{11}\n}", "func (*RefreshNamenodesResponseProto) Descriptor() ([]byte, []int) {\n\treturn file_ClientDatanodeProtocol_proto_rawDescGZIP(), []int{3}\n}", "func (*Message12796) Descriptor() ([]byte, []int) {\n\treturn file_datasets_google_message4_benchmark_message4_2_proto_rawDescGZIP(), []int{1}\n}", "func (*AddPeerResponse) Descriptor() ([]byte, []int) {\n\treturn file_github_com_yahuizhan_dappley_metrics_go_api_rpc_pb_rpc_proto_rawDescGZIP(), []int{30}\n}", "func (*Example) Descriptor() ([]byte, []int) {\n\treturn file_google_ai_generativelanguage_v1beta2_discuss_service_proto_rawDescGZIP(), []int{4}\n}", "func (*Reconfiguration) Descriptor() ([]byte, []int) {\n\treturn file_msgs_msgs_proto_rawDescGZIP(), []int{1}\n}", "func (*PlanChange_Modified) Descriptor() ([]byte, []int) {\n\treturn edgelq_limits_proto_v1alpha2_plan_change_proto_rawDescGZIP(), []int{0, 1}\n}", "func (*SelectorVerificationRes) Descriptor() ([]byte, []int) {\n\treturn file_proto_selector_verification_msgs_proto_rawDescGZIP(), []int{1}\n}", "func (*RelationTupleDelta) Descriptor() ([]byte, []int) {\n\treturn file_ory_keto_acl_v1alpha1_write_service_proto_rawDescGZIP(), []int{1}\n}", "func (*RenameReq) Descriptor() ([]byte, []int) {\n\treturn file_dfs_proto_rawDescGZIP(), []int{4}\n}", "func (*RoleInfoProto) Descriptor() ([]byte, []int) {\n\treturn file_raft_proto_rawDescGZIP(), []int{42}\n}", "func (*RenameClientCapabilities) Descriptor() ([]byte, []int) {\n\treturn file_protocol_rpc_rpc_proto_rawDescGZIP(), []int{191}\n}", "func (*SemanticTokensDelta) Descriptor() ([]byte, []int) {\n\treturn file_protocol_rpc_rpc_proto_rawDescGZIP(), []int{223}\n}", "func (*PatchCollaboratorsRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{21}\n}", "func (*RaftClientReplyProto) Descriptor() ([]byte, []int) {\n\treturn file_raft_proto_rawDescGZIP(), []int{31}\n}", "func (*DelegateOptionsRefs) Descriptor() ([]byte, []int) {\n\treturn file_github_com_solo_io_gloo_projects_gateway_api_v1_virtual_service_proto_rawDescGZIP(), []int{3}\n}", "func (*Primitive) Descriptor() ([]byte, []int) {\n\treturn file_messages_proto_rawDescGZIP(), []int{15}\n}", "func (*Retry_Conf_Grpc) Descriptor() ([]byte, []int) {\n\treturn file_api_mesh_v1alpha1_retry_proto_rawDescGZIP(), []int{0, 0, 3}\n}", "func (*EvictWritersResponseProto) Descriptor() ([]byte, []int) {\n\treturn file_ClientDatanodeProtocol_proto_rawDescGZIP(), []int{11}\n}", "func (*Trickle) Descriptor() ([]byte, []int) {\n\treturn file_cmd_server_grpc_proto_sfu_proto_rawDescGZIP(), []int{4}\n}", "func (*RenewDelegationTokenResponseProto) Descriptor() ([]byte, []int) {\n\treturn file_Security_proto_rawDescGZIP(), []int{6}\n}", "func (*Message6127) Descriptor() ([]byte, []int) {\n\treturn file_datasets_google_message4_benchmark_message4_2_proto_rawDescGZIP(), []int{24}\n}", "func (*APILevel) Descriptor() ([]byte, []int) {\n\treturn file_Notify_proto_rawDescGZIP(), []int{4}\n}", "func (*ValidatorUpdate) Descriptor() ([]byte, []int) {\n\treturn file_tm_replay_proto_rawDescGZIP(), []int{9}\n}", 
"func (*ProvisioningPolicyChange_Modified) Descriptor() ([]byte, []int) {\n\treturn edgelq_devices_proto_v1alpha_provisioning_policy_change_proto_rawDescGZIP(), []int{0, 1}\n}", "func (*Record) Descriptor() ([]byte, []int) {\n\treturn file_pkg_cluster_api_proto_rawDescGZIP(), []int{4}\n}", "func (*ServerRpcProto) Descriptor() ([]byte, []int) {\n\treturn file_raft_proto_rawDescGZIP(), []int{38}\n}", "func (*CSVCMsg_HltvReplay) Descriptor() ([]byte, []int) {\n\treturn file_netmessages_proto_rawDescGZIP(), []int{51}\n}", "func (*SelectorVerificationsRes) Descriptor() ([]byte, []int) {\n\treturn file_proto_selector_verification_msgs_proto_rawDescGZIP(), []int{3}\n}", "func (*Ref) Descriptor() ([]byte, []int) {\n\treturn file_go_chromium_org_luci_cipd_api_cipd_v1_repo_proto_rawDescGZIP(), []int{13}\n}", "func (*ObservabilityListenCF) Descriptor() ([]byte, []int) {\n\treturn file_pkg_kascfg_kascfg_proto_rawDescGZIP(), []int{2}\n}", "func (*CodeLens) Descriptor() ([]byte, []int) {\n\treturn file_protocol_rpc_rpc_proto_rawDescGZIP(), []int{164}\n}", "func (*GetDelegationTokenRequestProto) Descriptor() ([]byte, []int) {\n\treturn file_Security_proto_rawDescGZIP(), []int{3}\n}", "func (*Discovery) Descriptor() ([]byte, []int) {\n\treturn file_raft_proto_rawDescGZIP(), []int{0}\n}", "func (*ApiWarning) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_sql_v1_cloud_sql_resources_proto_rawDescGZIP(), []int{1}\n}", "func (*Reference) Descriptor() ([]byte, []int) {\n\treturn file_google_api_expr_v1alpha1_checked_proto_rawDescGZIP(), []int{3}\n}", "func (*CSVCMsg_HltvReplay) Descriptor() ([]byte, []int) {\n\treturn file_csgo_netmessages_proto_rawDescGZIP(), []int{51}\n}", "func (*GetBalancerBandwidthResponseProto) Descriptor() ([]byte, []int) {\n\treturn file_ClientDatanodeProtocol_proto_rawDescGZIP(), []int{19}\n}", "func (*CancelPlanResponseProto) Descriptor() ([]byte, []int) {\n\treturn file_ClientDatanodeProtocol_proto_rawDescGZIP(), []int{23}\n}", "func (*Subscription) Descriptor() ([]byte, []int) {\n\treturn file_proto_gnmi_gnmi_proto_rawDescGZIP(), []int{13}\n}", "func (*ModifyRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_engine_proto_rawDescGZIP(), []int{10}\n}", "func (*CCLCMsg_HltvReplay) Descriptor() ([]byte, []int) {\n\treturn file_csgo_netmessages_proto_rawDescGZIP(), []int{52}\n}", "func (*Note) Descriptor() ([]byte, []int) {\n\treturn file_determined_project_v1_project_proto_rawDescGZIP(), []int{1}\n}", "func (*CancelDelegationTokenResponseProto) Descriptor() ([]byte, []int) {\n\treturn file_Security_proto_rawDescGZIP(), []int{8}\n}", "func (*ValidateReply) Descriptor() ([]byte, []int) {\n\treturn file_protobuf_clusrun_proto_rawDescGZIP(), []int{18}\n}", "func (*Embed) Descriptor() ([]byte, []int) {\n\treturn file_chat_v1_messages_proto_rawDescGZIP(), []int{2}\n}", "func (*GetReplicaVisibleLengthRequestProto) Descriptor() ([]byte, []int) {\n\treturn file_ClientDatanodeProtocol_proto_rawDescGZIP(), []int{0}\n}", "func (*CCLCMsg_HltvReplay) Descriptor() ([]byte, []int) {\n\treturn file_netmessages_proto_rawDescGZIP(), []int{52}\n}", "func (*WinRMListener) Descriptor() ([]byte, []int) {\n\treturn file_moc_common_computecommon_proto_rawDescGZIP(), []int{0}\n}", "func (*UpgradeRuntimeRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_notebooks_v1_managed_service_proto_rawDescGZIP(), []int{9}\n}", "func (*Changes) Descriptor() ([]byte, []int) {\n\treturn file_management_proto_rawDescGZIP(), []int{3}\n}", "func (*Changes) Descriptor() ([]byte, []int) 
{\n\treturn file_management_proto_rawDescGZIP(), []int{3}\n}", "func (*WatchRequestTypeProto) Descriptor() ([]byte, []int) {\n\treturn file_raft_proto_rawDescGZIP(), []int{25}\n}", "func (*RenameRequest) Descriptor() ([]byte, []int) {\n\treturn file_protocol_rpc_rpc_proto_rawDescGZIP(), []int{194}\n}", "func (*Domain) Descriptor() ([]byte, []int) {\n\treturn file_management_proto_rawDescGZIP(), []int{51}\n}", "func (*Domain) Descriptor() ([]byte, []int) {\n\treturn file_management_proto_rawDescGZIP(), []int{51}\n}", "func (*DiagnoseRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_api_proto_rawDescGZIP(), []int{16}\n}", "func (*Message7928) Descriptor() ([]byte, []int) {\n\treturn file_datasets_google_message4_benchmark_message4_2_proto_rawDescGZIP(), []int{18}\n}", "func (*MonitoredResourceDescriptor) Descriptor() ([]byte, []int) {\n\treturn edgelq_monitoring_proto_v3_monitored_resource_descriptor_proto_rawDescGZIP(), []int{0}\n}", "func (*UnicastMsg) Descriptor() ([]byte, []int) {\n\treturn file_proto_rpc_rpc_proto_rawDescGZIP(), []int{2}\n}", "func (*Builder) Descriptor() ([]byte, []int) {\n\treturn file_go_chromium_org_luci_luci_notify_api_config_notify_proto_rawDescGZIP(), []int{4}\n}", "func (*Message7921) Descriptor() ([]byte, []int) {\n\treturn file_datasets_google_message4_benchmark_message4_2_proto_rawDescGZIP(), []int{19}\n}", "func (*RenameOptions) Descriptor() ([]byte, []int) {\n\treturn file_protocol_rpc_rpc_proto_rawDescGZIP(), []int{192}\n}", "func (*RevokeJobRequest) Descriptor() ([]byte, []int) {\n\treturn file_pkg_noderpc_proto_feeds_manager_proto_rawDescGZIP(), []int{20}\n}", "func (*Description) Descriptor() ([]byte, []int) {\n\treturn file_external_cfgmgmt_response_nodes_proto_rawDescGZIP(), []int{7}\n}", "func (*RefreshNamenodesRequestProto) Descriptor() ([]byte, []int) {\n\treturn file_ClientDatanodeProtocol_proto_rawDescGZIP(), []int{2}\n}", "func (*Disconnect) Descriptor() ([]byte, []int) {\n\treturn file_uni_proto_rawDescGZIP(), []int{11}\n}", "func (*RefreshRequest) Descriptor() ([]byte, []int) {\n\treturn file_cloudprovider_externalgrpc_protos_externalgrpc_proto_rawDescGZIP(), []int{16}\n}", "func (*RenewDelegationTokenRequestProto) Descriptor() ([]byte, []int) {\n\treturn file_Security_proto_rawDescGZIP(), []int{5}\n}", "func (*ApiWarning) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_sql_v1beta4_cloud_sql_resources_proto_rawDescGZIP(), []int{1}\n}", "func (*Message7919) Descriptor() ([]byte, []int) {\n\treturn file_datasets_google_message4_benchmark_message4_2_proto_rawDescGZIP(), []int{21}\n}", "func (x *fastReflection_ServiceCommandDescriptor) Descriptor() protoreflect.MessageDescriptor {\n\treturn md_ServiceCommandDescriptor\n}", "func (*KeepAlive) Descriptor() ([]byte, []int) {\n\treturn file_chatMsg_msg_proto_rawDescGZIP(), []int{4}\n}", "func (*StateMachineLogEntryProto) Descriptor() ([]byte, []int) {\n\treturn file_raft_proto_rawDescGZIP(), []int{6}\n}", "func (*MetadataProto) Descriptor() ([]byte, []int) {\n\treturn file_raft_proto_rawDescGZIP(), []int{7}\n}", "func (*ReplyCalcPNL) Descriptor() ([]byte, []int) {\n\treturn file_tradingnode2_proto_rawDescGZIP(), []int{3}\n}", "func (*TriggerBlockReportResponseProto) Descriptor() ([]byte, []int) {\n\treturn file_ClientDatanodeProtocol_proto_rawDescGZIP(), []int{17}\n}", "func (*NodeGroup) Descriptor() ([]byte, []int) {\n\treturn file_cloudprovider_externalgrpc_protos_externalgrpc_proto_rawDescGZIP(), []int{0}\n}" ]
[ "0.6727746", "0.6686084", "0.6545586", "0.6480731", "0.6466182", "0.64647496", "0.6437498", "0.6433067", "0.6430104", "0.6422417", "0.6404882", "0.6386649", "0.6378753", "0.6375563", "0.637492", "0.63679546", "0.63624215", "0.63500184", "0.63441485", "0.63429284", "0.6334024", "0.63327855", "0.63321495", "0.6317957", "0.6316529", "0.6315438", "0.63139653", "0.6313047", "0.6312634", "0.6311918", "0.63035625", "0.63003", "0.6299193", "0.6294589", "0.6293992", "0.62924755", "0.62913036", "0.6290172", "0.62881285", "0.6287368", "0.62852186", "0.62783533", "0.6277802", "0.62774456", "0.62752205", "0.6274593", "0.62741137", "0.6271937", "0.6271123", "0.62670434", "0.6266429", "0.6263278", "0.6261566", "0.6260946", "0.6258574", "0.625348", "0.62531614", "0.6252051", "0.62519675", "0.6251688", "0.62514335", "0.6251411", "0.62508786", "0.6250756", "0.6247689", "0.62456304", "0.6245503", "0.62443364", "0.6239593", "0.6235995", "0.62339383", "0.6233696", "0.62307405", "0.62273735", "0.62273735", "0.62222594", "0.6221687", "0.62172675", "0.62172675", "0.6213082", "0.6212946", "0.6212254", "0.62118244", "0.6211169", "0.621083", "0.620947", "0.6208801", "0.62086093", "0.62070394", "0.62036335", "0.61967164", "0.61960137", "0.61940527", "0.6192563", "0.61904454", "0.6189209", "0.6187938", "0.6186798", "0.6186513", "0.6186438", "0.6185832" ]
0.0
-1
Deprecated: Use SecretPayload.ProtoReflect.Descriptor instead.
func (*SecretPayload) Descriptor() ([]byte, []int) { return file_google_cloud_secrets_v1beta1_resources_proto_rawDescGZIP(), []int{3} }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (*LabelledPayload) Descriptor() ([]byte, []int) {\n\treturn file_org_apache_beam_model_pipeline_v1_beam_runner_api_proto_rawDescGZIP(), []int{59}\n}", "func (*ExternalPayload) Descriptor() ([]byte, []int) {\n\treturn file_org_apache_beam_model_pipeline_v1_beam_runner_api_proto_rawDescGZIP(), []int{53}\n}", "func (*DeleteSecretRequest) Descriptor() ([]byte, []int) {\n\treturn file_pkg_flow_grpc_secrets_proto_rawDescGZIP(), []int{7}\n}", "func (*TokenProto) Descriptor() ([]byte, []int) {\n\treturn file_Security_proto_rawDescGZIP(), []int{0}\n}", "func (*PyPIPayload) Descriptor() ([]byte, []int) {\n\treturn file_org_apache_beam_model_pipeline_v1_beam_runner_api_proto_rawDescGZIP(), []int{44}\n}", "func (*Payload) Descriptor() ([]byte, []int) {\n\treturn file_protobuf_message_proto_rawDescGZIP(), []int{1}\n}", "func (*Secret) Descriptor() ([]byte, []int) {\n\treturn file_pkg_flow_grpc_secrets_proto_rawDescGZIP(), []int{0}\n}", "func (*GetDelegationTokenResponseProto) Descriptor() ([]byte, []int) {\n\treturn file_Security_proto_rawDescGZIP(), []int{4}\n}", "func (*Payload) Descriptor() ([]byte, []int) {\n\treturn file_grpc_test_proto_rawDescGZIP(), []int{0}\n}", "func (*ReadPayload) Descriptor() ([]byte, []int) {\n\treturn file_org_apache_beam_model_pipeline_v1_beam_runner_api_proto_rawDescGZIP(), []int{19}\n}", "func (*Secret) Descriptor() ([]byte, []int) {\n\treturn file_yandex_cloud_datatransfer_v1_endpoint_common_proto_rawDescGZIP(), []int{1}\n}", "func (*CredentialsKVProto) Descriptor() ([]byte, []int) {\n\treturn file_Security_proto_rawDescGZIP(), []int{1}\n}", "func (*SecretEdge) Descriptor() ([]byte, []int) {\n\treturn file_pkg_flow_grpc_secrets_proto_rawDescGZIP(), []int{1}\n}", "func (*DockerPayload) Descriptor() ([]byte, []int) {\n\treturn file_org_apache_beam_model_pipeline_v1_beam_runner_api_proto_rawDescGZIP(), []int{51}\n}", "func (*SetSecretRequest) Descriptor() ([]byte, []int) {\n\treturn file_pkg_flow_grpc_secrets_proto_rawDescGZIP(), []int{5}\n}", "func (*CancelDelegationTokenResponseProto) Descriptor() ([]byte, []int) {\n\treturn file_Security_proto_rawDescGZIP(), []int{8}\n}", "func (*Embed) Descriptor() ([]byte, []int) {\n\treturn file_chat_v1_messages_proto_rawDescGZIP(), []int{2}\n}", "func (*GetDelegationTokenRequestProto) Descriptor() ([]byte, []int) {\n\treturn file_Security_proto_rawDescGZIP(), []int{3}\n}", "func (*RenewDelegationTokenResponseProto) Descriptor() ([]byte, []int) {\n\treturn file_Security_proto_rawDescGZIP(), []int{6}\n}", "func (*VotePayload) Descriptor() ([]byte, []int) {\n\treturn file_message_proto_rawDescGZIP(), []int{4}\n}", "func (*Payload) Descriptor() ([]byte, []int) {\n\treturn file_payload_proto_rawDescGZIP(), []int{0}\n}", "func (*PayloadMetadata) Descriptor() ([]byte, []int) {\n\treturn file_ivms101_identity_proto_rawDescGZIP(), []int{7}\n}", "func (*InputDisconnect) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_video_livestream_logging_v1_logs_proto_rawDescGZIP(), []int{10}\n}", "func (x *fastReflection_LightClientAttackEvidence) Descriptor() protoreflect.MessageDescriptor {\n\treturn md_LightClientAttackEvidence\n}", "func (*ParDoPayload) Descriptor() ([]byte, []int) {\n\treturn file_org_apache_beam_model_pipeline_v1_beam_runner_api_proto_rawDescGZIP(), []int{8}\n}", "func (*HelloPayload) Descriptor() ([]byte, []int) {\n\treturn file_message_proto_rawDescGZIP(), []int{1}\n}", "func (*StatsPayload) Descriptor() ([]byte, []int) {\n\treturn file_datadog_trace_stats_proto_rawDescGZIP(), []int{0}\n}", "func 
(*Deprecation) Descriptor() ([]byte, []int) {\n\treturn file_external_cfgmgmt_response_nodes_proto_rawDescGZIP(), []int{8}\n}", "func (*Payload) Descriptor() ([]byte, []int) {\n\treturn file_payload_payload_proto_rawDescGZIP(), []int{0}\n}", "func (*ProcessPayload) Descriptor() ([]byte, []int) {\n\treturn file_org_apache_beam_model_pipeline_v1_beam_runner_api_proto_rawDescGZIP(), []int{52}\n}", "func (*CredentialsProto) Descriptor() ([]byte, []int) {\n\treturn file_Security_proto_rawDescGZIP(), []int{2}\n}", "func (*Secret) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_secrets_v1beta1_resources_proto_rawDescGZIP(), []int{0}\n}", "func (*ConstantSampler) Descriptor() ([]byte, []int) {\n\treturn file_github_com_solo_io_gloo_projects_gloo_api_external_envoy_config_trace_v3_opencensus_proto_rawDescGZIP(), []int{3}\n}", "func (*PubSubWritePayload) Descriptor() ([]byte, []int) {\n\treturn file_org_apache_beam_model_pipeline_v1_beam_runner_api_proto_rawDescGZIP(), []int{26}\n}", "func (*SafetyFeedback) Descriptor() ([]byte, []int) {\n\treturn file_google_ai_generativelanguage_v1beta2_safety_proto_rawDescGZIP(), []int{1}\n}", "func (*NoiseHandshakePayload) Descriptor() ([]byte, []int) {\n\treturn file_pb_payload_proto_rawDescGZIP(), []int{1}\n}", "func (*TestStreamPayload) Descriptor() ([]byte, []int) {\n\treturn file_org_apache_beam_model_pipeline_v1_beam_runner_api_proto_rawDescGZIP(), []int{22}\n}", "func (*RateLimitingSampler) Descriptor() ([]byte, []int) {\n\treturn file_github_com_solo_io_gloo_projects_gloo_api_external_envoy_config_trace_v3_opencensus_proto_rawDescGZIP(), []int{4}\n}", "func (*SecretsRequest) Descriptor() ([]byte, []int) {\n\treturn file_pkg_flow_grpc_secrets_proto_rawDescGZIP(), []int{3}\n}", "func (*PasswordComplexityPolicyUpdate) Descriptor() ([]byte, []int) {\n\treturn file_management_proto_rawDescGZIP(), []int{38}\n}", "func (*SetSecretResponse) Descriptor() ([]byte, []int) {\n\treturn file_pkg_flow_grpc_secrets_proto_rawDescGZIP(), []int{6}\n}", "func (*TargetValue) Descriptor() ([]byte, []int) {\n\treturn file_packetbroker_api_iam_v1_service_proto_rawDescGZIP(), []int{7}\n}", "func (*MessagePayloadFormatters) Descriptor() ([]byte, []int) {\n\treturn file_ttn_lorawan_v3_messages_proto_rawDescGZIP(), []int{16}\n}", "func (*SecretVersion) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_secrets_v1beta1_resources_proto_rawDescGZIP(), []int{1}\n}", "func (*TraceProto) Descriptor() ([]byte, []int) {\n\treturn file_internal_tracing_extended_extended_trace_proto_rawDescGZIP(), []int{0}\n}", "func (*StandardProtocols) Descriptor() ([]byte, []int) {\n\treturn file_org_apache_beam_model_pipeline_v1_beam_runner_api_proto_rawDescGZIP(), []int{54}\n}", "func (*CombinePayload) Descriptor() ([]byte, []int) {\n\treturn file_org_apache_beam_model_pipeline_v1_beam_runner_api_proto_rawDescGZIP(), []int{21}\n}", "func (*EvictWritersResponseProto) Descriptor() ([]byte, []int) {\n\treturn file_ClientDatanodeProtocol_proto_rawDescGZIP(), []int{11}\n}", "func (*Secrets) Descriptor() ([]byte, []int) {\n\treturn file_pkg_flow_grpc_secrets_proto_rawDescGZIP(), []int{2}\n}", "func (*WatchRequestTypeProto) Descriptor() ([]byte, []int) {\n\treturn file_raft_proto_rawDescGZIP(), []int{25}\n}", "func (*RenewDelegationTokenRequestProto) Descriptor() ([]byte, []int) {\n\treturn file_Security_proto_rawDescGZIP(), []int{5}\n}", "func (*CancelDelegationTokenRequestProto) Descriptor() ([]byte, []int) {\n\treturn file_Security_proto_rawDescGZIP(), []int{7}\n}", "func 
(*Embed_EmbedField) Descriptor() ([]byte, []int) {\n\treturn file_chat_v1_messages_proto_rawDescGZIP(), []int{2, 1}\n}", "func (*RevokeFactoryCertificateRequest) Descriptor() ([]byte, []int) {\n\treturn file_token_proto_rawDescGZIP(), []int{15}\n}", "func (*TelemetryParams) Descriptor() ([]byte, []int) {\n\treturn file_protocol_rpc_rpc_proto_rawDescGZIP(), []int{62}\n}", "func (*EmbeddedFilePayload) Descriptor() ([]byte, []int) {\n\treturn file_org_apache_beam_model_pipeline_v1_beam_runner_api_proto_rawDescGZIP(), []int{43}\n}", "func (*IdentityPayload) Descriptor() ([]byte, []int) {\n\treturn file_ivms101_identity_proto_rawDescGZIP(), []int{0}\n}", "func (*ExternalConfigurationPayload) Descriptor() ([]byte, []int) {\n\treturn file_external_transforms_proto_rawDescGZIP(), []int{0}\n}", "func (*DeleteWebhookRequest) Descriptor() ([]byte, []int) {\n\treturn file_events_Event_proto_rawDescGZIP(), []int{6}\n}", "func (*FeedbackMetrics) Descriptor() ([]byte, []int) {\n\treturn file_ssn_dataservice_v1_dataservice_proto_rawDescGZIP(), []int{12}\n}", "func ProtoFromDescriptor(d protoreflect.Descriptor) proto.Message {\n\tswitch d := d.(type) {\n\tcase protoreflect.FileDescriptor:\n\t\treturn ProtoFromFileDescriptor(d)\n\tcase protoreflect.MessageDescriptor:\n\t\treturn ProtoFromMessageDescriptor(d)\n\tcase protoreflect.FieldDescriptor:\n\t\treturn ProtoFromFieldDescriptor(d)\n\tcase protoreflect.OneofDescriptor:\n\t\treturn ProtoFromOneofDescriptor(d)\n\tcase protoreflect.EnumDescriptor:\n\t\treturn ProtoFromEnumDescriptor(d)\n\tcase protoreflect.EnumValueDescriptor:\n\t\treturn ProtoFromEnumValueDescriptor(d)\n\tcase protoreflect.ServiceDescriptor:\n\t\treturn ProtoFromServiceDescriptor(d)\n\tcase protoreflect.MethodDescriptor:\n\t\treturn ProtoFromMethodDescriptor(d)\n\tdefault:\n\t\t// WTF??\n\t\tif res, ok := d.(DescriptorProtoWrapper); ok {\n\t\t\treturn res.AsProto()\n\t\t}\n\t\treturn nil\n\t}\n}", "func (*AddPeerRequest) Descriptor() ([]byte, []int) {\n\treturn file_github_com_yahuizhan_dappley_metrics_go_api_rpc_pb_rpc_proto_rawDescGZIP(), []int{8}\n}", "func (StandardPTransforms_DeprecatedPrimitives) EnumDescriptor() ([]byte, []int) {\n\treturn file_org_apache_beam_model_pipeline_v1_beam_runner_api_proto_rawDescGZIP(), []int{4, 1}\n}", "func (*DeleteWebhookRequest) Descriptor() ([]byte, []int) {\n\treturn file_uac_Event_proto_rawDescGZIP(), []int{5}\n}", "func (*DirectiveUndelegate) Descriptor() ([]byte, []int) {\n\treturn file_Harmony_proto_rawDescGZIP(), []int{10}\n}", "func (*Webhook) Descriptor() ([]byte, []int) {\n\treturn file_google_actions_sdk_v2_webhook_proto_rawDescGZIP(), []int{0}\n}", "func (*MavenPayload) Descriptor() ([]byte, []int) {\n\treturn file_org_apache_beam_model_pipeline_v1_beam_runner_api_proto_rawDescGZIP(), []int{45}\n}", "func (*AnalysisMessageWeakSchema) Descriptor() ([]byte, []int) {\n\treturn file_analysis_v1alpha1_message_proto_rawDescGZIP(), []int{1}\n}", "func (*Example) Descriptor() ([]byte, []int) {\n\treturn file_google_ai_generativelanguage_v1beta2_discuss_service_proto_rawDescGZIP(), []int{4}\n}", "func (*ApiListener) Descriptor() ([]byte, []int) {\n\treturn file_envoy_config_listener_v2_api_listener_proto_rawDescGZIP(), []int{0}\n}", "func (*ProvisioningPolicyChange_Removed) Descriptor() ([]byte, []int) {\n\treturn edgelq_devices_proto_v1alpha_provisioning_policy_change_proto_rawDescGZIP(), []int{0, 3}\n}", "func (*Payload) Descriptor() ([]byte, []int) {\n\treturn file_v1_transport_transport_proto_rawDescGZIP(), []int{0}\n}", "func (x 
*fastReflection_Evidence) Descriptor() protoreflect.MessageDescriptor {\n\treturn md_Evidence\n}", "func (*Meta) Descriptor() ([]byte, []int) {\n\treturn file_dictybase_api_jsonapi_payload_proto_rawDescGZIP(), []int{5}\n}", "func (*DeleteBrokerPasswordRequest) Descriptor() ([]byte, []int) {\n\treturn file_yandex_cloud_iot_broker_v1_broker_service_proto_rawDescGZIP(), []int{19}\n}", "func (*PlanChange_Removed) Descriptor() ([]byte, []int) {\n\treturn edgelq_limits_proto_v1alpha2_plan_change_proto_rawDescGZIP(), []int{0, 3}\n}", "func (*FormatMessage) Descriptor() ([]byte, []int) {\n\treturn file_google_devtools_clouddebugger_v2_data_proto_rawDescGZIP(), []int{0}\n}", "func (*DeregisterRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_engine_proto_rawDescGZIP(), []int{8}\n}", "func (*Module) Descriptor() ([]byte, []int) {\n\treturn file_google_devtools_cloudtrace_v2_trace_proto_rawDescGZIP(), []int{3}\n}", "func (*AddInstanceInstruction) Descriptor() ([]byte, []int) {\n\treturn file_proto_api_proto_rawDescGZIP(), []int{12}\n}", "func (*TestPayload) Descriptor() ([]byte, []int) {\n\treturn file_proto_testing_testing_proto_rawDescGZIP(), []int{0}\n}", "func (*Message12818) Descriptor() ([]byte, []int) {\n\treturn file_datasets_google_message4_benchmark_message4_2_proto_rawDescGZIP(), []int{5}\n}", "func (*PasswordAgePolicyUpdate) Descriptor() ([]byte, []int) {\n\treturn file_management_proto_rawDescGZIP(), []int{42}\n}", "func (*Message) Descriptor() ([]byte, []int) {\n\treturn file_google_ai_generativelanguage_v1beta2_discuss_service_proto_rawDescGZIP(), []int{2}\n}", "func (*ApiWarning) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_sql_v1_cloud_sql_resources_proto_rawDescGZIP(), []int{1}\n}", "func (*Message6024) Descriptor() ([]byte, []int) {\n\treturn file_datasets_google_message4_benchmark_message4_2_proto_rawDescGZIP(), []int{26}\n}", "func (*Message12817) Descriptor() ([]byte, []int) {\n\treturn file_datasets_google_message4_benchmark_message4_2_proto_rawDescGZIP(), []int{22}\n}", "func (*OriginalDetectIntentRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_dialogflow_v2beta1_webhook_proto_rawDescGZIP(), []int{2}\n}", "func (*KeyTransportMessage) Descriptor() ([]byte, []int) {\n\treturn file_fpc_fpc_proto_rawDescGZIP(), []int{7}\n}", "func (*Disconnect) Descriptor() ([]byte, []int) {\n\treturn file_uni_proto_rawDescGZIP(), []int{11}\n}", "func (*PasswordLeakVerification) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_recaptchaenterprise_v1beta1_recaptchaenterprise_proto_rawDescGZIP(), []int{4}\n}", "func (*DiagnoseRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_api_proto_rawDescGZIP(), []int{16}\n}", "func (*AddInstanceRequest) Descriptor() ([]byte, []int) {\n\treturn file_myshoes_proto_rawDescGZIP(), []int{0}\n}", "func (*ClientSecret) Descriptor() ([]byte, []int) {\n\treturn file_management_proto_rawDescGZIP(), []int{100}\n}", "func (*TokenWitness) Descriptor() ([]byte, []int) {\n\treturn file_witness_proto_rawDescGZIP(), []int{6}\n}", "func (*AddBrokerPasswordRequest) Descriptor() ([]byte, []int) {\n\treturn file_yandex_cloud_iot_broker_v1_broker_service_proto_rawDescGZIP(), []int{17}\n}", "func (*SignatureInfo) Descriptor() ([]byte, []int) {\n\treturn file_proto_covidshieldv1_proto_rawDescGZIP(), []int{6}\n}", "func (*ClientStatsPayload) Descriptor() ([]byte, []int) {\n\treturn file_datadog_trace_stats_proto_rawDescGZIP(), []int{1}\n}", "func (*Modifier) Descriptor() ([]byte, []int) {\n\treturn 
file_FillerGame_proto_rawDescGZIP(), []int{6}\n}", "func (*PasswordComplexityPolicyRequest) Descriptor() ([]byte, []int) {\n\treturn file_management_proto_rawDescGZIP(), []int{168}\n}" ]
[ "0.65490156", "0.64903533", "0.63943386", "0.6318891", "0.62863195", "0.6273548", "0.621698", "0.6194263", "0.61777174", "0.6158098", "0.6156442", "0.6155339", "0.61546296", "0.6109918", "0.610977", "0.6063295", "0.604994", "0.60420156", "0.6019964", "0.60105217", "0.60040504", "0.60025674", "0.5994716", "0.598629", "0.5985571", "0.5979214", "0.597176", "0.5968441", "0.5955992", "0.5952181", "0.5940944", "0.5938358", "0.59329754", "0.5929169", "0.5923981", "0.5917797", "0.59137094", "0.59108263", "0.59102315", "0.5902444", "0.5901486", "0.58993644", "0.58936924", "0.58936685", "0.5888898", "0.58823514", "0.5880737", "0.58685833", "0.5867629", "0.5860551", "0.58589375", "0.58585083", "0.5854989", "0.5851495", "0.5841775", "0.5828395", "0.5827812", "0.5825358", "0.5824653", "0.58221483", "0.5820849", "0.58175373", "0.5812026", "0.58098155", "0.58069694", "0.5804627", "0.58045155", "0.5804376", "0.5803596", "0.58029073", "0.5800007", "0.5796616", "0.5789124", "0.57845086", "0.5784142", "0.57810616", "0.57757413", "0.57700247", "0.5768562", "0.57638067", "0.57597715", "0.575939", "0.57536376", "0.5745619", "0.5744211", "0.574185", "0.57385546", "0.57385343", "0.5735904", "0.57352847", "0.5732802", "0.5725232", "0.572403", "0.57198405", "0.5716929", "0.5715539", "0.57106316", "0.5708918", "0.57087475", "0.57032585" ]
0.6862886
0
Deprecated: Use Replication_Automatic.ProtoReflect.Descriptor instead.
func (*Replication_Automatic) Descriptor() ([]byte, []int) {
	return file_google_cloud_secrets_v1beta1_resources_proto_rawDescGZIP(), []int{2, 0}
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (*Deprecation) Descriptor() ([]byte, []int) {\n\treturn file_external_cfgmgmt_response_nodes_proto_rawDescGZIP(), []int{8}\n}", "func (*PatchCollectorsRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{161}\n}", "func (*Persistent) Descriptor() ([]byte, []int) {\n\treturn file_msgs_msgs_proto_rawDescGZIP(), []int{2}\n}", "func (*Listen) Descriptor() ([]byte, []int) {\n\treturn file_pkg_smgrpc_smgrpc_proto_rawDescGZIP(), []int{4}\n}", "func (*Instance) Descriptor() ([]byte, []int) {\n\treturn file_cloudprovider_externalgrpc_protos_externalgrpc_proto_rawDescGZIP(), []int{28}\n}", "func (*ListenApiCF) Descriptor() ([]byte, []int) {\n\treturn file_pkg_kascfg_kascfg_proto_rawDescGZIP(), []int{22}\n}", "func (*NetProtoTalker) Descriptor() ([]byte, []int) {\n\treturn file_pkg_smgrpc_smgrpc_proto_rawDescGZIP(), []int{1}\n}", "func (*Example) Descriptor() ([]byte, []int) {\n\treturn file_google_ai_generativelanguage_v1beta2_discuss_service_proto_rawDescGZIP(), []int{4}\n}", "func (*PatchAnnotationsRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{4}\n}", "func (*Preferences) Descriptor() ([]byte, []int) {\n\treturn file_google_ads_googleads_v2_services_reach_plan_service_proto_rawDescGZIP(), []int{8}\n}", "func (*ObservabilityListenCF) Descriptor() ([]byte, []int) {\n\treturn file_pkg_kascfg_kascfg_proto_rawDescGZIP(), []int{2}\n}", "func (x *fastReflection_ServiceCommandDescriptor) Descriptor() protoreflect.MessageDescriptor {\n\treturn md_ServiceCommandDescriptor\n}", "func (*Discovery) Descriptor() ([]byte, []int) {\n\treturn file_raft_proto_rawDescGZIP(), []int{0}\n}", "func (*DelegateOptionsRefs) Descriptor() ([]byte, []int) {\n\treturn file_github_com_solo_io_gloo_projects_gateway_api_v1_virtual_service_proto_rawDescGZIP(), []int{3}\n}", "func (*Message) Descriptor() ([]byte, []int) {\n\treturn file_google_ai_generativelanguage_v1beta2_discuss_service_proto_rawDescGZIP(), []int{2}\n}", "func (x *fastReflection_LightClientAttackEvidence) Descriptor() protoreflect.MessageDescriptor {\n\treturn md_LightClientAttackEvidence\n}", "func (*Decl) Descriptor() ([]byte, []int) {\n\treturn file_google_api_expr_v1alpha1_checked_proto_rawDescGZIP(), []int{2}\n}", "func (*StandardProtocols) Descriptor() ([]byte, []int) {\n\treturn file_org_apache_beam_model_pipeline_v1_beam_runner_api_proto_rawDescGZIP(), []int{54}\n}", "func (x *fastReflection_FlagOptions) Descriptor() protoreflect.MessageDescriptor {\n\treturn md_FlagOptions\n}", "func (*InstallSnapshotRequestProto_NotificationProto) Descriptor() ([]byte, []int) {\n\treturn file_raft_proto_rawDescGZIP(), []int{19, 1}\n}", "func (*AddPeerRequest) Descriptor() ([]byte, []int) {\n\treturn file_github_com_yahuizhan_dappley_metrics_go_api_rpc_pb_rpc_proto_rawDescGZIP(), []int{8}\n}", "func (*ObservabilityCF) Descriptor() ([]byte, []int) {\n\treturn file_pkg_kascfg_kascfg_proto_rawDescGZIP(), []int{15}\n}", "func (*Reconfiguration) Descriptor() ([]byte, []int) {\n\treturn file_msgs_msgs_proto_rawDescGZIP(), []int{1}\n}", "func (*Reference) Descriptor() ([]byte, []int) {\n\treturn file_google_api_expr_v1alpha1_checked_proto_rawDescGZIP(), []int{3}\n}", "func (*MyProto) Descriptor() ([]byte, []int) {\n\treturn file_my_proto_proto_rawDescGZIP(), []int{0}\n}", "func (*Message6024) Descriptor() ([]byte, []int) {\n\treturn file_datasets_google_message4_benchmark_message4_2_proto_rawDescGZIP(), []int{26}\n}", "func (*Reconfiguration_NewClient) 
Descriptor() ([]byte, []int) {\n\treturn file_msgs_msgs_proto_rawDescGZIP(), []int{1, 0}\n}", "func (*MaximizeConversions) Descriptor() ([]byte, []int) {\n\treturn file_google_ads_googleads_v14_common_bidding_proto_rawDescGZIP(), []int{6}\n}", "func (*Message12818) Descriptor() ([]byte, []int) {\n\treturn file_datasets_google_message4_benchmark_message4_2_proto_rawDescGZIP(), []int{5}\n}", "func (*DiagOperation) Descriptor() ([]byte, []int) {\n\treturn file_testvector_tv_proto_rawDescGZIP(), []int{10}\n}", "func (*AnalysisMessageWeakSchema) Descriptor() ([]byte, []int) {\n\treturn file_analysis_v1alpha1_message_proto_rawDescGZIP(), []int{1}\n}", "func (*Topic) Descriptor() ([]byte, []int) {\n\treturn file_service_proto_rawDescGZIP(), []int{1}\n}", "func (*Primitive) Descriptor() ([]byte, []int) {\n\treturn file_messages_proto_rawDescGZIP(), []int{15}\n}", "func (*Message12796) Descriptor() ([]byte, []int) {\n\treturn file_datasets_google_message4_benchmark_message4_2_proto_rawDescGZIP(), []int{1}\n}", "func (*FeedbackMetrics) Descriptor() ([]byte, []int) {\n\treturn file_ssn_dataservice_v1_dataservice_proto_rawDescGZIP(), []int{12}\n}", "func (*ApiListener) Descriptor() ([]byte, []int) {\n\treturn file_envoy_config_listener_v2_api_listener_proto_rawDescGZIP(), []int{0}\n}", "func (x *fastReflection_MsgUpdateParams) Descriptor() protoreflect.MessageDescriptor {\n\treturn md_MsgUpdateParams\n}", "func (*RefreshServiceAclRequestProto) Descriptor() ([]byte, []int) {\n\treturn file_RefreshAuthorizationPolicyProtocol_proto_rawDescGZIP(), []int{0}\n}", "func (*PatchAnnotationsStatusRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{5}\n}", "func (*APILevel) Descriptor() ([]byte, []int) {\n\treturn file_Notify_proto_rawDescGZIP(), []int{4}\n}", "func (*PrivateApiCF) Descriptor() ([]byte, []int) {\n\treturn file_pkg_kascfg_kascfg_proto_rawDescGZIP(), []int{24}\n}", "func (*StaleReadRequestTypeProto) Descriptor() ([]byte, []int) {\n\treturn file_raft_proto_rawDescGZIP(), []int{24}\n}", "func (*RefreshNamenodesRequestProto) Descriptor() ([]byte, []int) {\n\treturn file_ClientDatanodeProtocol_proto_rawDescGZIP(), []int{2}\n}", "func (*RelationTupleDelta) Descriptor() ([]byte, []int) {\n\treturn file_ory_keto_acl_v1alpha1_write_service_proto_rawDescGZIP(), []int{1}\n}", "func (*LabelDescriptor) Descriptor() ([]byte, []int) {\n\treturn edgelq_logging_proto_v1alpha2_common_proto_rawDescGZIP(), []int{0}\n}", "func (*ApiWarning) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_sql_v1_cloud_sql_resources_proto_rawDescGZIP(), []int{1}\n}", "func ProtoFromDescriptor(d protoreflect.Descriptor) proto.Message {\n\tswitch d := d.(type) {\n\tcase protoreflect.FileDescriptor:\n\t\treturn ProtoFromFileDescriptor(d)\n\tcase protoreflect.MessageDescriptor:\n\t\treturn ProtoFromMessageDescriptor(d)\n\tcase protoreflect.FieldDescriptor:\n\t\treturn ProtoFromFieldDescriptor(d)\n\tcase protoreflect.OneofDescriptor:\n\t\treturn ProtoFromOneofDescriptor(d)\n\tcase protoreflect.EnumDescriptor:\n\t\treturn ProtoFromEnumDescriptor(d)\n\tcase protoreflect.EnumValueDescriptor:\n\t\treturn ProtoFromEnumValueDescriptor(d)\n\tcase protoreflect.ServiceDescriptor:\n\t\treturn ProtoFromServiceDescriptor(d)\n\tcase protoreflect.MethodDescriptor:\n\t\treturn ProtoFromMethodDescriptor(d)\n\tdefault:\n\t\t// WTF??\n\t\tif res, ok := d.(DescriptorProtoWrapper); ok {\n\t\t\treturn res.AsProto()\n\t\t}\n\t\treturn nil\n\t}\n}", "func (*Builder) Descriptor() ([]byte, []int) 
{\n\treturn file_go_chromium_org_luci_luci_notify_api_config_notify_proto_rawDescGZIP(), []int{4}\n}", "func (x *fastReflection_Evidence) Descriptor() protoreflect.MessageDescriptor {\n\treturn md_Evidence\n}", "func (*TraceProto) Descriptor() ([]byte, []int) {\n\treturn file_internal_tracing_extended_extended_trace_proto_rawDescGZIP(), []int{0}\n}", "func (*Message7921) Descriptor() ([]byte, []int) {\n\treturn file_datasets_google_message4_benchmark_message4_2_proto_rawDescGZIP(), []int{19}\n}", "func (*Record) Descriptor() ([]byte, []int) {\n\treturn file_pkg_cluster_api_proto_rawDescGZIP(), []int{4}\n}", "func (*Retry_Conf_Grpc) Descriptor() ([]byte, []int) {\n\treturn file_api_mesh_v1alpha1_retry_proto_rawDescGZIP(), []int{0, 0, 3}\n}", "func (*ManualCpv) Descriptor() ([]byte, []int) {\n\treturn file_google_ads_googleads_v14_common_bidding_proto_rawDescGZIP(), []int{5}\n}", "func (*UpgradeRuntimeRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_notebooks_v1_managed_service_proto_rawDescGZIP(), []int{9}\n}", "func (*RefreshNamenodesResponseProto) Descriptor() ([]byte, []int) {\n\treturn file_ClientDatanodeProtocol_proto_rawDescGZIP(), []int{3}\n}", "func (*PolicyRule) Descriptor() ([]byte, []int) {\n\treturn file_github_com_solo_io_skv2_api_multicluster_v1alpha1_cluster_proto_rawDescGZIP(), []int{2}\n}", "func (*ValidatorUpdate) Descriptor() ([]byte, []int) {\n\treturn file_tm_replay_proto_rawDescGZIP(), []int{9}\n}", "func (*Message7920) Descriptor() ([]byte, []int) {\n\treturn file_datasets_google_message4_benchmark_message4_2_proto_rawDescGZIP(), []int{20}\n}", "func (x *fastReflection_Metadata) Descriptor() protoreflect.MessageDescriptor {\n\treturn md_Metadata\n}", "func (*Name) Descriptor() ([]byte, []int) {\n\treturn file_examples_documents_example_proto_rawDescGZIP(), []int{25}\n}", "func (*Trickle) Descriptor() ([]byte, []int) {\n\treturn file_cmd_server_grpc_proto_sfu_proto_rawDescGZIP(), []int{4}\n}", "func (*GetDelegationTokenRequestProto) Descriptor() ([]byte, []int) {\n\treturn file_Security_proto_rawDescGZIP(), []int{3}\n}", "func (*StrategyChange) Descriptor() ([]byte, []int) {\n\treturn file_rpc_proto_rawDescGZIP(), []int{6}\n}", "func (*GetDatanodeInfoResponseProto) Descriptor() ([]byte, []int) {\n\treturn file_ClientDatanodeProtocol_proto_rawDescGZIP(), []int{13}\n}", "func (*DelegateAction) Descriptor() ([]byte, []int) {\n\treturn file_github_com_solo_io_gloo_projects_gateway_api_v1_virtual_service_proto_rawDescGZIP(), []int{4}\n}", "func (*DiagnoseRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_api_proto_rawDescGZIP(), []int{16}\n}", "func (*NewEpoch_RemoteEpochChange) Descriptor() ([]byte, []int) {\n\treturn file_msgs_msgs_proto_rawDescGZIP(), []int{25, 0}\n}", "func (*Person) Descriptor() ([]byte, []int) {\n\treturn file_protos_face_recognition_service_proto_rawDescGZIP(), []int{4}\n}", "func (*RefreshRequest) Descriptor() ([]byte, []int) {\n\treturn file_cloudprovider_externalgrpc_protos_externalgrpc_proto_rawDescGZIP(), []int{16}\n}", "func (*DiagnoseRuntimeRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_notebooks_v1_managed_service_proto_rawDescGZIP(), []int{14}\n}", "func (*RenewDelegationTokenRequestProto) Descriptor() ([]byte, []int) {\n\treturn file_Security_proto_rawDescGZIP(), []int{5}\n}", "func (*EpochChange) Descriptor() ([]byte, []int) {\n\treturn file_msgs_msgs_proto_rawDescGZIP(), []int{21}\n}", "func (*ApiCF) Descriptor() ([]byte, []int) {\n\treturn file_pkg_kascfg_kascfg_proto_rawDescGZIP(), 
[]int{23}\n}", "func (*DirectiveUndelegate) Descriptor() ([]byte, []int) {\n\treturn file_Harmony_proto_rawDescGZIP(), []int{10}\n}", "func (*GenerateMessageRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_ai_generativelanguage_v1beta2_discuss_service_proto_rawDescGZIP(), []int{0}\n}", "func (*GetDelegationTokenResponseProto) Descriptor() ([]byte, []int) {\n\treturn file_Security_proto_rawDescGZIP(), []int{4}\n}", "func (*RevokeJobRequest) Descriptor() ([]byte, []int) {\n\treturn file_pkg_noderpc_proto_feeds_manager_proto_rawDescGZIP(), []int{20}\n}", "func (*ManualCpc) Descriptor() ([]byte, []int) {\n\treturn file_google_ads_googleads_v14_common_bidding_proto_rawDescGZIP(), []int{3}\n}", "func (*ListenKubernetesApiCF) Descriptor() ([]byte, []int) {\n\treturn file_pkg_kascfg_kascfg_proto_rawDescGZIP(), []int{8}\n}", "func (*Description) Descriptor() ([]byte, []int) {\n\treturn file_external_cfgmgmt_response_nodes_proto_rawDescGZIP(), []int{7}\n}", "func (*SemanticTokensDelta) Descriptor() ([]byte, []int) {\n\treturn file_protocol_rpc_rpc_proto_rawDescGZIP(), []int{223}\n}", "func (*DiskBalancerSettingResponseProto) Descriptor() ([]byte, []int) {\n\treturn file_ClientDatanodeProtocol_proto_rawDescGZIP(), []int{27}\n}", "func (*Message6578) Descriptor() ([]byte, []int) {\n\treturn file_datasets_google_message4_benchmark_message4_2_proto_rawDescGZIP(), []int{7}\n}", "func (*UnicastMsg) Descriptor() ([]byte, []int) {\n\treturn file_proto_rpc_rpc_proto_rawDescGZIP(), []int{2}\n}", "func (*Controls) Descriptor() ([]byte, []int) {\n\treturn file_msgType_proto_rawDescGZIP(), []int{21}\n}", "func (*Message12817) Descriptor() ([]byte, []int) {\n\treturn file_datasets_google_message4_benchmark_message4_2_proto_rawDescGZIP(), []int{22}\n}", "func (*Message7865) Descriptor() ([]byte, []int) {\n\treturn file_datasets_google_message4_benchmark_message4_2_proto_rawDescGZIP(), []int{15}\n}", "func (*UpdateDatasetRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_automl_v1_service_proto_rawDescGZIP(), []int{4}\n}", "func (*QueryPlanStatusResponseProto) Descriptor() ([]byte, []int) {\n\treturn file_ClientDatanodeProtocol_proto_rawDescGZIP(), []int{25}\n}", "func (*DeleteCollectorsRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{162}\n}", "func (*Note) Descriptor() ([]byte, []int) {\n\treturn file_determined_project_v1_project_proto_rawDescGZIP(), []int{1}\n}", "func (*Message10319) Descriptor() ([]byte, []int) {\n\treturn file_datasets_google_message4_benchmark_message4_2_proto_rawDescGZIP(), []int{6}\n}", "func (*Correctness) Descriptor() ([]byte, []int) {\n\treturn file_ssn_dataservice_v1_dataservice_proto_rawDescGZIP(), []int{13}\n}", "func (*Message6127) Descriptor() ([]byte, []int) {\n\treturn file_datasets_google_message4_benchmark_message4_2_proto_rawDescGZIP(), []int{24}\n}", "func (*RenameReq) Descriptor() ([]byte, []int) {\n\treturn file_dfs_proto_rawDescGZIP(), []int{4}\n}", "func (*MetadataProto) Descriptor() ([]byte, []int) {\n\treturn file_raft_proto_rawDescGZIP(), []int{7}\n}", "func (*Control) Descriptor() ([]byte, []int) {\n\treturn file_msgType_proto_rawDescGZIP(), []int{20}\n}", "func (*TokenProto) Descriptor() ([]byte, []int) {\n\treturn file_Security_proto_rawDescGZIP(), []int{0}\n}", "func (*Type) Descriptor() ([]byte, []int) {\n\treturn file_google_api_expr_v1alpha1_checked_proto_rawDescGZIP(), []int{1}\n}", "func (*AddPeerResponse) Descriptor() ([]byte, []int) {\n\treturn 
file_github_com_yahuizhan_dappley_metrics_go_api_rpc_pb_rpc_proto_rawDescGZIP(), []int{30}\n}" ]
[ "0.6894834", "0.67953676", "0.66490424", "0.6634654", "0.65959626", "0.65788096", "0.6574221", "0.65688884", "0.65657634", "0.65597504", "0.65399843", "0.65367705", "0.6523806", "0.65232563", "0.65229344", "0.65124637", "0.6510672", "0.6508254", "0.6489727", "0.6485264", "0.6477258", "0.6473561", "0.64698344", "0.646439", "0.6446943", "0.6445361", "0.6441876", "0.6437205", "0.6436464", "0.6425983", "0.6425893", "0.64192104", "0.6417048", "0.6416661", "0.6416266", "0.6414906", "0.6409612", "0.6406515", "0.6400846", "0.64008445", "0.63988405", "0.6390105", "0.6389919", "0.6385548", "0.6384041", "0.6382528", "0.6380457", "0.63775593", "0.63762784", "0.63757336", "0.63718385", "0.6371242", "0.6367273", "0.6366523", "0.6366001", "0.6364285", "0.63630193", "0.63630116", "0.6362416", "0.636191", "0.63614655", "0.63583094", "0.6357697", "0.63569874", "0.63557607", "0.6353412", "0.6349202", "0.6345989", "0.63434255", "0.63429475", "0.6341958", "0.634074", "0.6340622", "0.6339142", "0.6337164", "0.63370335", "0.6335666", "0.6335065", "0.63328695", "0.63288444", "0.63279027", "0.6327449", "0.6327224", "0.6326636", "0.6325794", "0.6325591", "0.6325439", "0.63227147", "0.63199544", "0.6316826", "0.6316275", "0.6316258", "0.63158435", "0.631526", "0.6314833", "0.63125736", "0.6310696", "0.63104004", "0.6308181", "0.63070464", "0.6305771" ]
0.0
-1
Deprecated: Use Replication_UserManaged.ProtoReflect.Descriptor instead.
func (*Replication_UserManaged) Descriptor() ([]byte, []int) {
	return file_google_cloud_secrets_v1beta1_resources_proto_rawDescGZIP(), []int{2, 1}
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (*McUserProto) Descriptor() ([]byte, []int) {\n\treturn file_pkg_mcclient_mcuser_mcuser_proto_proto_rawDescGZIP(), []int{0}\n}", "func (*LoginedUserRep) Descriptor() ([]byte, []int) {\n\treturn file_proto_user_proto_rawDescGZIP(), []int{3}\n}", "func (*NetMessageSplitscreenUserChanged) Descriptor() ([]byte, []int) {\n\treturn file_artifact_networksystem_protomessages_proto_rawDescGZIP(), []int{0}\n}", "func (*UserModifyResp) Descriptor() ([]byte, []int) {\n\treturn file_api_interface_v1_user_proto_rawDescGZIP(), []int{1}\n}", "func (*UserRep) Descriptor() ([]byte, []int) {\n\treturn file_proto_user_proto_rawDescGZIP(), []int{1}\n}", "func (*User) Descriptor() ([]byte, []int) {\n\treturn file_proto_fandncloud_service_user_user_proto_rawDescGZIP(), []int{0}\n}", "func (*User) Descriptor() ([]byte, []int) {\n\treturn file_protodef_user_user_proto_rawDescGZIP(), []int{2}\n}", "func (*User) Descriptor() ([]byte, []int) {\n\treturn file_grpc_user_proto_rawDescGZIP(), []int{2}\n}", "func (*User) Descriptor() ([]byte, []int) {\n\treturn file_usermanage_usermanage_proto_rawDescGZIP(), []int{1}\n}", "func (*Permission) Descriptor() ([]byte, []int) {\n\treturn file_protodef_user_user_proto_rawDescGZIP(), []int{18}\n}", "func (*NewUser) Descriptor() ([]byte, []int) {\n\treturn file_usermanage_usermanage_proto_rawDescGZIP(), []int{0}\n}", "func (*User) Descriptor() ([]byte, []int) {\n\treturn file_proto_indrasaputra_orvosi_v1_orvosi_proto_rawDescGZIP(), []int{2}\n}", "func (*User) Descriptor() ([]byte, []int) {\n\treturn file_GrpcServices_auth_proto_rawDescGZIP(), []int{2}\n}", "func (*UpdatePermissionRequest) Descriptor() ([]byte, []int) {\n\treturn file_protodef_user_user_proto_rawDescGZIP(), []int{16}\n}", "func (*User) Descriptor() ([]byte, []int) {\n\treturn file_proto_user_proto_rawDescGZIP(), []int{1}\n}", "func (*User) Descriptor() ([]byte, []int) {\n\treturn file_proto_auth_proto_rawDescGZIP(), []int{3}\n}", "func (*Permission) Descriptor() ([]byte, []int) {\n\treturn file_proto_fandncloud_service_user_user_proto_rawDescGZIP(), []int{6}\n}", "func (*User) Descriptor() ([]byte, []int) {\n\treturn file_protob_user_service_proto_rawDescGZIP(), []int{0}\n}", "func (*UserCollection) Descriptor() ([]byte, []int) {\n\treturn file_protodef_user_user_proto_rawDescGZIP(), []int{3}\n}", "func (*RefreshToken) Descriptor() ([]byte, []int) {\n\treturn file_proto_fandncloud_service_user_user_proto_rawDescGZIP(), []int{8}\n}", "func (*User) Descriptor() ([]byte, []int) {\n\treturn file_user_service_proto_rawDescGZIP(), []int{6}\n}", "func (*User) Descriptor() ([]byte, []int) {\n\treturn file_proto_user_proto_rawDescGZIP(), []int{0}\n}", "func (*User) Descriptor() ([]byte, []int) {\n\treturn file_proto_user_proto_rawDescGZIP(), []int{0}\n}", "func (*WatchUserRequest) Descriptor() ([]byte, []int) {\n\treturn edgelq_iam_proto_v1alpha_user_service_proto_rawDescGZIP(), []int{5}\n}", "func (*ListUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_user_proto_rawDescGZIP(), []int{6}\n}", "func (*Group) Descriptor() ([]byte, []int) {\n\treturn file_proto_fandncloud_service_user_user_proto_rawDescGZIP(), []int{3}\n}", "func (*User) Descriptor() ([]byte, []int) {\n\treturn file_proto_service_proto_rawDescGZIP(), []int{0}\n}", "func (*User) Descriptor() ([]byte, []int) {\n\treturn file_proto_service_proto_rawDescGZIP(), []int{0}\n}", "func (*UpdateUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_protodef_user_user_proto_rawDescGZIP(), []int{0}\n}", "func (*User) Descriptor() ([]byte, []int) 
{\n\treturn file_service_account_proto_entities_entities_proto_rawDescGZIP(), []int{0}\n}", "func (*ResposneUser) Descriptor() ([]byte, []int) {\n\treturn file_user_service_proto_rawDescGZIP(), []int{2}\n}", "func (*Role) Descriptor() ([]byte, []int) {\n\treturn file_protodef_user_user_proto_rawDescGZIP(), []int{10}\n}", "func (*ProtoCreateUser) Descriptor() ([]byte, []int) {\n\treturn file_user_proto_rawDescGZIP(), []int{1}\n}", "func (*ReadUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_user_proto_rawDescGZIP(), []int{4}\n}", "func (*DeleteUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_buf_alpha_registry_v1alpha1_user_proto_rawDescGZIP(), []int{13}\n}", "func (*DeleteUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_protob_user_service_proto_rawDescGZIP(), []int{7}\n}", "func (*User) Descriptor() ([]byte, []int) {\n\treturn file_proto_user_user_proto_rawDescGZIP(), []int{0}\n}", "func (*User) Descriptor() ([]byte, []int) {\n\treturn file_proto_user_user_proto_rawDescGZIP(), []int{0}\n}", "func (*PermissionCollection) Descriptor() ([]byte, []int) {\n\treturn file_protodef_user_user_proto_rawDescGZIP(), []int{19}\n}", "func (*DeleteUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_pkg_pb_user_user_proto_rawDescGZIP(), []int{11}\n}", "func (*User) Descriptor() ([]byte, []int) {\n\treturn file_user_message_proto_rawDescGZIP(), []int{0}\n}", "func (*User) Descriptor() ([]byte, []int) {\n\treturn file_users_proto_rawDescGZIP(), []int{6}\n}", "func (*User) Descriptor() ([]byte, []int) {\n\treturn file_API_user_proto_rawDescGZIP(), []int{2}\n}", "func (*DeleteUserReply) Descriptor() ([]byte, []int) {\n\treturn file_user_proto_rawDescGZIP(), []int{8}\n}", "func (*User) Descriptor() ([]byte, []int) {\n\treturn file_pkg_pb_user_user_proto_rawDescGZIP(), []int{0}\n}", "func (*ReportUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_api_protobuf_spec_connection_user_v1_proto_rawDescGZIP(), []int{26}\n}", "func (*DeleteUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_user_user_proto_rawDescGZIP(), []int{9}\n}", "func (*User) Descriptor() ([]byte, []int) {\n\treturn file_proto_account_proto_rawDescGZIP(), []int{3}\n}", "func (*User) Descriptor() ([]byte, []int) {\n\treturn file_pb_auth_proto_rawDescGZIP(), []int{0}\n}", "func (*UserMembershipView) Descriptor() ([]byte, []int) {\n\treturn file_management_proto_rawDescGZIP(), []int{138}\n}", "func (*User) Descriptor() ([]byte, []int) {\n\treturn file_msgdata_proto_rawDescGZIP(), []int{2}\n}", "func (*Membership) Descriptor() ([]byte, []int) {\n\treturn file_odpf_meta_User_proto_rawDescGZIP(), []int{2}\n}", "func (*User) Descriptor() ([]byte, []int) {\n\treturn file_odpf_meta_User_proto_rawDescGZIP(), []int{1}\n}", "func (*UserView) Descriptor() ([]byte, []int) {\n\treturn file_management_proto_rawDescGZIP(), []int{15}\n}", "func (*UpdateUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_grpc_user_proto_rawDescGZIP(), []int{15}\n}", "func (*IgnoreUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_api_protobuf_spec_connection_user_v1_proto_rawDescGZIP(), []int{9}\n}", "func (*PermissionData) Descriptor() ([]byte, []int) {\n\treturn file_protodef_user_user_proto_rawDescGZIP(), []int{20}\n}", "func (*RemoveTeamUser) Descriptor() ([]byte, []int) {\n\treturn file_uac_Team_proto_rawDescGZIP(), []int{9}\n}", "func (*User) Descriptor() ([]byte, []int) {\n\treturn file_userapi_proto_rawDescGZIP(), []int{1}\n}", "func (*DeleteUserRequest) Descriptor() ([]byte, []int) {\n\treturn 
file_user_proto_rawDescGZIP(), []int{11}\n}", "func (*DeleteUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_server_pb_UserService_proto_rawDescGZIP(), []int{7}\n}", "func (*User) Descriptor() ([]byte, []int) {\n\treturn file_buf_alpha_registry_v1alpha1_user_proto_rawDescGZIP(), []int{0}\n}", "func (*DeleteUserReply) Descriptor() ([]byte, []int) {\n\treturn file_api_user_v1_user_proto_rawDescGZIP(), []int{5}\n}", "func (*UpdateUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_pkg_pb_user_user_proto_rawDescGZIP(), []int{9}\n}", "func (*User) Descriptor() ([]byte, []int) {\n\treturn file_userpb_user_proto_rawDescGZIP(), []int{0}\n}", "func (*User) Descriptor() ([]byte, []int) {\n\treturn file_user_common_proto_rawDescGZIP(), []int{2}\n}", "func (*ExistingUserRelationships) Descriptor() ([]byte, []int) {\n\treturn file_protodef_user_user_proto_rawDescGZIP(), []int{6}\n}", "func (*User) Descriptor() ([]byte, []int) {\n\treturn file_management_proto_rawDescGZIP(), []int{12}\n}", "func (*NewUserRelationships) Descriptor() ([]byte, []int) {\n\treturn file_protodef_user_user_proto_rawDescGZIP(), []int{7}\n}", "func (*ListUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_api_user_v1_user_proto_rawDescGZIP(), []int{8}\n}", "func (*DeleteUserReq) Descriptor() ([]byte, []int) {\n\treturn file_user_proto_rawDescGZIP(), []int{7}\n}", "func (*UserView) Descriptor() ([]byte, []int) {\n\treturn file_management_proto_rawDescGZIP(), []int{13}\n}", "func (*UserRequest) Descriptor() ([]byte, []int) {\n\treturn file_grpcuser_proto_user_proto_rawDescGZIP(), []int{0}\n}", "func (*ProtoUserSchema) Descriptor() ([]byte, []int) {\n\treturn file_user_proto_rawDescGZIP(), []int{0}\n}", "func (*DeleteUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_api_user_v1_user_proto_rawDescGZIP(), []int{4}\n}", "func (*User) Descriptor() ([]byte, []int) {\n\treturn file_users_proto_rawDescGZIP(), []int{0}\n}", "func (*User) Descriptor() ([]byte, []int) {\n\treturn file_users_proto_rawDescGZIP(), []int{0}\n}", "func (*RemoveUserServerScopeRequest) Descriptor() ([]byte, []int) {\n\treturn file_buf_alpha_registry_v1alpha1_user_proto_rawDescGZIP(), []int{29}\n}", "func (*DisconnectUsersRequest) Descriptor() ([]byte, []int) {\n\treturn file_api_protobuf_spec_connection_user_v1_proto_rawDescGZIP(), []int{18}\n}", "func (*GetUserInfoByTokenRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_user_user_proto_rawDescGZIP(), []int{12}\n}", "func (*ListUserReply) Descriptor() ([]byte, []int) {\n\treturn file_v1_user_proto_rawDescGZIP(), []int{5}\n}", "func (*DeleteUserRequest) Descriptor() ([]byte, []int) {\n\treturn edgelq_iam_proto_v1alpha_user_service_proto_rawDescGZIP(), []int{11}\n}", "func (*CreatePermissionRequest) Descriptor() ([]byte, []int) {\n\treturn file_protodef_user_user_proto_rawDescGZIP(), []int{17}\n}", "func (*User) Descriptor() ([]byte, []int) {\n\treturn file_protos_users_proto_rawDescGZIP(), []int{0}\n}", "func (*User) Descriptor() ([]byte, []int) {\n\treturn file_user_user_proto_rawDescGZIP(), []int{1}\n}", "func (*DeleteUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_user_service_proto_rawDescGZIP(), []int{1}\n}", "func (*User) Descriptor() ([]byte, []int) {\n\treturn file_chat_proto_rawDescGZIP(), []int{2}\n}", "func (*RefreshUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_directory_proto_rawDescGZIP(), []int{2}\n}", "func (*ListUserReply) Descriptor() ([]byte, []int) {\n\treturn file_api_user_v1_user_proto_rawDescGZIP(), []int{9}\n}", "func 
(*UpdateUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_user_user_proto_rawDescGZIP(), []int{7}\n}", "func (*Token) Descriptor() ([]byte, []int) {\n\treturn file_proto_user_user_proto_rawDescGZIP(), []int{3}\n}", "func (*User) Descriptor() ([]byte, []int) {\n\treturn file_example_proto_rawDescGZIP(), []int{4}\n}", "func (*UserRequest) Descriptor() ([]byte, []int) {\n\treturn file_myapp_user_proto_rawDescGZIP(), []int{1}\n}", "func (*DeleteUserResponse) Descriptor() ([]byte, []int) {\n\treturn file_buf_alpha_registry_v1alpha1_user_proto_rawDescGZIP(), []int{14}\n}", "func (*User) Descriptor() ([]byte, []int) {\n\treturn file_user_copy_proto_rawDescGZIP(), []int{0}\n}", "func (*CreateUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_protodef_user_user_proto_rawDescGZIP(), []int{1}\n}", "func (*User) Descriptor() ([]byte, []int) {\n\treturn file_types_v1_user_proto_rawDescGZIP(), []int{0}\n}", "func (*AddUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_user_user_proto_rawDescGZIP(), []int{8}\n}", "func (*User) Descriptor() ([]byte, []int) {\n\treturn file_message_proto_rawDescGZIP(), []int{1}\n}", "func (*AddUserServerScopeRequest) Descriptor() ([]byte, []int) {\n\treturn file_buf_alpha_registry_v1alpha1_user_proto_rawDescGZIP(), []int{25}\n}" ]
[ "0.70660174", "0.69403785", "0.678979", "0.6760362", "0.6759122", "0.6756026", "0.67284197", "0.66994125", "0.6697244", "0.66709644", "0.66658753", "0.66588396", "0.6652014", "0.66486746", "0.6647983", "0.66478443", "0.6627967", "0.66196585", "0.6619095", "0.6616627", "0.6610968", "0.66026646", "0.66026646", "0.65938795", "0.6592791", "0.6591028", "0.6583832", "0.6583832", "0.65836805", "0.6577094", "0.6573564", "0.65719664", "0.6554503", "0.65538824", "0.65530443", "0.65526533", "0.6552273", "0.6552273", "0.65481395", "0.65449834", "0.65388423", "0.6538112", "0.6536575", "0.6534424", "0.65321654", "0.65234524", "0.65227264", "0.65202653", "0.6517872", "0.65135115", "0.65128225", "0.65110266", "0.6507021", "0.6505639", "0.6505104", "0.6504396", "0.65006083", "0.6499123", "0.64985377", "0.6494872", "0.6493451", "0.64932775", "0.6492185", "0.6486763", "0.64859766", "0.64846027", "0.6483458", "0.6481991", "0.64813703", "0.64792055", "0.6477792", "0.6477105", "0.64738494", "0.64721316", "0.64710987", "0.6469733", "0.6469733", "0.6467415", "0.6466096", "0.6464", "0.6462982", "0.64612913", "0.6460767", "0.6460081", "0.64596456", "0.64548415", "0.6453026", "0.6447965", "0.64468855", "0.6446425", "0.64443165", "0.64419585", "0.6439751", "0.6439702", "0.64394194", "0.64381593", "0.64363676", "0.64348304", "0.6429857", "0.6428909" ]
0.68749994
2
Deprecated: Use Replication_UserManaged_Replica.ProtoReflect.Descriptor instead.
func (*Replication_UserManaged_Replica) Descriptor() ([]byte, []int) {
	return file_google_cloud_secrets_v1beta1_resources_proto_rawDescGZIP(), []int{2, 1, 0}
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (*ReplicaInfo) Descriptor() ([]byte, []int) {\n\treturn file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDescGZIP(), []int{0}\n}", "func (ReplicaInfo_ReplicaType) EnumDescriptor() ([]byte, []int) {\n\treturn file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDescGZIP(), []int{0, 0}\n}", "func (*GetReplicaVisibleLengthResponseProto) Descriptor() ([]byte, []int) {\n\treturn file_ClientDatanodeProtocol_proto_rawDescGZIP(), []int{1}\n}", "func (*Deprecation) Descriptor() ([]byte, []int) {\n\treturn file_external_cfgmgmt_response_nodes_proto_rawDescGZIP(), []int{8}\n}", "func (*GetDelegationTokenResponseProto) Descriptor() ([]byte, []int) {\n\treturn file_Security_proto_rawDescGZIP(), []int{4}\n}", "func (*Replication_UserManaged) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_secrets_v1beta1_resources_proto_rawDescGZIP(), []int{2, 1}\n}", "func (*McUserProto) Descriptor() ([]byte, []int) {\n\treturn file_pkg_mcclient_mcuser_mcuser_proto_proto_rawDescGZIP(), []int{0}\n}", "func (*DatabaseInstance_SqlFailoverReplica) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_sql_v1beta4_cloud_sql_resources_proto_rawDescGZIP(), []int{14, 0}\n}", "func (*LoginedUserRep) Descriptor() ([]byte, []int) {\n\treturn file_proto_user_proto_rawDescGZIP(), []int{3}\n}", "func (*GetReplicaVisibleLengthRequestProto) Descriptor() ([]byte, []int) {\n\treturn file_ClientDatanodeProtocol_proto_rawDescGZIP(), []int{0}\n}", "func (*UserRep) Descriptor() ([]byte, []int) {\n\treturn file_proto_user_proto_rawDescGZIP(), []int{1}\n}", "func (*RenewDelegationTokenResponseProto) Descriptor() ([]byte, []int) {\n\treturn file_Security_proto_rawDescGZIP(), []int{6}\n}", "func (*NewEpoch_RemoteEpochChange) Descriptor() ([]byte, []int) {\n\treturn file_msgs_msgs_proto_rawDescGZIP(), []int{25, 0}\n}", "func (*PatchCollectorsRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{161}\n}", "func (*RelationTupleDelta) Descriptor() ([]byte, []int) {\n\treturn file_ory_keto_acl_v1alpha1_write_service_proto_rawDescGZIP(), []int{1}\n}", "func (*DisassociatePrimaryUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_token_proto_rawDescGZIP(), []int{8}\n}", "func (x *fastReflection_LightClientAttackEvidence) Descriptor() protoreflect.MessageDescriptor {\n\treturn md_LightClientAttackEvidence\n}", "func (*Permission) Descriptor() ([]byte, []int) {\n\treturn file_proto_fandncloud_service_user_user_proto_rawDescGZIP(), []int{6}\n}", "func (*UserModifyResp) Descriptor() ([]byte, []int) {\n\treturn file_api_interface_v1_user_proto_rawDescGZIP(), []int{1}\n}", "func (*ReplicaConfiguration) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_sql_v1beta4_cloud_sql_resources_proto_rawDescGZIP(), []int{52}\n}", "func (*ApnResourceInternal) Descriptor() ([]byte, []int) {\n\treturn file_lte_cloud_go_services_subscriberdb_protos_subscriberdb_proto_rawDescGZIP(), []int{11}\n}", "func (*DemoteMasterMySqlReplicaConfiguration) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_sql_v1beta4_cloud_sql_resources_proto_rawDescGZIP(), []int{18}\n}", "func (*DeleteUserReply) Descriptor() ([]byte, []int) {\n\treturn file_user_proto_rawDescGZIP(), []int{8}\n}", "func (*DemoteMasterMySqlReplicaConfiguration) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_sql_v1_cloud_sql_resources_proto_rawDescGZIP(), []int{12}\n}", "func (*PermissionCollection) Descriptor() ([]byte, []int) {\n\treturn 
file_protodef_user_user_proto_rawDescGZIP(), []int{19}\n}", "func (*GetDatanodeInfoResponseProto) Descriptor() ([]byte, []int) {\n\treturn file_ClientDatanodeProtocol_proto_rawDescGZIP(), []int{13}\n}", "func (*MonitoredResourceDescriptor) Descriptor() ([]byte, []int) {\n\treturn edgelq_monitoring_proto_v3_monitored_resource_descriptor_proto_rawDescGZIP(), []int{0}\n}", "func (*UserSlice) Descriptor() ([]byte, []int) {\n\treturn file_proto_service_proto_rawDescGZIP(), []int{2}\n}", "func (*Access) Descriptor() ([]byte, []int) {\n\treturn file_yandex_cloud_mdb_clickhouse_v1_cluster_proto_rawDescGZIP(), []int{9}\n}", "func (*User) Descriptor() ([]byte, []int) {\n\treturn file_proto_indrasaputra_orvosi_v1_orvosi_proto_rawDescGZIP(), []int{2}\n}", "func (*NetMessageSplitscreenUserChanged) Descriptor() ([]byte, []int) {\n\treturn file_artifact_networksystem_protomessages_proto_rawDescGZIP(), []int{0}\n}", "func (*Device) Descriptor() ([]byte, []int) {\n\treturn file_incus_migration_migrate_proto_rawDescGZIP(), []int{2}\n}", "func (*Permission) Descriptor() ([]byte, []int) {\n\treturn file_protodef_user_user_proto_rawDescGZIP(), []int{18}\n}", "func (*ApiWarning) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_sql_v1_cloud_sql_resources_proto_rawDescGZIP(), []int{1}\n}", "func (*RecoveryCode) Descriptor() ([]byte, []int) {\n\treturn file_grpc_user_proto_rawDescGZIP(), []int{17}\n}", "func (*TrustedDelegation) Descriptor() ([]byte, []int) {\n\treturn file_go_chromium_org_luci_swarming_proto_config_pools_proto_rawDescGZIP(), []int{3}\n}", "func (*DisassociatePrimaryUserResponse) Descriptor() ([]byte, []int) {\n\treturn file_token_proto_rawDescGZIP(), []int{9}\n}", "func (*WinRMListener) Descriptor() ([]byte, []int) {\n\treturn file_moc_common_computecommon_proto_rawDescGZIP(), []int{0}\n}", "func (*CancelDelegationTokenResponseProto) Descriptor() ([]byte, []int) {\n\treturn file_Security_proto_rawDescGZIP(), []int{8}\n}", "func ProtoReplicaDescFactory() proto.Message {\n\treturn NewReplicaDesc()\n}", "func (*RefreshNamenodesResponseProto) Descriptor() ([]byte, []int) {\n\treturn file_ClientDatanodeProtocol_proto_rawDescGZIP(), []int{3}\n}", "func (*Persistent) Descriptor() ([]byte, []int) {\n\treturn file_msgs_msgs_proto_rawDescGZIP(), []int{2}\n}", "func (*PatchCollaboratorsRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{21}\n}", "func (*RaftClientReplyProto) Descriptor() ([]byte, []int) {\n\treturn file_raft_proto_rawDescGZIP(), []int{31}\n}", "func (*ApiWarning) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_sql_v1beta4_cloud_sql_resources_proto_rawDescGZIP(), []int{1}\n}", "func (*DetachMetadataRequest) Descriptor() ([]byte, []int) {\n\treturn file_go_chromium_org_luci_cipd_api_cipd_v1_repo_proto_rawDescGZIP(), []int{22}\n}", "func (*DeleteUserReply) Descriptor() ([]byte, []int) {\n\treturn file_api_user_v1_user_proto_rawDescGZIP(), []int{5}\n}", "func (*UserCollection) Descriptor() ([]byte, []int) {\n\treturn file_protodef_user_user_proto_rawDescGZIP(), []int{3}\n}", "func (*Preferences) Descriptor() ([]byte, []int) {\n\treturn file_google_ads_googleads_v2_services_reach_plan_service_proto_rawDescGZIP(), []int{8}\n}", "func (*NewCollectionReply) Descriptor() ([]byte, []int) {\n\treturn file_threads_proto_rawDescGZIP(), []int{14}\n}", "func (*Discovery) Descriptor() ([]byte, []int) {\n\treturn file_raft_proto_rawDescGZIP(), []int{0}\n}", "func (*Reconfiguration_NewClient) Descriptor() ([]byte, []int) 
{\n\treturn file_msgs_msgs_proto_rawDescGZIP(), []int{1, 0}\n}", "func (*UpdateRemoteMirrorRequest) Descriptor() ([]byte, []int) {\n\treturn file_remote_proto_rawDescGZIP(), []int{0}\n}", "func (*ExistingUserRelationships) Descriptor() ([]byte, []int) {\n\treturn file_protodef_user_user_proto_rawDescGZIP(), []int{6}\n}", "func (*MySqlReplicaConfiguration) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_sql_v1beta4_cloud_sql_resources_proto_rawDescGZIP(), []int{43}\n}", "func (*DiskBalancerSettingResponseProto) Descriptor() ([]byte, []int) {\n\treturn file_ClientDatanodeProtocol_proto_rawDescGZIP(), []int{27}\n}", "func (*Group) Descriptor() ([]byte, []int) {\n\treturn file_proto_fandncloud_service_user_user_proto_rawDescGZIP(), []int{3}\n}", "func (*PlmnIdentity) Descriptor() ([]byte, []int) {\n\treturn file_e2sm_mho_go_v2_e2sm_v2_proto_rawDescGZIP(), []int{76}\n}", "func (*RefreshToken) Descriptor() ([]byte, []int) {\n\treturn file_proto_fandncloud_service_user_user_proto_rawDescGZIP(), []int{8}\n}", "func (*FoldingRangeClientCapabilities) Descriptor() ([]byte, []int) {\n\treturn file_protocol_rpc_rpc_proto_rawDescGZIP(), []int{196}\n}", "func (*UserView) Descriptor() ([]byte, []int) {\n\treturn file_management_proto_rawDescGZIP(), []int{13}\n}", "func (*DiagnoseRuntimeRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_notebooks_v1_managed_service_proto_rawDescGZIP(), []int{14}\n}", "func (*UserView) Descriptor() ([]byte, []int) {\n\treturn file_management_proto_rawDescGZIP(), []int{15}\n}", "func (*MemberReceiveAddressDeleteResp) Descriptor() ([]byte, []int) {\n\treturn file_ums_proto_rawDescGZIP(), []int{89}\n}", "func (*UserMembershipView) Descriptor() ([]byte, []int) {\n\treturn file_management_proto_rawDescGZIP(), []int{138}\n}", "func (*UpdatePermissionRequest) Descriptor() ([]byte, []int) {\n\treturn file_protodef_user_user_proto_rawDescGZIP(), []int{16}\n}", "func (*Role) Descriptor() ([]byte, []int) {\n\treturn file_protodef_user_user_proto_rawDescGZIP(), []int{10}\n}", "func (*GetDelegationTokenRequestProto) Descriptor() ([]byte, []int) {\n\treturn file_Security_proto_rawDescGZIP(), []int{3}\n}", "func (*Example) Descriptor() ([]byte, []int) {\n\treturn file_google_ai_generativelanguage_v1beta2_discuss_service_proto_rawDescGZIP(), []int{4}\n}", "func (*UpdateCollectionReply) Descriptor() ([]byte, []int) {\n\treturn file_threads_proto_rawDescGZIP(), []int{16}\n}", "func (*SemanticTokensDelta) Descriptor() ([]byte, []int) {\n\treturn file_protocol_rpc_rpc_proto_rawDescGZIP(), []int{223}\n}", "func (*DisconnectedRequest) Descriptor() ([]byte, []int) {\n\treturn file_vm_vm_proto_rawDescGZIP(), []int{29}\n}", "func (*UpgradeRuntimeRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_notebooks_v1_managed_service_proto_rawDescGZIP(), []int{9}\n}", "func (*ClientUpdateMessagesDeleted) Descriptor() ([]byte, []int) {\n\treturn file_client_updates_proto_rawDescGZIP(), []int{1}\n}", "func (*ResourceOptions) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_gkehub_v1_membership_proto_rawDescGZIP(), []int{3}\n}", "func (x *fastReflection_ServiceCommandDescriptor) Descriptor() protoreflect.MessageDescriptor {\n\treturn md_ServiceCommandDescriptor\n}", "func (*MySqlReplicaConfiguration) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_sql_v1_cloud_sql_resources_proto_rawDescGZIP(), []int{20}\n}", "func (*UpdateDeleteDisconnectedServicesConfigRes) Descriptor() ([]byte, []int) {\n\treturn 
file_external_applications_applications_proto_rawDescGZIP(), []int{18}\n}", "func (*DisableVolumeReplicationRequest) Descriptor() ([]byte, []int) {\n\treturn file_replication_proto_rawDescGZIP(), []int{2}\n}", "func (*UpdateRemoteMirrorRequest_Remote) Descriptor() ([]byte, []int) {\n\treturn file_remote_proto_rawDescGZIP(), []int{0, 0}\n}", "func (*OnboardUserPreferences) Descriptor() ([]byte, []int) {\n\treturn file_teleport_userpreferences_v1_onboard_proto_rawDescGZIP(), []int{0}\n}", "func (*MemberDeleteResp) Descriptor() ([]byte, []int) {\n\treturn file_ums_proto_rawDescGZIP(), []int{8}\n}", "func (*User) Descriptor() ([]byte, []int) {\n\treturn file_example_proto_rawDescGZIP(), []int{4}\n}", "func (*MemberReceiveAddressUpdateResp) Descriptor() ([]byte, []int) {\n\treturn file_ums_proto_rawDescGZIP(), []int{87}\n}", "func (*NewUserRelationships) Descriptor() ([]byte, []int) {\n\treturn file_protodef_user_user_proto_rawDescGZIP(), []int{7}\n}", "func (*DiagOperation) Descriptor() ([]byte, []int) {\n\treturn file_testvector_tv_proto_rawDescGZIP(), []int{10}\n}", "func (*UpdateClusterMetadata) Descriptor() ([]byte, []int) {\n\treturn file_yandex_cloud_mdb_greenplum_v1_cluster_service_proto_rawDescGZIP(), []int{6}\n}", "func (*UserOwnedGrafeasNote) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_binaryauthorization_v1_resources_proto_rawDescGZIP(), []int{4}\n}", "func (*DeleteLeaderboardRecordRequest) Descriptor() ([]byte, []int) {\n\treturn file_api_proto_rawDescGZIP(), []int{31}\n}", "func (*User) Descriptor() ([]byte, []int) {\n\treturn file_proto_service_proto_rawDescGZIP(), []int{0}\n}", "func (*User) Descriptor() ([]byte, []int) {\n\treturn file_proto_service_proto_rawDescGZIP(), []int{0}\n}", "func (*ReadTensorboardUsageResponse_PerUserUsageData) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_aiplatform_v1_tensorboard_service_proto_rawDescGZIP(), []int{7, 0}\n}", "func (*RoleCollection) Descriptor() ([]byte, []int) {\n\treturn file_protodef_user_user_proto_rawDescGZIP(), []int{11}\n}", "func (*RevokeJobRequest) Descriptor() ([]byte, []int) {\n\treturn file_pkg_noderpc_proto_feeds_manager_proto_rawDescGZIP(), []int{20}\n}", "func (*Replication) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_secrets_v1beta1_resources_proto_rawDescGZIP(), []int{2}\n}", "func (*Friend) Descriptor() ([]byte, []int) {\n\treturn file_grpc_user_proto_rawDescGZIP(), []int{0}\n}", "func (*RoleInfoProto) Descriptor() ([]byte, []int) {\n\treturn file_raft_proto_rawDescGZIP(), []int{42}\n}", "func (*NewDBReply) Descriptor() ([]byte, []int) {\n\treturn file_threads_proto_rawDescGZIP(), []int{6}\n}", "func (*CMsgRefreshPartnerAccountLink) Descriptor() ([]byte, []int) {\n\treturn file_dota_gcmessages_client_proto_rawDescGZIP(), []int{65}\n}", "func (*Disconnect) Descriptor() ([]byte, []int) {\n\treturn file_uni_proto_rawDescGZIP(), []int{11}\n}" ]
[ "0.6919288", "0.67240775", "0.66489303", "0.6557044", "0.63410616", "0.63238233", "0.6313102", "0.6294632", "0.62538755", "0.6247766", "0.6238576", "0.6229779", "0.6208036", "0.6207874", "0.6207713", "0.61889184", "0.6183292", "0.61792266", "0.6175798", "0.61689883", "0.6168818", "0.61660755", "0.6166073", "0.6163629", "0.61621964", "0.61563164", "0.6153408", "0.61504465", "0.6148748", "0.6142117", "0.61411554", "0.61390424", "0.6129949", "0.61281306", "0.6124024", "0.61229783", "0.6117544", "0.61161005", "0.6112837", "0.61103946", "0.61098486", "0.6109222", "0.6103514", "0.60910195", "0.6090219", "0.6085658", "0.60835785", "0.60738796", "0.6070584", "0.60702056", "0.60659045", "0.6061747", "0.60555553", "0.60549176", "0.6047986", "0.6047607", "0.6044324", "0.6041708", "0.60392743", "0.6036991", "0.6031976", "0.60309076", "0.6030333", "0.6028857", "0.60268193", "0.60233814", "0.60213923", "0.60207", "0.6019716", "0.60182947", "0.601744", "0.60174376", "0.6016074", "0.6015444", "0.6013186", "0.6012296", "0.6012027", "0.60109776", "0.60099494", "0.6009118", "0.6008294", "0.6007781", "0.6007532", "0.6003853", "0.6003819", "0.600204", "0.60016435", "0.59999853", "0.59993255", "0.59969753", "0.59969753", "0.5996619", "0.5989473", "0.59893215", "0.5988759", "0.598605", "0.5985642", "0.59850556", "0.59848326", "0.59845996" ]
0.71663374
0
Must be called as soon as the program starts to initialize Args
func getArguments() {
	// define pointers to the arguments which will be filled up when flag.Parse() is called
	langFlag := flag.String("l", string(auto), "Which language to use. Args are: lua | wren | moon | auto")
	dirFlag := flag.String("d", ".", "The directory containing the main file and the subfiles")
	outFlag := flag.String("o", "out", "The output file (sans extension)")
	watchFlag := flag.Bool("w", false, "Whether to enable Watch mode, which automatically recompiles if a file has changed in the directory")
	definesFlag := flag.String("D", "", "Used to pass in defines before compiling. Format is -D \"var1=value;var2=value;var3=value\"")

	// begin parsing the flags
	flag.Parse()

	// these setup functions have to be performed in this particular order
	// because they depend on certain fields of Args to be set when they are called
	_setDir(*dirFlag)
	_setLanguage(*langFlag)
	_setOutputFile(*outFlag)
	_setDefines(*definesFlag)
	Args.watchMode = *watchFlag

	// this gives all the non-flag command line args
	Args.positional = flag.Args()
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func argsInit() {\n\tArgs = make([]string, 0, 0)\n\tArgs = append(Args, os.Args...)\n\tExecFile = options.GetExecFileByPid(os.Getpid())\n\t// default opt Parser\n\t// do not include ExecFile\n\topts = options.NewOptParser(Args[1:])\n\tArgLine = options.ArgsToSpLine(Args)\n\tArgFullLine = options.CleanArgLine(os.Args[0] + \" \" + opts.String())\n\t//\n}", "func initArgs(){\n\t//master -config ./master.json\n\tflag.StringVar(&confFile, \"config\", \"./master.json\", \"specify master.json as config file\")\n\tflag.Parse()\n}", "func InitArgs(args ...string) func(*LinuxFactory) error {\n\treturn func(l *LinuxFactory) error {\n\t\tname := args[0]\n\t\tif filepath.Base(name) == name {\n\t\t\tif lp, err := exec.LookPath(name); err == nil {\n\t\t\t\tname = lp\n\t\t\t}\n\t\t} else {\n\t\t\tabs, err := filepath.Abs(name)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tname = abs\n\t\t}\n\t\tl.InitPath = \"/proc/self/exe\"\n\t\tl.InitArgs = append([]string{name}, args[1:]...)\n\t\treturn nil\n\t}\n}", "func Args() []string { return CommandLine.args }", "func argInit() args {\n\n\tvar a args\n\tflag.Float64Var(&a.x1, \"x1\", -2.0, \"left position of real axis\")\n\tflag.Float64Var(&a.x2, \"x2\", 1.0, \"right position of real axis\")\n\tflag.Float64Var(&a.y1, \"y1\", -1.5, \"down position of imaginary axis\")\n\tflag.Float64Var(&a.y2, \"y2\", 1.5, \"up position of imaginary axis\")\n\tflag.Float64Var(&a.threshold, \"th\", 4.0, \"squared threshold of the function\")\n\tflag.IntVar(&a.w, \"w\", 1000, \"width in pixels of the image\")\n\tflag.IntVar(&a.h, \"h\", 1000, \"height in pixels of the image\")\n\tflag.IntVar(&a.nIter, \"ni\", 100, \"maximum number of iterations for pixel\")\n\tflag.IntVar(&a.nRoutines, \"nr\", 4, \"number of go routines to be used\")\n\tflag.StringVar(&a.path, \"p\", \"./\", \"path to the generated png image\")\n\n\tflag.Parse()\n\treturn a\n}", "func InitArgs(cliArgs cli.Args) (*Args, error) {\n\targs := Args{Pattern: cliArgs.Get(0)}\n\tpath, err := filepath.Abs(cliArgs.Get(1))\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, path)\n\t}\n\targs.Path = path\n\treturn &args, nil\n}", "func Args() []string\t{ return os.Args[flags.first_arg:len(os.Args)] }", "func init() {\n\tprepareOptionsFromCommandline(&configFromInit)\n\tparseConfigFromEnvironment(&configFromInit)\n}", "func process_arguments() {\n\tfmt.Println(\"Processing arguments\")\n\tflag.Parse()\n}", "func (t *Terraform) initArgs(p types.ProviderType, cfg map[string]interface{}, clusterDir string) []string {\n\targs := make([]string, 0)\n\n\tvarsFile := filepath.Join(clusterDir, tfVarsFile)\n\n\targs = append(args, fmt.Sprintf(\"-var-file=%s\", varsFile), clusterDir)\n\n\treturn args\n}", "func (o PgbenchSpecOutput) InitArgs() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v PgbenchSpec) *string { return v.InitArgs }).(pulumi.StringPtrOutput)\n}", "func parseArgs() {\n\tflag.StringVar(&cmdline.Config, \"config\", cmdline.Config, \"Path to configutation file\")\n\tflag.StringVar(&cmdline.Host, \"host\", cmdline.Host, \"Host IP or name to bind to\")\n\tflag.IntVar(&cmdline.Port, \"port\", cmdline.Port, \"Port to bind to\")\n\tflag.StringVar(&cmdline.Maildirs, \"maildirs\", cmdline.Maildirs, \"Path to the top level of the user Maildirs\")\n\tflag.StringVar(&cmdline.Logfile, \"log\", cmdline.Logfile, \"Path to logfile\")\n\tflag.BoolVar(&cmdline.Debug, \"debug\", cmdline.Debug, \"Log debugging information\")\n\n\tflag.Parse()\n}", "func main_init()", "func init() {\n\tflag.StringVar(&KubectlPath, 
\"kubectl-path\", \"\", \"Path to the kubectl binary\")\n\tflag.StringVar(&ClusterctlPath, \"clusterctl-path\", \"\", \"Path to the clusterctl binary\")\n\tflag.StringVar(&DumpPath, \"dump-path\", \"\", \"Path to the kubevirt artifacts dump cmd binary\")\n\tflag.StringVar(&WorkingDir, \"working-dir\", \"\", \"Path used for e2e test files\")\n}", "func init() {\n\tenv = flag.String(\"env\", \"development\", \"current environment\")\n\tport = flag.Int(\"port\", 3000, \"port number\")\n}", "func Main(args ...interface{}) {\n\n\t// ...\n}", "func init() {\n\tusage = fmt.Sprintf(\"Usage: %s config.toml\", os.Args[0])\n}", "func init() {\n\t\n\ta := len(os.Args)\n\n\tif a > 1 {\n\t\tN, _ = strconv.Atoi(os.Args[1]) \t\t// Number of goroutines\n\t\tif a > 2 {\n\t\t\tR, _ = strconv.Atoi(os.Args[2])\t\t// Number of rounds\n\t\t\tif a > 3 {\n\t\t\t\tF, _ = strconv.Atoi(os.Args[3]) // Number of flows \n\t\t\t}\n\t\t}\n\t}\n\n\tif N < 1 { // 0 deadlocks.\n\t\tN = 1000\n\t}\n\n\tif R < 1 {\n\t\tR = 1\n\t}\n\n\tif F > N {\n\t\tF = N\n\t}\n}", "func init() {\n\thostPtr := flag.String(\"host\", \"localhost\", \"ip of host\")\n\tportPtr := flag.String(\"port\", \"12345\", \"port on which to run server\")\n\tflag.Parse()\n\thost = *hostPtr\n\tport = *portPtr\n}", "func init() {\n\tconst (\n\t\tmemMaxStr = \"--mem.max=\"\n\t\tmemMinStr = \"--mem.min=\"\n\t)\n\tfor _, item := range os.Args {\n\t\tif strings.HasPrefix(item, memMaxStr) {\n\t\t\tn := ParseByteNumber(item[len(memMaxStr):])\n\t\t\tif n <= 0 {\n\t\t\t\tpanic(\"cmd flags error:\" + item)\n\t\t\t}\n\t\t\tmaxMem = n\n\t\t} else if strings.HasPrefix(item, memMinStr) {\n\t\t\tn := ParseByteNumber(item[len(memMinStr):])\n\t\t\tif n <= 0 {\n\t\t\t\tpanic(\"cmd flags error:\" + item)\n\t\t\t}\n\t\t\tminMem = n\n\t\t}\n\t}\n\tif maxMem == 0 {\n\t\tmaxMem = 32 * 1024 * 1024 * 1024\n\t}\n\tif minMem == 0 {\n\t\tminMem = 512 * 1024 * 1024\n\t}\n\tgo checkMemory()\n}", "func init() {\n\tfor _, arg := range os.Args {\n\t\tif flag := strings.TrimLeft(arg, \"-\"); flag == TracingEnabledFlag {\n\t\t\tEnabled = true\n\t\t}\n\t}\n}", "func Start(args []string) {\n\tfmt.Println(args)\n}", "func (a *Analytics) Init() {\n\tflag.IntVar(&a.lowerBound, \"l\", 100, \"lower size of input\")\n\tflag.IntVar(&a.upperBound, \"u\", 10000, \"upper size of input\")\n\tflag.IntVar(&a.step, \"s\", 100, \"step of increasing input size\")\n\tflag.IntVar(&a.repetitions, \"r\", 1000, \"number of repetitions for given input size\")\n\n\tflag.Parse()\n}", "func (s *sysCC) InitArgs() [][]byte {\n\treturn s.initArgs\n}", "func init() {\n\tappCmd.AddCommand(appInstallCmd)\n\tappInstallCmd.Flags().StringVarP(&appInstallVersion, \"version\", \"v\", \"\", \"Specify the version of the contribution (optional)\")\n\tappInstallCmd.Flags().StringVarP(&appInstallName, \"name\", \"n\", \"\", \"The name of the contribution (required)\")\n\tappInstallCmd.Flags().BoolVarP(&appInstallPalette, \"palette\", \"p\", false, \"Install palette file\")\n\tappInstallCmd.MarkFlagRequired(\"name\")\n}", "func init() {\n\tkmerSize = indexCmd.Flags().IntP(\"kmerSize\", \"k\", 31, \"size of k-mer\")\n\tsketchSize = indexCmd.Flags().IntP(\"sketchSize\", \"s\", 21, \"size of MinHash sketch\")\n\twindowSize = indexCmd.Flags().IntP(\"windowSize\", \"w\", 100, \"size of window to sketch graph traversals with\")\n\tnumPart = indexCmd.Flags().IntP(\"numPart\", \"x\", 8, \"number of partitions in the LSH Ensemble\")\n\tmaxK = indexCmd.Flags().IntP(\"maxK\", \"y\", 4, \"maxK in the LSH Ensemble\")\n\tmaxSketchSpan = 
indexCmd.Flags().Int(\"maxSketchSpan\", 30, \"max number of identical neighbouring sketches permitted in any graph traversal\")\n\tmsaDir = indexCmd.Flags().StringP(\"msaDir\", \"m\", \"\", \"directory containing the clustered references (MSA files) - required\")\n\tindexCmd.MarkFlagRequired(\"msaDir\")\n\tRootCmd.AddCommand(indexCmd)\n}", "func init() {\n\tapp.Parse(os.Args[1:])\n\tif *logging {\n\t\tf, err := os.Create(*logFile)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tlog.SetFlags(0)\n\t\tlog.SetOutput(f)\n\t} else {\n\t\tdis := ioutil.Discard\n\t\tlog.SetOutput(dis)\n\t}\n\tfmt.Println(\"Game starting\\nPoolsize :\\t\", *poolSize)\n\tfmt.Println(\"Generations :\\t\", *generations)\n\tfmt.Println(\"neuronsPL :\\t\", *neuronsPerLayer)\n}", "func init() {\n\ttrimSwitch = alignCmd.Flags().Bool(\"trim\", false, \"enable quality based trimming of reads (post seeding)\")\n\tminQual = alignCmd.Flags().IntP(\"minQual\", \"q\", 20, \"minimum base quality (used in quality based trimming)\")\n\tminRL = alignCmd.Flags().IntP(\"minRL\", \"l\", 100, \"minimum read length (evaluated post trimming)\")\n\tclip = alignCmd.Flags().IntP(\"clip\", \"c\", 5, \"maximum number of clipped bases allowed during local alignment\")\n\tindexDir = alignCmd.Flags().StringP(\"indexDir\", \"i\", \"\", \"directory containing the index files - required\")\n\tfastq = alignCmd.Flags().StringSliceP(\"fastq\", \"f\", []string{}, \"FASTQ file(s) to align\")\n\tgraphDir = alignCmd.PersistentFlags().StringP(\"graphDir\", \"o\", defaultGraphDir, \"directory to save variation graphs to\")\n\talignCmd.MarkFlagRequired(\"indexDir\")\n\tRootCmd.AddCommand(alignCmd)\n}", "func InitFlags() {\n\n pflag.CommandLine.SetNormalizerFunc(WordSepNormalizeFunc)\n pflag.CommandLine.AddGoFlagSet(goflag.ComamndLine)\n pflag.Parse()\n pflag.VisitAll(func(flag *pflag.Flag)) {\n \tglog.V(2).Infof(\"FLAG: --%s=%q\", flag.Name, flag.Value)\n }\n}", "func init() {\n\tflag.Parse()\n}", "func init() {\n\tflag.Parse()\n}", "func init() {\n\truntime.LockOSThread()\n\tflag.IntVar(&flagDesktopNumber, \"desktop\", -1, \"the index of the desktop to create the main window on\")\n\tflag.StringVar(&flagComponentFile, \"cf\", \"component.json\", \"the name of the component file to load and save\")\n}", "func init() {\n\t// We pass the user variable we declared at the package level (above).\n\t// The \"&\" character means we are passing the variable \"by reference\" (as opposed to \"by value\"),\n\t// meaning: we don't want to pass a copy of the user variable. 
We want to pass the original variable.\n\tflag.StringVarP(&city, \"weather\", \"w\", \"\", \"get weather by [city,country code] (ex: paris,fr)\")\n\tflag.StringVarP(&user, \"user\", \"u\", \"\", \"Search Github Users\")\n\tflag.StringVarP(&repo, \"repo\", \"r\", \"\", \"Search Github repos by User\\n Usage: cli -u [user name] -r 'y'\\n\")\n\tflag.StringVarP(&movie, \"movie\", \"m\", \"\", \"Search Movies\")\n\t// flag.StringVarP(&genre, \"genre\", \"g\", \"\", \"Search Movie by genre\\n Usage: cli -g {not yet implemented}\\n\")\n\tflag.StringVarP(&news, \"news\", \"n\", \"\", \"Search News by country code (ex: fr, us)\")\n\tflag.StringVarP(&category, \"category\", \"c\", \"\", \"Search News by category\\n Usage: cli -n [ISO 3166-1 alpha-2 country code] -c {one of:}\\n [business entertainment general health science sports technology]\")\n\tflag.StringVarP(&reddit, \"reddit\", \"R\", \"\", \"Search Reddit posts by keyword\")\n\tflag.StringVarP(&com, \"com\", \"C\", \"\", \"Search Reddit comments by postId\\n Usage: cli -R [reddit keyword] -C [postId]\\n\")\n\tflag.StringVarP(&proj, \"project\", \"p\", \"\", \"Create a Node.js micro-service by a name\\n Usage: cli -p [project name]\\n to use in terminal emulator under win env\\n\")\n\tflag.StringVarP(&publi, \"publi\", \"P\", \"\", \"Find scientific publications by search-word\\n Usage: cli -P [search term]\\n\")\n\tflag.StringVarP(&osTool, \"env\", \"e\", \"\", \"Display the env as key/val\")\n\tflag.StringVarP(&docker, \"docker\", \"d\", \"\", \"Docker tool\\n Usage: cli -d [list/l]\\n\")\n\tflag.StringVarP(&x, \"x\", \"x\", \"\", \"Width in chars of displayed ascii images\")\n\tflag.StringVarP(&netw, \"net\", \"N\", \"\", \"List local Network available adresses\")\n\tflag.StringVarP(&ip, \"ip\", \"i\", \"\", \"Remote Network details\")\n\tflag.StringVarP(&img, \"ascii\", \"a\", \"\", \"Display ascii art from local images\")\n\n\tdir, _ := syscall.Getwd()\n\tfmt.Println(\"dossier courant:\", dir)\n\t// project()\n\t// fmt.Println(createProject(\"SANDBOX\"))\n}", "func init() {\n\tflag.StringVar(&port, \"port\", \"8080\", \"port to run the utility on\")\n\tflag.BoolVar(&https, \"https\", false, \"run a HTTPS server\")\n\tflag.StringVar(&cert, \"cert\", \"localhost.crt\", \"certificate file\")\n\tflag.StringVar(&key, \"key\", \"localhost.key\", \"private key file\")\n}", "func (s *BaseGShellListener) EnterArguments(ctx *ArgumentsContext) {}", "func Init() {\n\targs, errs := options.Parse(optMap)\n\n\tif len(errs) != 0 {\n\t\tfmtc.Println(\"Arguments parsing errors:\")\n\n\t\tfor _, err := range errs {\n\t\t\tfmtc.Printf(\" %s\\n\", err.Error())\n\t\t}\n\n\t\tos.Exit(1)\n\t}\n\n\tif options.Has(OPT_COMPLETION) {\n\t\tgenCompletion()\n\t}\n\n\tif options.GetB(OPT_NO_COLOR) {\n\t\tfmtc.DisableColors = true\n\t}\n\n\tif options.GetB(OPT_VER) {\n\t\tshowAbout()\n\t\treturn\n\t}\n\n\tif options.GetB(OPT_HELP) || len(args) == 0 {\n\t\tshowUsage()\n\t\treturn\n\t}\n\n\tswitch len(args) {\n\tcase 1:\n\t\tprocess(args[0], \"\")\n\tcase 2:\n\t\tprocess(args[0], args[1])\n\tdefault:\n\t\tshowUsage()\n\t}\n}", "func init() {\r\n// get the arguments to run the server\r\n\tflag.StringVar(&Host,\"httpserver\",DEFAULT_HOST,\"name of HTTP server\")\r\n\tflag.IntVar(&Port,\"port\",DEFAULT_PORT,\"port number\")\r\n\tflag.StringVar(&UrlPath,\"urlpath\",DEFAULT_URLPATH,\"relative url path\")\r\n\tflag.StringVar(&SecretKey,\"key\",DEFAULT_KEY,\"secret key to terminate program via TCP/UDP port\")\r\n\tflag.BoolVar(&isVerbose,\"verbose\",false,\"enable verbose 
logging output\")\r\n\tflag.Parse()\r\n\tlogger.Print(\"Starting servers on Port:\"+strconv.Itoa(Port)+\" HTTP-server:\"+Host+\" urlpath:\"+UrlPath+\" Key:\"+SecretKey)\r\n\tinitConf()\r\n}", "func init() {\n\tflag.StringVar(&configfile, \"configfile\", \"/data/config/go/best-practices/config.yml\", \"config file full path\")\n\tflag.StringVar(&loggerfile, \"loggerfile\", \"\", \"seelog config file\")\n\tflag.BoolVar(&help, \"h\", false, \"show help\")\n\tflag.IntVar(&port, \"port\", 0, \"service port to listen\")\n\tflag.Parse()\n\n\tif help {\n\t\tflag.Usage()\n\t}\n\t// init logger firstly!!!\n\tmylogger.Init(loggerfile)\n\n\tappConfig.GetConfig(configfile)\n\n\tlogger.Infof(\"Init with config:%+v\", appConfig)\n}", "func init() {\n\toptions.only = make(SelectedCollectors)\n\toptions.exclude = make(SelectedCollectors)\n\n\tflag.Var(&options.only, \"only\", \"Run only the listed collectors (comma-separated list of collector names)\")\n\tflag.Var(&options.exclude, \"exclude\", \"Run all the collectors except those listed (comma-separated list of collector names)\")\n\tflag.StringVar(&options.logLevel, \"log-level\", \"info\", \"Log level (one of 'warn', 'info', 'debug')\")\n}", "func (s *BasecluListener) EnterArgs(ctx *ArgsContext) {}", "func init() {\n\tconst (\n\t\tdefaultRsFilePath = \"./data/rslist1.txt\"\n\t\trsusage = \"File containing list of rsnumbers\"\n\t\tdefaultvcfPathPref = \"\"\n\t\tvusage = \"default path prefix for vcf files\"\n\t\tdefaultThreshold = 0.9\n\t\tthrusage = \"Prob threshold\"\n\t\tdefaultAssayTypes = \"affy,illumina,broad,metabo,exome\"\n\t\tatusage = \"Assay types\"\n\t)\n\tflag.StringVar(&rsFilePath, \"rsfile\", defaultRsFilePath, rsusage)\n\tflag.StringVar(&rsFilePath, \"r\", defaultRsFilePath, rsusage+\" (shorthand)\")\n\tflag.StringVar(&vcfPathPref, \"vcfprfx\", defaultvcfPathPref, vusage)\n\tflag.StringVar(&vcfPathPref, \"v\", defaultvcfPathPref, vusage+\" (shorthand)\")\n\tflag.Float64Var(&threshold, \"threshold\", defaultThreshold, thrusage)\n\tflag.Float64Var(&threshold, \"t\", defaultThreshold, thrusage+\" (shorthand)\")\n\tflag.StringVar(&assayTypes, \"assaytypes\", defaultAssayTypes, atusage)\n\tflag.StringVar(&assayTypes, \"a\", defaultAssayTypes, atusage+\" (shorthand)\")\n\tflag.Parse()\n}", "func init() {\n\thome, err := homedir.Dir() // Fetch the current user home dir.\n\tutils.PanicErr(err) // Panic in case user dir not available\n\tdefaultOutputDir := filepath.Join(home, utils.DEFAULT_OUTPUT_PATH)\n\tgenCmd.Flags().String(utils.FLAG_INPUT, utils.DEFAULT_PDF_PATH, utils.FLAG_INPUT_DESC)\n\tgenCmd.Flags().String(utils.FLAG_OUTPUT, defaultOutputDir, utils.FLAG_OUTPUT_DESC)\n\tgenCmd.Flags().String(utils.FLAG_VOICE, utils.FEMALE_VOICE, utils.FLAG_VOICE_DESC)\n\tRootCmd.AddCommand(genCmd)\n}", "func init() {\n\tflag.BoolVar(&opts.verbose, \"verbose\", false, \"provide verbose output\")\n\tflag.BoolVar(&opts.debug, \"debug\", false, \"provide internal debugging output\")\n\tflag.StringVar(&opts.url, \"url\", \"http://localhost\", \"server URL\")\n\tflag.IntVar(&opts.port, \"port\", 3000, \"server port\")\n}", "func init() {\n\t// NOTE: this will utilize CONSUL_HTTP_ADDR if it is set.\n\tclient, err := consul.NewClient(consul.DefaultConfig())\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed to connect to Consul => {%s}\", err)\n\t}\n\tkv = client.KV()\n\n\tflag.StringVar(&srcKey, \"srcKey\", \"\", \"key to move values from\")\n\tflag.StringVar(&destKey, \"destKey\", \"\", \"key to move values to\")\n\tflag.StringVar(&srcJSON, \"srcJSON\", \"\", \"file 
to import values from\")\n\tflag.StringVar(&destJSON, \"destJSON\", \"\", \"file to export values to\")\n\tflag.BoolVar(&rename, \"rename\", false, \"place as a rename instead of a insertion\")\n\tflag.Parse()\n}", "func main() {\n\n\tvar argValues string //defining an argValues\n\tif len(os.Args) > 1 { //checking the argument values for ex: go run hello.go hello bhuppal kumar\n\t\targValues = strings.Join(os.Args[1:], \" \")\n\t}\n\tfmt.Println(argValues)\n}", "func init() {\n\tflag.StringVar(&apikey, \"apikey\", os.Getenv(\"VT_API_KEY\"), \"Set environment variable VT_API_KEY to your VT API Key or specify on prompt\")\n\tflag.StringVar(&apiurl, \"apiurl\", \"https://www.virustotal.com/vtapi/v2/\", \"URL of the VirusTotal API to be used.\")\n\tflag.StringVar(&domain, \"domain\", \"\", \"a domain to ask information about from VT.\")\n}", "func init() {\n RootCmd.AddCommand(DeployCmd)\n DeployCmd.Flags().StringP(\"file\", \"f\", \"\", \"file used to specify the job to deploy (required)\")\n DeployCmd.Flags().StringP(\"port\", \"p\", \"\", \"connect to a specific port (default: 3939)\")\n DeployCmd.MarkFlagRequired(\"file\")\n}", "func (o PgbenchSpecPtrOutput) InitArgs() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *PgbenchSpec) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.InitArgs\n\t}).(pulumi.StringPtrOutput)\n}", "func Init() *cli.App {\n\tapp := cli.NewApp()\n\tapp.Name = \"maklo\"\n\tapp.Usage = \"maklo [command]\"\n\tapp.Author = \"sofyan48\"\n\tapp.Email = \"meongbego@gmail.com\"\n\tapp.Version = \"0.0.1\"\n\tapp.Flags = []cli.Flag{\n\t\tcli.StringFlag{\n\t\t\tName: \"environtment, e\",\n\t\t\tUsage: \"Load Environtment from path\",\n\t\t\tDestination: &Args.Environment,\n\t\t},\n\t}\n\n\treturn app\n}", "func initialize() {\n\treadCommandLine()\n\n\tif *dbFlag == \"\" {\n\t\tinstallEmptyConfiguration()\n\t} else {\n\t\tlogMessage(\"Trying to load configuration from %s\", *dbFlag)\n\t\tfile, err := openFile(*dbFlag)\n\t\tif err != nil {\n\t\t\tinstallEmptyConfiguration()\n\t\t} else {\n\t\t\tputConfiguration(readFully(file))\n\t\t}\n\t}\n\n}", "func ParseArguments() {\n\t/** Initializing system */\n\tconfigParams := InitSystem()\n\n\t/** Parse arguments */\n\tglobalFlags()\n\tkubectl(configParams)\n\tdockerBuild(configParams)\n\thelm(configParams)\n\n\t/** Default behavior */\n\tAlert(\"ERR\", \"This command doesn't exists!\", false)\n\tos.Exit(1)\n}", "func init() {\n\tflag.StringVar(&name, \"name\", \"everyone\", \"The greeting object.\")\n}", "func ParseArgs() {\n\t// Set function to be called if parsing fails.\n\tflag.Usage = usage\n\n\t// Parse CLI arguments.\n\tflag.Parse()\n\n\t// Print usage text and exit if:\n\t// - browser is neither \"chrome\" or \"firefox\",\n\t// - env is neither \"dev\", \"uat\" or \"preprod\",\n\t// - headless is neither \"false\" or \"true\",\n\t// - displayAddress is not valid IP address,\n\t// - port is not a number between 1024-65535\n\tisHeadless, err := strconv.ParseBool(*headless)\n\tif !(validBrowserArg() && validEnvArg() && err == nil && validDisplayArg() && (*port >= 1024 && *port <= 65535)) {\n\t\tusage()\n\t\tos.Exit(2)\n\t}\n\n\t// Set conf global variable.\n\tconf = Conf{\n\t\tBrowser: Browser(*browser),\n\t\tEnv: Env(*env),\n\t\tHeadless: isHeadless,\n\t\tDisplayAddress: *displayAddress,\n\t\tPort: *port,\n\t\tWidth: *width,\n\t\tHeight: *height,\n\t}\n\n\t// Set caps global variable.\n\tSetCaps(conf)\n}", "func init() {\n\n\tflag.StringVar(&host, \"h\", \"\", \"SQL Server hostname or 
IP\")\n\tflag.StringVar(&user, \"u\", \"\", \"User ID\")\n\tflag.StringVar(&pass, \"p\", \"\", \"Password\")\n\tflag.StringVar(&sqlf, \"s\", \"\", \"SQL Query filename\")\n\tflag.StringVar(&outf, \"o\", \"\", \"Output filename\")\n\n\tif len(os.Args) < 5 {\n\t\tflag.Usage()\n\t\tos.Exit(1)\n\t}\n\n\tflag.Parse()\n\n}", "func parseProgramArgs() {\n\tLocalFolderPath = flag.String(\"l\", \".\", \"Path to local directory to open, default is '.'\")\n\tdpRestURL = flag.String(\"r\", \"\", \"DataPower REST URL\")\n\tdpSomaURL = flag.String(\"s\", \"\", \"DataPower SOMA URL\")\n\tdpUsername = flag.String(\"u\", \"\", \"DataPower user username\")\n\tpassword := flag.String(\"p\", \"\", \"DataPower user password\")\n\tdpDomain = flag.String(\"d\", \"\", \"DataPower domain name\")\n\tproxy = flag.String(\"x\", \"\", \"URL of proxy server for DataPower connection\")\n\tdpConfigName = flag.String(\"c\", \"\", \"Name of DataPower connection configuration to save with given configuration params\")\n\tDebugLogFile = flag.Bool(\"debug\", false, \"Write debug dpcmder.log file in current dir\")\n\tTraceLogFile = flag.Bool(\"trace\", false, \"Write trace dpcmder.log file in current dir\")\n\thelpUsage = flag.Bool(\"h\", false, \"Show dpcmder usage with examples\")\n\thelpFull = flag.Bool(\"help\", false, \"Show dpcmder in-program help on console\")\n\tversion = flag.Bool(\"v\", false, \"Show dpcmder version\")\n\n\tflag.Parse()\n\tsetDpPasswordPlain(*password)\n}", "func init() {\n\t// use all cpus in the system for concurrency\n\truntime.GOMAXPROCS(runtime.NumCPU())\n\n\tlog.SetOutput(os.Stdout)\n\n\tflag.Parse()\n\n\tif *showUsage {\n\t\tflag.Usage()\n\t\tos.Exit(0)\n\t}\n\n\tvar err error\n\tClient, err = as.NewClient(*Host, *Port)\n\tif err != nil {\n\t\tPanicOnError(err)\n\t}\n}", "func (a *ApplyImpl) Args() string {\n\targs := a.ApplyOptions.Args\n\tenableHelmDebug := a.GlobalImpl.Debug\n\n\tif enableHelmDebug {\n\t\targs = fmt.Sprintf(\"%s %s\", args, \"--debug\")\n\t}\n\n\treturn args\n}", "func collectArguments() Arguments {\n\tendpoint := config.Config.ChooseEndpoint(flags.APIEndpoint)\n\ttoken := config.Config.ChooseToken(endpoint, flags.Token)\n\tscheme := config.Config.ChooseScheme(endpoint, flags.Token)\n\n\treturn Arguments{\n\t\tapiEndpoint: endpoint,\n\t\tauthToken: token,\n\t\tscheme: scheme,\n\t\tclusterNameOrID: \"\",\n\t\tuserProvidedToken: flags.Token,\n\t\tverbose: flags.Verbose,\n\t}\n}", "func Init(options ...micro.Option) {\n\n\tsetupApp(cmd.App(), options...)\n\n\tcmd.Init(\n\t\tcmd.Name(Name),\n\t\tcmd.Description(Description),\n\t\tcmd.Version(BuildVersion()),\n\t)\n}", "func init() {\n\tRootCmd.AddCommand(AdminCmd)\n\tAdminCmd.AddCommand(UnsubscribeCmd, HealthCheckDb, HealthCheckStore, HealthCheckStats, SetVasCmd, SetConfig, DeleteConfig)\n\tSetVasCmd.Flags().String(FlagUserID, \"\", \"\")\n\tSetVasCmd.Flags().Bool(FlagEnabled, false, \"\")\n\tSetConfig.Flags().String(FlagKey, \"\", \"\")\n\tSetConfig.Flags().String(FlagValue, \"\", \"\")\n\tDeleteConfig.Flags().String(FlagKey, \"\", \"\")\n}", "func init() {\n\tport = flag.Int(\"port\", 3000, \"an int\")\n}", "func init() {\n\t// Add command names to cfssl usage\n\tflag.IntVar(&log.Level, \"loglevel\", log.LevelInfo, \"Log level\")\n\tflag.Usage = func() {\n\t\tfmt.Fprintf(os.Stderr, usage)\n\t\tfor name := range cmds {\n\t\t\tfmt.Fprintf(os.Stderr, \"%s\\n\", name)\n\t\t}\n\t}\n\t// Register commands.\n\tcmds = map[string]*Command{\n\t\t\"bundle\": CLIBundler,\n\t\t\"sign\": CLISigner,\n\t\t\"serve\": 
CLIServer,\n\t\t\"version\": CLIVersioner,\n\t\t\"genkey\": CLIGenKey,\n\t\t\"gencert\": CLIGenCert,\n\t\t\"selfsign\": CLISelfSign,\n\t}\n\t// Register all command flags.\n\tregisterFlags()\n}", "func init() {\n\t// We register a flag to get it shown in the default usage.\n\t//\n\t// We don't actually use the parsed flag value, though, since that would require us to call\n\t// flag.Parse() here. If we call flag.Parse(), then higher-level libraries can't easily add\n\t// their own flags, since testing's t.Run() will not re-run flag.Parse() if the flags have\n\t// already been parsed.\n\t//\n\t// Instead, we simply look for our flag text in os.Args.\n\n\tflag.Bool(\"help-docket\", false, \"get help on docket\")\n\n\tfor _, arg := range os.Args {\n\t\tif arg == \"-help-docket\" || arg == \"--help-docket\" {\n\t\t\twriteHelp(os.Stderr)\n\n\t\t\tconst helpExitCode = 2 // This matches what 'go test -h' returns.\n\t\t\tos.Exit(helpExitCode)\n\t\t}\n\t}\n}", "func init() {\n\n\tflag.StringVar(&Token, \"t\", \"\", \"Bot Token\")\n}", "func init() {\n\tvar (\n\t\tfile *os.File\n\t\terr error\n\t)\n\t// declare and parse program flags\n\tflag.StringVar(&processName, \"n\", \"\", \"process name\")\n\tflag.StringVar(&port, \"p\", \"\", \"Port number for worker e.g. \\\":4001\\\" \")\n\tflag.StringVar(&logFile, \"l\", \"\", \"logfile\")\n\tflag.BoolVar(&offline, \"o\", false, \"Work with no Smallworld connection, only GeoJSON files\")\n\tflag.Parse()\n\tif logFile == \"\" {\n\t\tlog.SetOutput(os.Stdout)\n\t} else {\n\t\tif file, err = os.Create(logFile); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tlog.SetOutput(file)\n\t}\n\tif port == \"\" {\n\t\tlog.Println(\"Missing port number\")\n\t\tos.Exit(1)\n\t}\n\tif ok, _ := regexp.Match(\"^\\\\d+$\", []byte(port)); ok {\n\t\tport = fmt.Sprintf(\":%s\", port)\n\t} else {\n\t\tif ok, _ := regexp.Match(\"^:\\\\d+$\", []byte(port)); !ok {\n\t\t\tlog.Printf(\"Wrong port number format %s. Expected e.g. :4000 or 4000\", port)\n\t\t\tos.Exit(1)\n\t\t}\n\t}\n\tlog.SetFlags(log.LstdFlags | log.Lshortfile)\n\tlog.Printf(\"Version: %s\\n\", version)\n\tlog.Printf(\"Port: %s\\n\", port)\n}", "func Init() {\n\tsetup()\n\targs := os.Args[1:]\n\tif len(args) < 1 {\n        intro()\n\t\tinteractiveLoop()\n\t}\n\terr := runCommand(args[0], args[1:])\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"ERROR: %s\\n\", err)\n\t\tos.Exit(1)\n\t}\n}", "func init() {\n\tAddCommand(\"mds\", \"Check metadata service process\", cmdMds, nil)\n}", "func parseArgs() {\n\tflag.StringVar(&masterPhrase, \"master\", \"\", \"The master phrase to use for password generation. Do NOT forget to escape any special characters contained in the master phrase (e.g. $, space etc).\")\n\tflag.StringVar(&masterPhraseFile, \"master-file\", \"\", \"The path to a file, containing the master phrase.\")\n\n\tflag.StringVar(&domain, \"domain\", \"\", \"The domain for which this password is intended\")\n\tflag.StringVar(&additionalInfo, \"additional-info\", \"\", \"Free text to add (e.g. index/timestamp/username if the previous password was compromised)\")\n\tflag.IntVar(&passLength, \"password-length\", 12, \"Define the length of the password.\")\n\tflag.BoolVar(&addSpecialChars, \"special-characters\", true, \"Whether to add a known set of special characters to the password\")\n\tflag.BoolVar(&addInfoToLog, \"log-info\", false, \"Whether to log the parameters that were used for generation to a file. 
Note that the password itself will NOT be stored!\")\n\n\tflag.Parse()\n}", "func CommandArgs(allArgs map[string]reflect.Value) {\n\tallArgs[\"config\"] = reflect.ValueOf(&ConfigFile)\n\tallArgs[\"cfg\"] = reflect.ValueOf(&ConfigFile)\n\tallArgs[\"help\"] = reflect.ValueOf(&Help)\n\tallArgs[\"h\"] = reflect.ValueOf(&Help)\n}", "func init() {\n\tRootCmd.AddCommand(DeployCmd)\n\tDeployCmd.Flags().StringP(\"file\", \"f\", \"\", \"file used to specify the job to deploy (required)\")\n\tDeployCmd.Flags().StringP(\"port\", \"p\", \"\", \"connect to a specific port (default: 3939)\")\n\tDeployCmd.MarkFlagRequired(\"file\")\n}", "func Init() {\n\tparseProgramArgs()\n\tlogging.DebugLogFile = *DebugLogFile\n\tlogging.TraceLogFile = *TraceLogFile\n\tlogging.LogDebug(\"config/Init() - dpcmder starting...\")\n\tvalidateProgramArgs()\n\tinitConfiguration()\n\tvalidatePassword()\n}", "func initMockedAppFromArgs(mt timer) *app {\n\tmainGreeter := greeter{\n\t\tT: mt,\n\t}\n\tmainApp := &app{\n\t\tg: mainGreeter,\n\t}\n\treturn mainApp\n}", "func init() {\n\tfor _, arg := range os.Args {\n\t\tif flag := strings.TrimLeft(arg, \"-\"); flag == MetricsEnabledFlag || flag == DashboardEnabledFlag {\n\t\t\tbgmlogs.Info(\"Enabling metics collection\")\n\t\t\tEnabled = true\n\t\t}\n\t}\n\texp.Exp(metics.DefaultRegistry)\n}", "func collectArguments() Arguments {\n\tendpoint := config.Config.ChooseEndpoint(flags.APIEndpoint)\n\ttoken := config.Config.ChooseToken(endpoint, flags.Token)\n\tscheme := config.Config.ChooseScheme(endpoint, flags.Token)\n\treturn Arguments{\n\t\tapiEndpoint: endpoint,\n\t\ttoken: token,\n\t\tscheme: scheme,\n\t}\n}", "func init() {\n\trootCmd.AddCommand(newAdrCmd)\n\tnewAdrCmd.Flags().StringVarP(&projectName, \"project-name\", \"n\", \"\", \"name of adr project\")\n\tnewAdrCmd.Flags().StringVarP(&title, \"adr-title\", \"t\", \"\", \"title of new adr\")\n\tnewAdrCmd.MarkFlagRequired(\"project-name\")\n\tnewAdrCmd.MarkFlagRequired(\"adr-title\")\n}", "func (this *NowStr) MinArgs() int { return 0 }", "func (c *PruneCommand) Init(args []string) error {\n\treturn c.fs.Parse(args)\n}", "func init() {\n\tflagset.Usage = func() {\n\t\tfmt.Println(\"Usage: stellaratomicswap [flags] cmd [cmd args]\")\n\t\tfmt.Println()\n\t\tfmt.Println(\"Commands:\")\n\t\tfmt.Println(\" initiate [-asset code:issuer] <initiator seed> <participant address> <amount>\")\n\t\tfmt.Println(\" participate [-asset code:issuer] <participant seed> <initiator address> <amount> <secret hash>\")\n\t\tfmt.Println(\" redeem <receiver seed> <holdingAccountAddress> <secret>\")\n\t\tfmt.Println(\" refund <refund transaction>\")\n\t\tfmt.Println(\" extractsecret <holdingAccountAddress> <secret hash>\")\n\t\tfmt.Println(\" auditcontract <holdingAccountAddress> <refund transaction>\")\n\t\tfmt.Println()\n\t\tfmt.Println(\"Flags:\")\n\t\tflagset.PrintDefaults()\n\t}\n}", "func (egs *ExampleGenServer) Init(p *ergo.Process, args ...interface{}) (state interface{}) {\n\tfmt.Printf(\"Init: args %v \\n\", args)\n\tegs.process = p\n\tInitialState := &State{\n\t\tvalue: args[0].(int), // 100\n\t}\n\treturn InitialState\n}", "func (c *LocalCmd) Args(args ...string) *LocalCmd {\n\tc.args = args\n\treturn c\n}", "func parseArgs(args []string) (*arguments, error) {\n\tif len(args) == 0 {\n\t\treturn nil, errors.Errorf(\"required input \\\" --src <Src_File> \\\" not found!\")\n\t}\n\n\tapp := kingpin.New(\"mircat\", \"Utility for processing Mir state event logs.\")\n\tsrc := app.Flag(\"src\", \"The input file to read (defaults to 
stdin).\").Default(os.Stdin.Name()).File()\n\t_, err := app.Parse(args)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &arguments{\n\t\tsrcFile: *src,\n\t}, nil\n}", "func main() {\n\tapp := cli.NewApp()\n\tapp.Name = \"go-init\"\n\tapp.Usage = \"Initialize a go project\"\n\tapp.Version = \"1.1.0\"\n\n\t// global flags\n\t//Log Level and Config Path\n\tapp.Flags = []cli.Flag{\n\t\tcli.StringFlag{\n\t\t\tName: \"configPath, c\",\n\t\t\tEnvVar: \"CONFIGPATH\",\n\t\t\tUsage: \"config path\",\n\t\t\tValue: \"./\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"configName, n\",\n\t\t\tEnvVar: \"CONFIGNAME\",\n\t\t\tUsage: \"name of the config\",\n\t\t\tValue: \"config\",\n\t\t},\n\t\tcli.BoolFlag{\n\t\t\tName: \"force, f\",\n\t\t\tEnvVar: \"FORCE\",\n\t\t\tUsage: \"force the creatino of the folder\",\n\t\t},\n\t}\n\tapp.Action = func(c *cli.Context) {\n\t\tif len(c.Args()) == 0 {\n\t\t\treturn\n\t\t}\n\t\tif len(c.Args()) == 1 {\n\t\t\tappName = c.Args()[0]\n\t\t\tdirectory = \"./\"\n\n\t\t} else {\n\t\t\tdirectory = c.Args()[0]\n\t\t\tappName = c.Args()[1]\n\t\t}\n\t\tGenerate(c)\n\t}\n\tapp.Before = func(ctx *cli.Context) error {\n\t\tconfigPath = ctx.String(\"configPath\")\n\t\tconfigName = ctx.String(\"ConfigName\")\n\t\tforce = ctx.Bool(\"force\")\n\t\treturn nil\n\t}\n\tapp.Run(os.Args)\n\n}", "func init() {\n\tflag.Parse()\n\t// Init the models and backend redis store.\n\trs := libstore.NewStore(*redisServer)\n\tuser.Setup(rs)\n\tfeed.Setup(rs)\n\t// Init feeder.\n\tfd = feeder.NewFeeder(\"http://localhost:\" + *keywordServerEndPoint)\n}", "func InitFlags(flagset *flag.FlagSet) {\n\tif flagset == nil {\n\t\tflagset = flag.CommandLine\n\t}\n\n\tcommandLine.VisitAll(func(f *flag.Flag) {\n\t\tflagset.Var(f.Value, f.Name, f.Usage)\n\t})\n}", "func main() {\n name := flag.String(\"n\", \"world\", \"Specify the user's name.\")\n\n // See https://pkg.go.dev/flag\n flag.Usage = func() {\n fmt.Fprintf(os.Stderr, \"usage: %s [options]\\n\\nPrints a message as an example of parsing CLI args in Go.\\n\\nOptions:\\n\", os.Args[0])\n\n flag.PrintDefaults()\n }\n\n flag.Parse()\n\n fmt.Printf(\"Hello %s!\\n\\nYou ran the Go seed script!\\n\", *name)\n}", "func SkipArgs() Option { return Option{skipArgs: true} }", "func readargs() config {\n\tvar db string\n\tvar verarg bool\n\tvar nw uint\n\tflag.StringVar(&db, \"database\", \"\", \"database to use for determining forks; if unspecified, no fork detection is performed\")\n\tflag.UintVar(&nw, \"nworkers\", 4, \"number of concurrent workers\")\n\tflag.BoolVar(&verarg, \"version\", false, \"show version information\")\n\tflag.Usage = printusage\n\n\tflag.Parse()\n\n\tif verarg {\n\t\tprintversion()\n\t}\n\n\tif flag.NArg() > 1 {\n\t\tflag.Usage()\n\t}\n\n\trepostore := flag.Arg(0)\n\treturn config{Repostore: repostore, Database: db, NWorkers: uint(nw)}\n}", "func main() {\n\targs := os.Args[1:]\n\t// if len(args) == 5 {\n\t// \tfmt.Println(\"There are 5 arguments\")\n\t// } else if len(args) == 2 {\n\t// \tfmt.Printf(\"There are %d arguments: %s\\n\", len(args), args)\n\t// } else {\n\t// \tfmt.Println(strings.TrimSpace(usage))\n\t// }\n\tif len(args) >= 2 {\n\t\tfmt.Printf(\"There are %d arguments: %s\\n\", len(args), strings.Join(args, \" \"))\n\t} else {\n\t\tfmt.Println(usage)\n\t}\n}", "func Initialize() {\n\tfor cmd, fs := range commandFlagSets {\n\t\tcommandPathFlagSets[cmd.CommandPath()] = fs\n\t}\n}", "func main( /*no arguments*/ ) /*no return value*/ {\n\n\t// Command line arguments are provided by os.Args as []string.\n\tif len(os.Args) 
== 1 {\n\t\tfmt.Println(\"Hallo Welt!\")\n\t} else {\n\t\tfor i := 1; i < len(os.Args); i++ {\n\t\t\tfmt.Print(os.Args[i])\n\t\t\tif i+1 < len(os.Args) {\n\t\t\t\tfmt.Print(\", \")\n\t\t\t}\n\t\t}\n\t\tfmt.Println()\n\t}\n\n\t// This is a method call.\n\tvar args MyType\n\targs = os.Args\n\tn1 := args.CountArgs()\n\tfmt.Printf(\"n1 = %v\\n\", n1) // %v - value in default format\n\n\t// This is a function call.\n\tn2 := CountArgs(os.Args)\n\tfmt.Printf(\"n2 = %v\\n\", n2)\n}", "func (g *UICommand) Init(args []string) error {\n\terr := g.fs.Parse(args)\n\tif err != nil {\n\t\treturn err\n\t}\n\tg.ProcessDefaults()\n\n\t// load config file if we have one\n\tif g.config != \"\" {\n\t\terr = g.meshConfig.LoadConfigFromFile(g.config)\n\t\tif err != nil {\n\t\t\tlog.WithError(err).Error(\"Config read error\")\n\t\t\treturn fmt.Errorf(\"Unable to read configuration from %s\", g.config)\n\t\t}\n\t}\n\n\terr = g.fs.Parse(args)\n\tif err != nil {\n\t\treturn err\n\t}\n\tlog.WithField(\"cfg\", g.meshConfig).Trace(\"Read\")\n\tlog.WithField(\"cfg.agent\", g.meshConfig.Agent).Trace(\"Read\")\n\n\treturn nil\n}", "func (c *Configurator) SetArgs(args []string) {\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\n\tc.args = args\n}", "func init() {\n\tflag.StringVar(&serverIp, \"ip\", \"127.0.0.1\", \"Configure server IP address (default is 127.0.0.1)\")\n\tflag.IntVar(&serverPort, \"port\", 8888, \"Configure server port (default is 8888)\")\n}", "func init() {\n\tflag.StringVar(&cfg.tzlib, \"tzlib\", \"./tzlib.json\", \"Use database file <tzlib>\")\n\tflag.StringVar(&cfg.from, \"from\", \"\", \"Import HTML from <from> and write database to <tzlib>\")\n\tflag.StringVar(&cfg.webroot, \"webroot\", \"\", \"Serve the <webroot> directory at '/'\")\n\tflag.StringVar(&cfg.endpoint, \"endpoint\", \"/dann\", \"Serve the API at <endpoint>\")\n\tflag.StringVar(&cfg.bind, \"bind\", \"localhost:1620\", \"Listen on <bind> for connections. 
\")\n\n\tflag.Parse()\n}", "func Init() {\n // host flag (required)\n Pattern.Flags().StringVarP(&host, \"host\", \"H\", \"\",\n \"the target machine's IP address\")\n Pattern.MarkFlagRequired(\"host\")\n\n // port flag (required)\n Pattern.Flags().IntVarP(&port, \"port\", \"P\", 0,\n \"the port the target service is running on\")\n Pattern.MarkFlagRequired(\"port\")\n\n // prefix and suffix flags (optional)\n Pattern.Flags().StringVarP(&pref, \"prefix\", \"p\", \"\",\n \"(optional) prefix to put before payload\")\n Pattern.Flags().StringVarP(&suff, \"suffix\", \"s\", \"\",\n \"(optional) suffix to put after payload\")\n\n // length flag (required)\n Pattern.Flags().IntVarP(&length, \"length\", \"l\", 0,\n \"the length of the cyclic pattern sent to the target\")\n Pattern.MarkFlagRequired(\"length\")\n}", "func (launcher *Launcher) SetArgs(args []string) {\n\tlauncher.Mutex.Lock()\n\tlauncher.args = args\n\tlauncher.Mutex.Unlock()\n}", "func init() {\n\tflag.Parse()\n\tPool = newPool(*redisServer)\n}", "func (args *CliArgs) checkArgs() {\n\t// print all filed of the object\n\tif !(args.startPage > 0 && args.endPage > 0 && args.endPage-args.startPage >= 0) {\n\t\tfmt.Fprintf(os.Stderr, \"start page and end page should be positive and endpage should be bigger than startpage\")\n\t\tos.Exit(1)\n\t}\n\n\tif args.isFtype {\n\t\tif args.lineNumPerPage != specialNum {\n\t\t\tfmt.Fprintln(os.Stderr, \"Fatal: setting -f and -l simultaneously is not allowed\")\n\t\t\tos.Exit(1)\n\t\t}\n\t} else {\n\t\tif args.lineNumPerPage == specialNum {\n\t\t\targs.lineNumPerPage = defaultLineNum\n\t\t} else if args.lineNumPerPage < 0 {\n\t\t\tfmt.Fprintln(os.Stderr, \"Fatal: the linenum should be positive\")\n\t\t\tos.Exit(1)\n\t\t}\n\t}\n\n\t// set default number of linenumber\n\tif pflag.NArg() != 0 {\n\t\targs.inFilename = pflag.Args()[0]\n\t}\n\n\tfmt.Printf(\"%+v\", args)\n}", "func (a *app) initializeCLIArrgs() {\n\tflag.BoolVar(&a.cons, \"cons\", false, \"run check/clean process constantly\")\n\tflag.BoolVar(&a.clean, \"clean\", false, \"clean the directory with video files\")\n\tflag.BoolVar(&a.stat, \"stat\", false, \"output Google Drive usage statistic\")\n\tflag.StringVar(&a.videoDir, \"dir\", \"RubetekVideo\", \"name of a directory where video files stored\")\n\tflag.IntVar(&a.checkInterval, \"interval\", 1, \"check interval (minutes)\")\n\tflag.IntVar(&a.delCount, \"count\", 720, \"how many files should be deleted\")\n\tflag.IntVar(&a.freeLimit, \"limit\", 10, \"minimum percentage of free space to keep\")\n\n\tflag.Parse()\n\n\tif !a.stat && !a.cons && !a.clean {\n\t\ta.clean = true\n\t}\n}", "func init() {\n\tcobra.OnInitialize(initConfig)\n\n\tRootCmd.PersistentFlags().StringP(\"config\", \"\", \"\", \"config file to use\")\n\tRootCmd.PersistentFlags().BoolP(\"verbose\", \"v\", false, \"verbose output\")\n\n\tRootCmd.Flags().StringSlice(\"kafka.brokers\", []string{\"localhost:9092\"}, \"kafka brokers address\")\n\tRootCmd.Flags().String(\"metrics.addr\", \"127.0.0.1:9100\", \"metrics address\")\n\n\tif err := viper.BindPFlags(RootCmd.PersistentFlags()); err != nil {\n\t\tlog.WithError(err).Error(\"Could not bind persistent flags\")\n\t}\n\n\tif err := viper.BindPFlags(RootCmd.Flags()); err != nil {\n\t\tlog.WithError(err).Error(\"Could not bind flags\")\n\t}\n}" ]
[ "0.74348855", "0.71042395", "0.6630047", "0.6588652", "0.6358079", "0.62932676", "0.62634456", "0.6233771", "0.62128526", "0.6169773", "0.6157398", "0.6115356", "0.6105673", "0.6088985", "0.605611", "0.6049494", "0.5983334", "0.5979197", "0.59693015", "0.59642893", "0.59551996", "0.59458125", "0.59335196", "0.5915211", "0.58895046", "0.58836824", "0.588253", "0.5850337", "0.5837939", "0.58364415", "0.58364415", "0.5803736", "0.5799459", "0.5797765", "0.5794021", "0.5787891", "0.578563", "0.5784634", "0.578463", "0.5780237", "0.5740671", "0.5734499", "0.5721322", "0.5713834", "0.57016975", "0.5698494", "0.56972235", "0.5691216", "0.5689705", "0.56876916", "0.5684616", "0.56826496", "0.5680271", "0.56738037", "0.5668261", "0.56679153", "0.5664612", "0.565169", "0.56363225", "0.56276965", "0.56184673", "0.56154484", "0.5600106", "0.5596819", "0.5595278", "0.55944175", "0.55815446", "0.5569393", "0.55684805", "0.5561295", "0.5561033", "0.555573", "0.5554619", "0.5536389", "0.5535892", "0.55255", "0.551957", "0.5509291", "0.54985297", "0.54981035", "0.5489776", "0.547978", "0.5477818", "0.54727477", "0.54639393", "0.54634875", "0.5462485", "0.54612887", "0.5461239", "0.54579806", "0.5454547", "0.5454263", "0.5450075", "0.54471064", "0.54428196", "0.544141", "0.54382855", "0.5437016", "0.54341614", "0.5426564" ]
0.6201516
9
buildLogMsg generates an empty CI test plan that prints msg to the build log.
func buildLogMsg(title, msg string) droneyaml.BuildItem { return droneyaml.BuildItem{ Key: "Warning: " + title, Build: droneyaml.Build{ Container: droneyaml.Container{ Image: "library/alpine:3.2", Environment: droneyaml.MapEqualSlice([]string{"MSG=" + msg}), }, Commands: []string{`echo "$MSG"`}, }, } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (t *tracer) buildMessage() string {\n\tif t.IsNull() {\n\t\treturn \"\"\n\t}\n\n\t// Note: this value is very important, it makes sure the internal calls of this package would not interfere with the real caller we want to catch\n\t// badly set and you will get a line number that does not match with the one corresponding to the call\n\tconst skipCallers int = 2\n\n\tmessage := t.taskSig\n\tif _, _, line, ok := runtime.Caller(skipCallers); ok {\n\t\tmessage += \" \" + t.funcName + t.callerParams + \" [\" + t.fileName + \":\" + strconv.Itoa(line) + \"]\"\n\t}\n\treturn message\n}", "func buildNoRetryString(job string, outliers []string) string {\n\tnoRetryFmt := \"Failed non-flaky tests preventing automatic retry of %s:\\n\\n```\\n%s\\n```%s\"\n\textraFailedTests := \"\"\n\n\tlastIndex := len(outliers)\n\tif len(outliers) > maxFailedTestsToPrint {\n\t\tlastIndex = maxFailedTestsToPrint\n\t\textraFailedTests = fmt.Sprintf(\"\\n\\nand %d more.\", len(outliers)-maxFailedTestsToPrint)\n\t}\n\treturn fmt.Sprintf(noRetryFmt, job, strings.Join(outliers[:lastIndex], \"\\n\"), extraFailedTests)\n}", "func (w *AlertMethod) BuildMessage(rule string, records []*alert.Record) (string, error) {\n\tmsg := fmt.Sprintf(\"%s\\n\", rule)\n\tfor _, record := range records {\n\t\ttext := record.Text\n\t\tif record.Fields != nil {\n\t\t\tvar fields = \"\"\n\t\t\tfor _, field := range record.Fields {\n\t\t\t\tfields += fmt.Sprintf(\"\\n%s: %d\", field.Key, field.Count)\n\t\t\t}\n\t\t\ttext += fields\n\t\t}\n\t\tmsg += text + \"\\n\"\n\t}\n\treturn msg, nil\n}", "func BuildMsg(from sdk.AccAddress, to sdk.AccAddress, coins sdk.Coins) sdk.Msg {\n\tinput := bank.NewInput(from, coins)\n\toutput := bank.NewOutput(to, coins)\n\tmsg := bank.NewMsgSend([]bank.Input{input}, []bank.Output{output})\n\treturn msg\n}", "func Log(msg string) {\n\tfmt.Println(color.BlueString(\"Builder:\") + \" \" + msg)\n}", "func TestPrintLog( test *testing.T ) {\n testLog := ErrorLog{ nil , nil }\n\n testLog.addLog( \"First Log\\n\" )\n testLog.addLog( \"Second Log\\n\" )\n testLog.addLog( \"Third Log\\n\" )\n\n print(\"Expect:\\nFirst Log\\nSecond Log\\nThird Log\\nGot:\\n\")\n\n testLog.printLog()\n}", "func Test_Errorcode_Build(t *testing.T) {\n\n\terrorCode := uerrors.NewCodeErrorWithPrefix(\"test\", \"test0001\")\n\n\terrorCode.WithMsgBody(\"this is error message content with param.\")\n\terrorCode.WithMsgBody(\"params: ${p1} , ${p2} , ${p3}.\")\n\n\t//log.Printf()\n\n\tres := errorCode.Build(\"hello-message \", \"my deal-other \", \"define\")\n\tfmt.Println(res)\n\n\tfmt.Println(\"case 2\")\n\tres = errorCode.Build(\"hello-message2 \", \"my deal-other2 \", \"define2\")\n\tfmt.Println(res)\n\n}", "func buildNewComment(jd *JobData, entries map[string]int, outliers []string) string {\n\tvar cmd string\n\tvar entryString []string\n\tif entries[jd.JobName] >= maxRetries {\n\t\tcmd = buildOutOfRetriesString(jd.JobName)\n\t\tlogWithPrefix(jd, \"expended all %d retries\\n\", maxRetries)\n\t} else if len(outliers) > 0 {\n\t\tcmd = buildNoRetryString(jd.JobName, outliers)\n\t\tlogWithPrefix(jd, \"%d failed tests are not flaky, cannot retry\\n\", len(outliers))\n\t} else {\n\t\tcmd = buildRetryString(jd.JobName, entries)\n\t\tlogWithPrefix(jd, \"all failed tests are flaky, triggering retry\\n\")\n\t}\n\t// print in sorted order so we can actually unit test the results\n\tvar keys []string\n\tfor test := range entries {\n\t\tkeys = append(keys, test)\n\t}\n\tsort.Strings(keys)\n\tfor _, test := range keys {\n\t\tentryString = 
append(entryString, fmt.Sprintf(\"%s | %d/%d\", test, entries[test], maxRetries))\n\t}\n\treturn fmt.Sprintf(commentTemplate, identifier, strings.Join(entryString, \"\\n\"), cmd)\n}", "func startMessage(cfg *Config) {\n\tif len(cfg.Tasks) > 0 {\n\t\tcolor.Blue(\"You have %d tasks to execute\", len(cfg.Tasks))\n\t}\n\tif len(cfg.ParallelTasks) > 0 {\n\t\tcolor.Blue(\"You have %d parallel tasks to execute\", len(cfg.ParallelTasks))\n\t}\n}", "func sendTestLog(ctx context.Context, comm client.Communicator, conf *internal.TaskConfig, log *model.TestLog) error {\n\treturn errors.Wrap(sendTestLogToCedar(ctx, conf.Task, comm, log), \"sending test logs to Cedar\")\n}", "func (o *ControllerBuildOptions) generateBuildLogURL(podInterface typedcorev1.PodInterface, ns string, activity *v1.PipelineActivity, buildName string, pod *corev1.Pod, location v1.StorageLocation, settings *v1.TeamSettings, initGitCredentials bool, logMasker *kube.LogMasker) (string, error) {\n\n\tvar gitKind string\n\tif initGitCredentials && location.GitURL != \"\" {\n\t\tgitInfo, err := gits.ParseGitURL(location.GitURL)\n\t\tif err != nil {\n\t\t\treturn \"\", errors.Wrapf(err, \"could not parse git URL for storage URL %s\", location.GitURL)\n\t\t}\n\t\tgitKind, err = o.GitServerKind(gitInfo)\n\t\tif err != nil {\n\t\t\treturn \"\", errors.Wrapf(err, \"could not determine git kind for storage URL %s\", location.GitURL)\n\t\t}\n\t}\n\tlog.Logger().Debugf(\"Collecting logs for %s to location %s\", activity.Name, location.Description())\n\tcoll, err := collector.NewCollector(location, o.Git(), gitKind)\n\tif err != nil {\n\t\treturn \"\", errors.Wrapf(err, \"could not create Collector for pod %s in namespace %s with settings %#v\", pod.Name, ns, settings)\n\t}\n\n\towner := activity.RepositoryOwner()\n\trepository := activity.RepositoryName()\n\tbranch := activity.BranchName()\n\tbuildNumber := activity.Spec.Build\n\tif buildNumber == \"\" {\n\t\tbuildNumber = \"1\"\n\t}\n\n\tpathDir := filepath.Join(\"jenkins-x\", \"logs\", owner, repository, branch)\n\tfileName := filepath.Join(pathDir, buildNumber+\".log\")\n\n\tvar clientErrs []error\n\tkubeClient, err := o.KubeClient()\n\tclientErrs = append(clientErrs, err)\n\ttektonClient, _, err := o.TektonClient()\n\tclientErrs = append(clientErrs, err)\n\tjx, _, err := o.JXClient()\n\tclientErrs = append(clientErrs, err)\n\n\terr = errorutil.CombineErrors(clientErrs...)\n\tif err != nil {\n\t\treturn \"\", errors.Wrap(err, \"there was a problem obtaining one of the clients\")\n\t}\n\n\ttektonLogger := logs.TektonLogger{\n\t\tJXClient: jx,\n\t\tKubeClient: kubeClient,\n\t\tTektonClient: tektonClient,\n\t\tNamespace: ns,\n\t}\n\n\tlog.Logger().Debugf(\"Capturing running build logs for %s\", activity.Name)\n\treader := streamMaskedRunningBuildLogs(&tektonLogger, activity, buildName, logMasker)\n\tdefer reader.Close()\n\n\tif initGitCredentials {\n\t\tgc := &credentials.StepGitCredentialsOptions{}\n\t\tcopy := *o.CommonOptions\n\t\tgc.CommonOptions = &copy\n\t\tgc.BatchMode = true\n\t\tlog.Logger().Info(\"running: jx step git credentials\")\n\t\terr = gc.Run()\n\t\tif err != nil {\n\t\t\treturn \"\", errors.Wrapf(err, \"Failed to setup git credentials\")\n\t\t}\n\t}\n\n\tlog.Logger().Infof(\"storing logs for activity %s into storage at %s\", activity.Name, fileName)\n\tlogURL, err := coll.CollectData(reader, fileName)\n\tif err != nil {\n\t\tlog.Logger().Errorf(\"failed to store logs for activity %s into storage at %s: %s\", activity.Name, fileName, err.Error())\n\t\treturn \"\", 
err\n\t}\n\tlog.Logger().Infof(\"stored logs for activity %s into storage at %s\", activity.Name, fileName)\n\n\treturn logURL, nil\n}", "func (m *Main) BuildReport() {\n\tpass := true\n\tfor _, t := range m.Tests {\n\t\tif !t.Result.Pass {\n\t\t\tpass = false\n\t\t\tbreak\n\t\t}\n\t}\n\tm.Report.Pass = pass\n\tfor _, t := range m.Tests {\n\t\tm.ReportResult(t.Result)\n\t}\n}", "func BuildLogRequestMessage(version int) ([]byte, error) {\n\tswitch version {\n\tcase rtsv2:\n\t\treturn buildMessage2(\n\t\t\tNewRtsConnection_2WithRtsLogRequest(\n\t\t\t\t&RtsLogRequest{\n\t\t\t\t\tMode: 0,\n\t\t\t\t\tFilter: []string{},\n\t\t\t\t},\n\t\t\t),\n\t\t)\n\tcase rtsv3:\n\t\treturn buildMessage3(\n\t\t\tNewRtsConnection_3WithRtsLogRequest(\n\t\t\t\t&RtsLogRequest{\n\t\t\t\t\tMode: 0,\n\t\t\t\t\tFilter: []string{},\n\t\t\t\t},\n\t\t\t),\n\t\t)\n\tcase rtsv4:\n\t\treturn buildMessage4(\n\t\t\tNewRtsConnection_4WithRtsLogRequest(\n\t\t\t\t&RtsLogRequest{\n\t\t\t\t\tMode: 0,\n\t\t\t\t\tFilter: []string{},\n\t\t\t\t},\n\t\t\t),\n\t\t)\n\tcase rtsv5:\n\t\treturn buildMessage5(\n\t\t\tNewRtsConnection_5WithRtsLogRequest(\n\t\t\t\t&RtsLogRequest{\n\t\t\t\t\tMode: 0,\n\t\t\t\t\tFilter: []string{},\n\t\t\t\t},\n\t\t\t),\n\t\t)\n\tdefault:\n\t\treturn nil, errors.New(errUnsupportedVersion)\n\t}\n}", "func (this *commonResult) addLog(header string, org_msg string) {\n\t_, file, line, _ := runtime.Caller(2)\n\t_, fileName := path.Split(file)\n\n\torg_msg = strings.TrimSuffix(org_msg, \"\\n\")\n\n\toutput := fmt.Sprintf(\n\t\theader+\" %s %s %s::%d\",\n\t\torg_msg,\n\t\ttime.Now().Format(time.RFC3339),\n\t\tfileName,\n\t\tline,\n\t)\n\tif !strings.HasSuffix(output, \"\\n\") {\n\t\toutput += \"\\n\"\n\t}\n\n\tthis.messages = append(this.messages, output)\n}", "func (i *Irc) buildMessage(inMsg irc.Msg) msg.Message {\n\t// Check for the user\n\tu := user.User{\n\t\tName: inMsg.Origin,\n\t}\n\n\tchannel := inMsg.Args[0]\n\tif channel == i.config.Get(\"Nick\", \"bot\") {\n\t\tchannel = inMsg.Args[0]\n\t}\n\n\tisAction := false\n\tvar message string\n\tif len(inMsg.Args) > 1 {\n\t\tmessage = inMsg.Args[1]\n\n\t\tisAction = strings.HasPrefix(message, actionPrefix)\n\t\tif isAction {\n\t\t\tmessage = strings.TrimRight(message[len(actionPrefix):], \"\\x01\")\n\t\t\tmessage = strings.TrimSpace(message)\n\t\t}\n\n\t}\n\n\tiscmd := false\n\tfilteredMessage := message\n\tif !isAction {\n\t\tiscmd, filteredMessage = bot.IsCmd(i.config, message)\n\t}\n\n\tmsg := msg.Message{\n\t\tUser: &u,\n\t\tChannel: channel,\n\t\tBody: filteredMessage,\n\t\tRaw: message,\n\t\tCommand: iscmd,\n\t\tAction: isAction,\n\t\tTime: time.Now(),\n\t\tHost: inMsg.Host,\n\t}\n\n\treturn msg\n}", "func BuildReport(r Result) string {\n\tif r.err != nil {\n\t\treturn \"-------------------------------------\\n\" + r.Url + \" report\\n-------------------------------------\\ntime : \" + r.time + \"\\nerror : \" + r.err.Error() + \"\\n\\n\"\n\t}\n\treturn \"-------------------------------------\\n\" + r.Url + \" report\\n-------------------------------------\\ntime : \" + r.time + \"\\nexpired : \" + strconv.FormatBool(!r.valid) + \"\\nexpiration : \" + r.expiry + \"\\n\\n\"\n}", "func CIMessage(messageType string, data interface{}) {\n\tif RunningOnTeamCity() {\n\t\tmessage := \"##teamcity[\" + messageType\n\n\t\tswitch d := data.(type) {\n\t\tcase string:\n\t\t\tescaped := ciEscape(d)\n\t\t\tmessage += fmt.Sprintf(\" '%s'\", escaped)\n\t\tcase map[string]string:\n\t\t\tfor k, v := range d {\n\t\t\t\tescaped := ciEscape(v)\n\t\t\t\tmessage += fmt.Sprintf(\" 
%s='%s'\", k, escaped)\n\t\t\t}\n\t\t}\n\t\tmessage += \"]\"\n\t\tlog.Println(message)\n\t} else {\n\t\tlog.Printf(\"%s: %#v\", messageType, data)\n\t}\n}", "func Test_SimpleLogger(t *testing.T) {\n\tdefer b.Reset()\n\n\tt.Run(\"NoFields\", func(t *testing.T) {\n\t\tdefer b.Reset()\n\t\tlog.InitSimpleLogger(&log.Config{\n\t\t\tOutput: b,\n\t\t})\n\n\t\ttests := []struct {\n\t\t\tlevel string\n\t\t\tfile string\n\t\t\tfunction string\n\t\t\tf func(msg string)\n\t\t}{\n\t\t\t{\n\t\t\t\tlevel: \"ERROR\",\n\t\t\t\tfile: \"log_test.go\",\n\t\t\t\tfunction: \"1()\",\n\t\t\t\tf: log.Error,\n\t\t\t},\n\t\t\t{\n\t\t\t\tlevel: \"INFO \",\n\t\t\t\tfile: \"log_test.go\",\n\t\t\t\tfunction: \"1()\",\n\t\t\t\tf: log.Info,\n\t\t\t},\n\t\t\t{\n\t\t\t\tlevel: \"DEBUG\",\n\t\t\t\tfile: \"log_test.go\",\n\t\t\t\tfunction: \"1()\",\n\t\t\t\tf: log.Debug,\n\t\t\t},\n\t\t\t{\n\t\t\t\tlevel: \"WARN \",\n\t\t\t\tfile: \"log_test.go\",\n\t\t\t\tfunction: \"1()\",\n\t\t\t\tf: log.Warn,\n\t\t\t},\n\t\t}\n\n\t\tfor _, test := range tests {\n\t\t\tt.Run(test.level, func(t *testing.T) {\n\t\t\t\ttest.f(\"there was an error\")\n\t\t\t\tdefer b.Reset()\n\n\t\t\t\tout := b.String()\n\n\t\t\t\tassureSingleNewline(out, t)\n\n\t\t\t\tlevel, file, function, _ := splitMessage(out, t)\n\n\t\t\t\tif level != test.level {\n\t\t\t\t\tt.Errorf(\"expected level: '%s'. actual level: '%s'\", test.level, level)\n\t\t\t\t}\n\n\t\t\t\tif file != test.file {\n\t\t\t\t\tt.Errorf(\"expected file: '%s'. actual file: '%s'\", test.file, file)\n\t\t\t\t}\n\n\t\t\t\tif function != test.function {\n\t\t\t\t\tt.Errorf(\"expected function: '%s'. actual function: '%s'\", test.function, function)\n\t\t\t\t}\n\n\t\t\t\tif len(strings.Split(strings.TrimSpace(out), \"\\n\")) > 1 {\n\t\t\t\t\tt.Errorf(\"expected single line log point: '%s\", out)\n\t\t\t\t}\n\t\t\t})\n\t\t}\n\t})\n\n\tt.Run(\"WithFields\", func(t *testing.T) {\n\t\tdefer b.Reset()\n\t\tt.Run(\"Single Field\", func(t *testing.T) {\n\t\t\tlog.InitSimpleLogger(&log.Config{\n\t\t\t\tOutput: b,\n\t\t\t})\n\n\t\t\ttests := []struct {\n\t\t\t\tlevel string\n\t\t\t\tfile string\n\t\t\t\tfunction string\n\t\t\t\tkey string\n\t\t\t\tvalue interface{}\n\t\t\t\tf func(string)\n\t\t\t}{\n\t\t\t\t{\n\t\t\t\t\tlevel: \"ERROR\",\n\t\t\t\t\tfile: \"log_test.go\",\n\t\t\t\t\tfunction: \"1()\",\n\t\t\t\t\tkey: \"sample\",\n\t\t\t\t\tvalue: \"banana\",\n\t\t\t\t\tf: log.WithFields(log.Fields{\"sample\": \"banana\"}).Error,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tlevel: \"INFO \",\n\t\t\t\t\tfile: \"log_test.go\",\n\t\t\t\t\tfunction: \"1()\",\n\t\t\t\t\tkey: \"text\",\n\t\t\t\t\tvalue: 1,\n\t\t\t\t\tf: log.WithFields(log.Fields{\"text\": 1}).Info,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tlevel: \"DEBUG\",\n\t\t\t\t\tfile: \"log_test.go\",\n\t\t\t\t\tfunction: \"1()\",\n\t\t\t\t\tkey: \"burger\",\n\t\t\t\t\tvalue: []string{\"sorry fellas\"},\n\t\t\t\t\tf: log.WithFields(log.Fields{\"burger\": []string{\"sorry fellas\"}}).Debug,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tlevel: \"WARN \",\n\t\t\t\t\tfile: \"log_test.go\",\n\t\t\t\t\tfunction: \"1()\",\n\t\t\t\t\tkey: \"salad\",\n\t\t\t\t\tvalue: \"fortnite\",\n\t\t\t\t\tf: log.WithFields(log.Fields{\"salad\": \"fortnite\"}).Warn,\n\t\t\t\t},\n\t\t\t}\n\n\t\t\tfor _, test := range tests {\n\t\t\t\tt.Run(test.level, func(t *testing.T) {\n\t\t\t\t\ttest.f(\"there was an error\")\n\t\t\t\t\tdefer b.Reset()\n\n\t\t\t\t\tout := b.String()\n\n\t\t\t\t\t//t.Log(b.String())\n\n\t\t\t\t\tassureSingleNewline(out, t)\n\n\t\t\t\t\tlevel, file, function, _ := splitMessage(out, 
t)\n\n\t\t\t\t\tif level != test.level {\n\t\t\t\t\t\tt.Errorf(\"expected level: '%s'. actual level: '%s'\", test.level, level)\n\t\t\t\t\t}\n\n\t\t\t\t\tif file != test.file {\n\t\t\t\t\t\tt.Errorf(\"expected file: '%s'. actual file: '%s'\", test.file, file)\n\t\t\t\t\t}\n\n\t\t\t\t\tif function != test.function {\n\t\t\t\t\t\tt.Errorf(\"expected function: '%s'. actual function: '%s'\", test.function, function)\n\t\t\t\t\t}\n\n\t\t\t\t\tif ok, fields := hasField(test.key, test.value, out, t); !ok {\n\t\t\t\t\t\tt.Errorf(\"expected fields to contain: '%s=%v. actual fields total: %s\", test.key, test.value, fields)\n\t\t\t\t\t}\n\t\t\t\t})\n\t\t\t}\n\t\t})\n\n\t\tt.Run(\"Multiple Fields\", func(t *testing.T) {\n\t\t\tdefer b.Reset()\n\t\t\tlog.InitSimpleLogger(&log.Config{\n\t\t\t\tOutput: b,\n\t\t\t})\n\n\t\t\ttests := []struct {\n\t\t\t\tlevel string\n\t\t\t\tfile string\n\t\t\t\tfunction string\n\t\t\t\tfields log.Fields\n\t\t\t\tf func(string)\n\t\t\t}{\n\t\t\t\t{\n\t\t\t\t\tlevel: \"ERROR\",\n\t\t\t\t\tfile: \"log_test.go\",\n\t\t\t\t\tfunction: \"1()\",\n\t\t\t\t\tfields: log.Fields{\n\t\t\t\t\t\t\"one\": 1,\n\t\t\t\t\t\t\"two\": \"2\",\n\t\t\t\t\t\t\"three\": []string{\"1\", \"2\", \"3\"},\n\t\t\t\t\t},\n\t\t\t\t\tf: log.WithFields(log.Fields{\n\t\t\t\t\t\t\"one\": 1,\n\t\t\t\t\t\t\"two\": \"2\",\n\t\t\t\t\t\t\"three\": []string{\"1\", \"2\", \"3\"},\n\t\t\t\t\t}).Error,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tlevel: \"INFO \",\n\t\t\t\t\tfile: \"log_test.go\",\n\t\t\t\t\tfunction: \"1()\",\n\t\t\t\t\tfields: log.Fields{\n\t\t\t\t\t\t\"sample\": \"this is a long piece of text\",\n\t\t\t\t\t\t\"true\": false,\n\t\t\t\t\t\t\"false\": true,\n\t\t\t\t\t},\n\t\t\t\t\tf: log.WithFields(log.Fields{\n\t\t\t\t\t\t\"sample\": \"this is a long piece of text\",\n\t\t\t\t\t\t\"true\": false,\n\t\t\t\t\t\t\"false\": true,\n\t\t\t\t\t}).Info,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tlevel: \"WARN \",\n\t\t\t\t\tfile: \"log_test.go\",\n\t\t\t\t\tfunction: \"1()\",\n\t\t\t\t\tfields: log.Fields{\n\t\t\t\t\t\t\"one\": nil,\n\t\t\t\t\t\t\"okay but\": \"epic\",\n\t\t\t\t\t},\n\t\t\t\t\tf: log.WithFields(log.Fields{\n\t\t\t\t\t\t\"one\": nil,\n\t\t\t\t\t\t\"okay but\": \"epic\",\n\t\t\t\t\t}).Warn,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tlevel: \"DEBUG\",\n\t\t\t\t\tfile: \"log_test.go\",\n\t\t\t\t\tfunction: \"1()\",\n\t\t\t\t\tfields: log.Fields{\n\t\t\t\t\t\t\"teamwork\": -1,\n\t\t\t\t\t\t\"dreamwork\": []bool{false, true},\n\t\t\t\t\t},\n\t\t\t\t\tf: log.WithFields(log.Fields{\n\t\t\t\t\t\t\"teamwork\": -1,\n\t\t\t\t\t\t\"dreamwork\": []bool{false, true},\n\t\t\t\t\t}).Debug,\n\t\t\t\t},\n\t\t\t}\n\n\t\t\tfor _, test := range tests {\n\t\t\t\tt.Run(test.level, func(t *testing.T) {\n\t\t\t\t\ttest.f(\"burger\")\n\n\t\t\t\t\tdefer b.Reset()\n\n\t\t\t\t\tout := b.String()\n\n\t\t\t\t\t//t.Log(b.String())\n\n\t\t\t\t\tassureSingleNewline(out, t)\n\n\t\t\t\t\tlevel, file, function, _ := splitMessage(out, t)\n\n\t\t\t\t\tif level != test.level {\n\t\t\t\t\t\tt.Errorf(\"expected level: '%s'. actual level: '%s'\", test.level, level)\n\t\t\t\t\t}\n\n\t\t\t\t\tif file != test.file {\n\t\t\t\t\t\tt.Errorf(\"expected file: '%s'. actual file: '%s'\", test.file, file)\n\t\t\t\t\t}\n\n\t\t\t\t\tif function != test.function {\n\t\t\t\t\t\tt.Errorf(\"expected function: '%s'. actual function: '%s'\", test.function, function)\n\t\t\t\t\t}\n\n\t\t\t\t\tfor k, v := range test.fields {\n\t\t\t\t\t\tif ok, fields := hasField(k, v, out, t); !ok {\n\t\t\t\t\t\t\tt.Errorf(\"expected fields to contain: '%s=%v. 
actual fields total: %s\", k, v, fields)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t})\n\t\t\t}\n\t\t})\n\n\t\tt.Run(\"Append Fields\", func(t *testing.T) {\n\t\t\tdefer b.Reset()\n\t\t\tlog.InitSimpleLogger(&log.Config{\n\t\t\t\tOutput: b,\n\t\t\t})\n\n\t\t\ttests := []struct {\n\t\t\t\tlevel string\n\t\t\t\tfile string\n\t\t\t\tfunction string\n\t\t\t\tfields log.Fields\n\t\t\t\tf func(string)\n\t\t\t}{\n\t\t\t\t{\n\t\t\t\t\tlevel: \"ERROR\",\n\t\t\t\t\tfile: \"log_test.go\",\n\t\t\t\t\tfunction: \"1()\",\n\t\t\t\t\tfields: log.Fields{\n\t\t\t\t\t\t\"one\": 1,\n\t\t\t\t\t},\n\t\t\t\t\tf: log.WithFields(log.Fields{\n\t\t\t\t\t\t\"one\": 1,\n\t\t\t\t\t}).WithFields(log.Fields{}).Error,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tlevel: \"INFO \",\n\t\t\t\t\tfile: \"log_test.go\",\n\t\t\t\t\tfunction: \"1()\",\n\t\t\t\t\tfields: log.Fields{\n\t\t\t\t\t\t\"sample\": \"this is a long piece of text\",\n\t\t\t\t\t\t\"true\": false,\n\t\t\t\t\t\t\"false\": true,\n\t\t\t\t\t},\n\t\t\t\t\tf: log.WithFields(log.Fields{\n\t\t\t\t\t\t\"sample\": \"this is a long piece of text\",\n\t\t\t\t\t}).WithFields(log.Fields{\n\t\t\t\t\t\t\"false\": true,\n\t\t\t\t\t}).WithFields(log.Fields{\n\t\t\t\t\t\t\"true\": false,\n\t\t\t\t\t}).Info,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tlevel: \"WARN \",\n\t\t\t\t\tfile: \"log_test.go\",\n\t\t\t\t\tfunction: \"1()\",\n\t\t\t\t\tfields: log.Fields{\n\t\t\t\t\t\t\"one\": nil,\n\t\t\t\t\t\t\"okay but\": \"epic\",\n\t\t\t\t\t},\n\t\t\t\t\tf: log.WithFields(log.Fields{\n\t\t\t\t\t\t\"one\": nil,\n\t\t\t\t\t}).WithFields(log.Fields{\n\t\t\t\t\t\t\"okay but\": \"epic\",\n\t\t\t\t\t}).Warn,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tlevel: \"DEBUG\",\n\t\t\t\t\tfile: \"log_test.go\",\n\t\t\t\t\tfunction: \"1()\",\n\t\t\t\t\tfields: log.Fields{\n\t\t\t\t\t\t\"teamwork\": -1,\n\t\t\t\t\t\t\"dreamwork\": []bool{false, true},\n\t\t\t\t\t},\n\t\t\t\t\tf: log.WithFields(log.Fields{\n\t\t\t\t\t\t\"teamwork\": -1,\n\t\t\t\t\t}).WithFields(log.Fields{\n\t\t\t\t\t\t\"dreamwork\": []bool{false, true},\n\t\t\t\t\t}).Debug,\n\t\t\t\t},\n\t\t\t}\n\n\t\t\tfor _, test := range tests {\n\t\t\t\tt.Run(test.level, func(t *testing.T) {\n\t\t\t\t\ttest.f(\"burger\")\n\n\t\t\t\t\tdefer b.Reset()\n\n\t\t\t\t\tout := b.String()\n\n\t\t\t\t\t//t.Log(b.String())\n\n\t\t\t\t\tassureSingleNewline(out, t)\n\n\t\t\t\t\tlevel, file, function, _ := splitMessage(out, t)\n\n\t\t\t\t\tif level != test.level {\n\t\t\t\t\t\tt.Errorf(\"expected level: '%s'. actual level: '%s'\", test.level, level)\n\t\t\t\t\t}\n\n\t\t\t\t\tif file != test.file {\n\t\t\t\t\t\tt.Errorf(\"expected file: '%s'. actual file: '%s'\", test.file, file)\n\t\t\t\t\t}\n\n\t\t\t\t\tif function != test.function {\n\t\t\t\t\t\tt.Errorf(\"expected function: '%s'. actual function: '%s'\", test.function, function)\n\t\t\t\t\t}\n\n\t\t\t\t\tfor k, v := range test.fields {\n\t\t\t\t\t\tif ok, fields := hasField(k, v, out, t); !ok {\n\t\t\t\t\t\t\tt.Errorf(\"expected fields to contain: '%s=%v. 
actual fields total: %s\", k, v, fields)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t})\n\t\t\t}\n\t\t})\n\n\t\tt.Run(\"With Error Field\", func(t *testing.T) {\n\t\t\tdefer b.Reset()\n\t\t\tlog.InitSimpleLogger(&log.Config{\n\t\t\t\tOutput: b,\n\t\t\t})\n\n\t\t\ttests := []struct {\n\t\t\t\tlevel string\n\t\t\t\tfile string\n\t\t\t\tfunction string\n\t\t\t\tfields log.Fields\n\t\t\t\tf func(string)\n\t\t\t}{\n\t\t\t\t{\n\t\t\t\t\tlevel: \"ERROR\",\n\t\t\t\t\tfile: \"log_test.go\",\n\t\t\t\t\tfunction: \"1()\",\n\t\t\t\t\tfields: log.Fields{\n\t\t\t\t\t\t\"one\": 1,\n\t\t\t\t\t\t\"error\": errors.New(\"sample text\"),\n\t\t\t\t\t},\n\t\t\t\t\tf: log.WithError(\n\t\t\t\t\t\terrors.New(\"sample text\"),\n\t\t\t\t\t).WithFields(log.Fields{\n\t\t\t\t\t\t\"one\": 1,\n\t\t\t\t\t}).Error,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tlevel: \"INFO \",\n\t\t\t\t\tfile: \"log_test.go\",\n\t\t\t\t\tfunction: \"1()\",\n\t\t\t\t\tfields: log.Fields{\n\t\t\t\t\t\t\"sample\": \"this is a long piece of text\",\n\t\t\t\t\t\t\"error\": errors.New(\"sample text\"),\n\t\t\t\t\t},\n\t\t\t\t\tf: log.WithFields(log.Fields{\n\t\t\t\t\t\t\"sample\": \"this is a long piece of text\",\n\t\t\t\t\t}).WithError(errors.New(\"sample text\")).Info,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tlevel: \"WARN \",\n\t\t\t\t\tfile: \"log_test.go\",\n\t\t\t\t\tfunction: \"1()\",\n\t\t\t\t\tfields: log.Fields{\n\t\t\t\t\t\t\"one\": nil,\n\t\t\t\t\t\t\"error\": errors.New(\"sample text\"),\n\t\t\t\t\t},\n\t\t\t\t\tf: log.WithFields(log.Fields{\n\t\t\t\t\t\t\"one\": nil,\n\t\t\t\t\t}).WithError(errors.New(\"sample text\")).Warn,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tlevel: \"DEBUG\",\n\t\t\t\t\tfile: \"log_test.go\",\n\t\t\t\t\tfunction: \"1()\",\n\t\t\t\t\tfields: log.Fields{\n\t\t\t\t\t\t\"teamwork\": -1,\n\t\t\t\t\t\t\"error\": errors.New(\"sample text\"),\n\t\t\t\t\t},\n\t\t\t\t\tf: log.WithFields(log.Fields{\n\t\t\t\t\t\t\"teamwork\": -1,\n\t\t\t\t\t}).WithError(errors.New(\"sample text\")).Debug,\n\t\t\t\t},\n\t\t\t}\n\n\t\t\tfor _, test := range tests {\n\t\t\t\tt.Run(test.level, func(t *testing.T) {\n\t\t\t\t\ttest.f(\"burger\")\n\n\t\t\t\t\tdefer b.Reset()\n\n\t\t\t\t\tout := b.String()\n\n\t\t\t\t\t//t.Log(b.String())\n\n\t\t\t\t\tassureSingleNewline(out, t)\n\n\t\t\t\t\tlevel, file, function, _ := splitMessage(out, t)\n\n\t\t\t\t\tif level != test.level {\n\t\t\t\t\t\tt.Errorf(\"expected level: '%s'. actual level: '%s'\", test.level, level)\n\t\t\t\t\t}\n\n\t\t\t\t\tif file != test.file {\n\t\t\t\t\t\tt.Errorf(\"expected file: '%s'. actual file: '%s'\", test.file, file)\n\t\t\t\t\t}\n\n\t\t\t\t\tif function != test.function {\n\t\t\t\t\t\tt.Errorf(\"expected function: '%s'. actual function: '%s'\", test.function, function)\n\t\t\t\t\t}\n\n\t\t\t\t\tfor k, v := range test.fields {\n\t\t\t\t\t\tif ok, fields := hasField(k, v, out, t); !ok {\n\t\t\t\t\t\t\tt.Errorf(\"expected fields to contain: '%s=%v. 
actual fields total: %s\", k, v, fields)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t})\n\t\t\t}\n\t\t})\n\t})\n\n\tt.Run(\"LogLevel\", func(t *testing.T) {\n\t\ttests := []struct {\n\t\t\tlevelName string\n\t\t\tlevel log.LogLevel\n\t\t\toutput bool\n\t\t\tf func(string)\n\t\t}{\n\t\t\t{\n\t\t\t\tlevelName: \"DEBUG\",\n\t\t\t\tlevel: log.LogDebug,\n\t\t\t\toutput: true,\n\t\t\t\tf: log.Debug,\n\t\t\t},\n\t\t\t{\n\t\t\t\tlevelName: \"ERROR\",\n\t\t\t\tlevel: log.LogInformational,\n\t\t\t\toutput: true,\n\t\t\t\tf: log.Error,\n\t\t\t},\n\t\t\t{\n\t\t\t\tlevelName: \"INFO \",\n\t\t\t\tlevel: log.LogWarning,\n\t\t\t\toutput: false,\n\t\t\t\tf: log.Info,\n\t\t\t},\n\t\t\t{\n\t\t\t\tlevelName: \"WARN \",\n\t\t\t\tlevel: log.LogError,\n\t\t\t\toutput: false,\n\t\t\t\tf: log.Warn,\n\t\t\t},\n\t\t}\n\n\t\tvar b strings.Builder\n\t\tfor _, test := range tests {\n\t\t\tt.Run(test.levelName, func(t *testing.T) {\n\t\t\t\tdefer b.Reset()\n\t\t\t\tlog.InitSimpleLogger(&log.Config{\n\t\t\t\t\tOutput: &b,\n\t\t\t\t\tLogLevel: test.level,\n\t\t\t\t})\n\n\t\t\t\ttest.f(\"sample text\")\n\n\t\t\t\tif b.Len() > 0 && !test.output {\n\t\t\t\t\tt.Errorf(\"expected no output for log level %d, got '%s'\", test.level, b.String())\n\t\t\t\t}\n\t\t\t})\n\t\t}\n\t})\n\n\tt.Run(\"Clone\", func(t *testing.T) {\n\t\tdefer b.Reset()\n\t\tlog.InitSimpleLogger(&log.Config{\n\t\t\tOutput: b,\n\t\t})\n\n\t\te := log.WithFields(log.Fields{\n\t\t\t\"sample\": \"text\",\n\t\t})\n\n\t\te1 := e.Clone().WithFields(log.Fields{\n\t\t\t\"fortnite\": \"borger\",\n\t\t})\n\n\t\te = e.WithFields(log.Fields{\n\t\t\t\"hello\": \"world\",\n\t\t})\n\n\t\te.Info(\"e\")\n\n\t\tif ok, fields := hasField(\"fortnite\", \"borger\", b.String(), t); ok {\n\t\t\tt.Errorf(\"expected to not have '%s=%s' but it did: '%s'\", \"fortnite\", \"borger\", fields)\n\t\t}\n\n\t\tb.Reset()\n\t\te1.Info(\"e\")\n\n\t\tif ok, fields := hasField(\"hello\", \"world\", b.String(), t); ok {\n\t\t\tt.Errorf(\"expected to not have '%s=%s' but it did: '%s'\", \"hello\", \"world\", fields)\n\t\t}\n\t})\n\n\tt.Run(\"Context\", func(t *testing.T) {\n\t\tdefer b.Reset()\n\t\tlog.InitSimpleLogger(&log.Config{\n\t\t\tOutput: b,\n\t\t})\n\n\t\tctx := context.WithValue(context.Background(), log.Key, log.Fields{\n\t\t\t\"sample\": \"text\",\n\t\t})\n\n\t\tlog.WithContext(ctx).Info(\"hello epic reddit\")\n\n\t\tif ok, fields := hasField(\"sample\", \"text\", b.String(), t); !ok {\n\t\t\tt.Errorf(\"expected fields to contain: '%s=%v'. 
actual fields total: %s\", \"sample\", \"text\", fields)\n\t\t}\n\t})\n}", "func constructMessage(args ...interface{}) string {\n\tmessage := \"\"\n\tfor index, item := range args {\n\t\tmessage += fmt.Sprint(item)\n\t\tif index != len(args)-1 {\n\t\t\tmessage += \" \"\n\t\t}\n\t}\n\treturn message\n}", "func testLog() *Log {\n\treturn &Log{\n\t\tID: sql.NullInt64{Int64: 1, Valid: true},\n\t\tBuildID: sql.NullInt64{Int64: 1, Valid: true},\n\t\tRepoID: sql.NullInt64{Int64: 1, Valid: true},\n\t\tServiceID: sql.NullInt64{Int64: 1, Valid: true},\n\t\tStepID: sql.NullInt64{Int64: 1, Valid: true},\n\t\tData: []byte(\"foo\"),\n\t}\n}", "func buildReport(conf *config.Config) (*Report, error) {\n\tpanic(\"boom: I ran some linux code\")\n}", "func GenerateLog() string {\n\treturn fmt.Sprintf(\"%s - %s %s %s %s %s\\n\", RandomIP(), RandomUser(), CurrentTime(), RandomRequest(), RandomStatus(), RandomByteSize())\n}", "func TestTestcaseBuild__WithUndefinedVariable(t *testing.T) {\n\tvar expectedOutput = GivenTestdataContents(t, t.Name()+\"_output.js\")\n\n\tvar output strings.Builder\n\terr := MainTestcaseBuild(&output, filepath.Join(\"testdata\", t.Name()+\"_main.mjs\"), nil)\n\tassert.NoError(t, err)\n\n\tassert.Equal(t, expectedOutput, output.String())\n}", "func (v *vpconnect) createLogging() string {\n\tprint(&msg{Message: \"v.createLogging(): Entering\", LogLevel: \"debug\"})\n\tdefer print(&msg{Message: \"v.createLogging(): Returning\", LogLevel: \"debug\"})\n\n\tlogging := \"charon {\\n\"\n\tlogging += \"\\tfilelog {\\n\"\n\tlogging += \"\\t\\tcharonlog {\\n\"\n\tlogging += \"\\t\\t\\tpath = /var/log/charon.log\\n\"\n\tlogging += fmt.Sprintf(\"\\t\\t\\tdefault = %s\\n\", v.charonLogLevel)\n\tlogging += \"\\t\\t\\ttime_format = %Y-%m-%d %H:%M:%S\\n\"\n\tlogging += \"\\t\\t}\\n\"\n\tlogging += \"\\t}\\n\"\n\tlogging += \"\\tsyslog {\\n\"\n\tlogging += \"\\t\\tdaemon {\\n\"\n\tlogging += \"\\t\\t}\\n\"\n\tlogging += \"\\t\\tdefault = -1\\n\"\n\tlogging += \"\\t}\\n\"\n\tlogging += \"}\\n\"\n\n\treturn logging\n}", "func buildQueue(c *cli.Context) error {\n\n\tclient, err := newClient(c)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tbuilds, err := client.BuildQueue()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(builds) == 0 {\n\t\tfmt.Println(\"there are no pending or running builds\")\n\t\treturn nil\n\t}\n\n\ttmpl, err := template.New(\"_\").Parse(c.String(\"format\") + \"\\n\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, build := range builds {\n\t\ttmpl.Execute(os.Stdout, build)\n\t}\n\treturn nil\n}", "func BuildStr() string {\n\tvar result = fmt.Sprintf(\n\t\t\"version: %s\\n\"+\n\t\t\t\"commit: %s\\n\"+\n\t\t\t\"built at: %s\\n\"+\n\t\t\t\"built by: %s\\n\",\n\t\tversion, commit, date, builtBy)\n\n\treturn result\n}", "func TestTextMessage(t *testing.T) {\n\tprefix := \"test\"\n\tlevel := level.Info\n\ttimestamp := time.Now()\n\toutput := &Stdout\n\toutput.WithColor = trit.True\n\toutput.Layouts = output.Layouts | layout.LineNumber | layout.FuncAddress\n\tstackframe := getStackFrame(2)\n\n\ttests := []struct {\n\t\tname string\n\t\tf string\n\t\ta []any\n\t\te string\n\t}{\n\t\t{\n\t\t\tname: \"Text message with formatted string\",\n\t\t\tf: \"formatted string %s\",\n\t\t\ta: []any{\"value\"},\n\t\t\te: \"formatted string value\",\n\t\t},\n\t\t{\n\t\t\tname: \"Text message with multiple formatted values\",\n\t\t\tf: \"formatted string with multiple values %s %d\",\n\t\t\ta: []any{\"value\", 1},\n\t\t\te: \"formatted string with multiple values value 
1\",\n\t\t},\n\t\t{\n\t\t\tname: \"Text message with no formatting\",\n\t\t\tf: \"\",\n\t\t\ta: []any{\"value\"},\n\t\t\te: \"value\",\n\t\t},\n\t}\n\n\tfor _, test := range tests {\n\t\tt.Run(test.name, func(t *testing.T) {\n\t\t\tresult := textMessage(\n\t\t\t\tprefix,\n\t\t\t\tlevel,\n\t\t\t\ttimestamp,\n\t\t\t\toutput,\n\t\t\t\tstackframe,\n\t\t\t\ttest.f,\n\t\t\t\ttest.a...,\n\t\t\t)\n\n\t\t\tif !strings.Contains(result, test.e) {\n\t\t\t\tt.Errorf(\"Message '%s' doesn't contains '%s'\", result, test.e)\n\t\t\t}\n\t\t})\n\t}\n\n\t// Change layouts.\n\toutput.Layouts = layout.FullFilePath\n\tfor _, test := range tests {\n\t\tt.Run(test.name, func(t *testing.T) {\n\t\t\tresult := textMessage(\n\t\t\t\tprefix,\n\t\t\t\tlevel,\n\t\t\t\ttimestamp,\n\t\t\t\toutput,\n\t\t\t\tstackframe,\n\t\t\t\ttest.f,\n\t\t\t\ttest.a...,\n\t\t\t)\n\n\t\t\tif !strings.Contains(result, test.e) {\n\t\t\t\tt.Errorf(\"Message '%s' doesn't contains '%s'\", result, test.e)\n\t\t\t}\n\t\t})\n\t}\n\n\t// Change layouts.\n\toutput.Layouts = layout.LineNumber | layout.FuncAddress\n\tfor _, test := range tests {\n\t\tt.Run(test.name, func(t *testing.T) {\n\t\t\tresult := textMessage(\n\t\t\t\tprefix,\n\t\t\t\tlevel,\n\t\t\t\ttimestamp,\n\t\t\t\toutput,\n\t\t\t\tstackframe,\n\t\t\t\ttest.f,\n\t\t\t\ttest.a...,\n\t\t\t)\n\n\t\t\tif !strings.Contains(result, test.e) {\n\t\t\t\tt.Errorf(\"Message '%s' doesn't contains '%s'\", result, test.e)\n\t\t\t}\n\t\t})\n\t}\n}", "func makeCongratulationMessage(holidayDay holiday, tmpDateOfHoliday time.Time, today bool) (message string){\r\n\tmessageString := \"\"\r\n\tif(today == true){\r\n\t\tmessageString += \"Today is a holiday! It is \" + holidayDay.Name\r\n\t} else {\r\n\t\tmessageString += (\"The next holiday is \" + holidayDay.Name +\r\n \t\t\" (\" +\r\n \t\ttmpDateOfHoliday.Weekday().String() + \", \" +\r\n \t\ttmpDateOfHoliday.Month().String() + \", \" +\r\n \t\tstrconv.Itoa(tmpDateOfHoliday.Day()) +\r\n \t\t\")\")\r\n\t}\r\n\tmessageString += isLongWeekendMessage(tmpDateOfHoliday)\r\n return messageString\r\n}", "func TestMessage(t *testing.T) {\n\tt.Run(\"LogMessage\", func(t *testing.T) {\n\t\tt.Run(\"NilFmt\", func(t *testing.T) {\n\t\t\tmessage := &Message{\n\t\t\t\tid: 1,\n\t\t\t\tlevel: INFO,\n\t\t\t\tfmt: nil,\n\t\t\t\targs: []interface{}{\n\t\t\t\t\t\"Some argument\", \"SecondArgument\",\n\t\t\t\t},\n\t\t\t}\n\t\t\tmsgStr := message.Message()\n\t\t\tassert.Equal(t, fmt.Sprint(message.args...), msgStr)\n\t\t})\n\t\tt.Run(\"NonNilFmt\", func(t *testing.T) {\n\t\t\tformat := \"%s, %s\"\n\t\t\tmessage := &Message{\n\t\t\t\tid: 2,\n\t\t\t\tlevel: DEBUG,\n\t\t\t\tfmt: &format,\n\t\t\t\targs: []interface{}{\n\t\t\t\t\t\"first\", \"second\",\n\t\t\t\t},\n\t\t\t}\n\t\t\tmsgStr := message.Message()\n\t\t\tassert.Equal(t, fmt.Sprintf(*message.fmt, message.args...), msgStr)\n\n\t\t\tstr := message.String()\n\t\t\tassert.Equal(t, fmt.Sprintf(\"%s|%04x: %s\", message.level, message.id, message.getMessage()), str)\n\t\t})\n\t})\n}", "func generateTestSpec(fields []gengo.Field) *gengo.MsgSpec {\n\tmsgSpec := &gengo.MsgSpec{}\n\tmsgSpec.FullName = \"TestMessage\"\n\tmsgSpec.Package = \"Testing\"\n\tmsgSpec.MD5Sum = \"1337beeffeed1337\"\n\tmsgSpec.ShortName = \"Test\"\n\tmsgSpec.Fields = fields\n\treturn msgSpec\n}", "func newCommitMsg(author *object.Signature) (string, error) {\n\tif author.Name == \"\" {\n\t\treturn \"\", fmt.Errorf(\"commit Author.Name must be set\")\n\t}\n\tif author.Email == \"\" {\n\t\treturn \"\", fmt.Errorf(\"commit Author.Email must be set\")\n\t}\n\n\tbuf 
:= bytes.Buffer{}\n\terr := commitMsgTmpl.Execute(&buf, author)\n\treturn buf.String(), err\n}", "func fakeLogs() {\n\ttime.Sleep(1 * time.Second)\n\n\tconn, err := pgxpool.Acquire()\n\tdefer pgxpool.Release(conn)\n\tif err != nil {\n\t\tlogger.Fatalln(err)\n\t}\n\n\ttime.Sleep(1 * time.Second)\n\n\tfor n := 1; n > 0; n++ {\n\t\t// _, err := runCmdForBuild(int64(buildID), exec.Command(\"./sleepy.sh\"))\n\t\t// pp.Println(err)\n\t\tforwardLogToDB(conn, 3, fmt.Sprintf(\"example line %d\", n))\n\t\ttime.Sleep(10 * time.Second)\n\t}\n\n\tos.Exit(0)\n}", "func (r *ConfigAuditReportReconciler) buildCommand(workloadType, workloadName string) string {\n\treturn \"starboard -n \" + r.NamespaceWatched + \" get report \" + workloadType + \"/\" + workloadName + \" > \" + reportPath + workloadName + \".\" + workloadType + \".html\"\n}", "func (p *messagePredicate) buildDiff(rep *Report) {\n\treport.WriteDiff(\n\t\t&rep.Section(\"Message Diff\").Content,\n\t\treport.RenderMessage(p.expectedMessage),\n\t\treport.RenderMessage(p.bestMatch.Message),\n\t)\n}", "func executeBuild() {\n\tfmt.Println(\"Building ...\")\n}", "func BuildIssueFiatMsg(from sdk.AccAddress, to sdk.AccAddress, fiatPeg sdk.FiatPeg) sdk.Msg {\n\n\tissueFiat := bank.NewIssueFiat(from, to, fiatPeg)\n\tmsg := bank.NewMsgBankIssueFiats([]bank.IssueFiat{issueFiat})\n\treturn msg\n}", "func log(t Type, wait bool, msg string, meta map[string]interface{}) string {\n\tnow := time.Now()\n\tm := pool.Get().(*Message)\n\tm.Type = t\n\tm.Time = now.Format(\"Jan 02 2006 15:04:05.9999\")\n\tm.Code = strconv.FormatInt(now.UnixNano(), 36)\n\tm.Desc = msg\n\tm.Meta = meta\n\tm.done = make(chan struct{})\n\tmessages <- m\n\n\tif wait {\n\t\t<-m.done\n\t}\n\n\treturn m.Code\n}", "func (t Plan) BuildCommand() []string {\n\t// detailed exit code needed to better parse the plan\n\tcommand := []string{\"plan\", \"-detailed-exitcode\"}\n\n\tif t.CompactWarnings {\n\t\tcommand = append(command, \"-compact-warnings\")\n\t}\n\n\tif t.Destroy {\n\t\tcommand = append(command, \"-destroy\")\n\t}\n\n\tif !t.Input {\n\t\tcommand = append(command, \"-input=false\")\n\t}\n\n\tif t.LockTimeout.String() != \"0s\" {\n\t\tcommand = append(command, \"-lock-timeout=\"+t.LockTimeout.String())\n\t}\n\n\tif t.NoColor {\n\t\tcommand = append(command, \"-no-color\")\n\t}\n\n\tif t.Out != \"\" {\n\t\tcommand = append(command, \"-out=\"+t.Out)\n\t}\n\n\tif t.Parallelism != 10 {\n\t\tcommand = append(command, fmt.Sprintf(\"-parallelism=%d\", t.Parallelism))\n\t}\n\n\tif !t.Refresh {\n\t\tcommand = append(command, \"-refresh=false\")\n\t}\n\n\tif t.State != \"\" {\n\t\tcommand = append(command, \"-state=\"+t.State)\n\t}\n\n\tif !t.Targets.Empty() {\n\t\tfor _, v := range t.Targets.Options {\n\t\t\tcommand = append(command, \"-target=\"+v)\n\t\t}\n\t}\n\n\tif !t.Vars.Empty() {\n\t\tfor _, v := range t.Vars.Options {\n\t\t\tcommand = append(command, \"-var '\"+v+\"'\")\n\t\t}\n\t}\n\n\tif !t.VarFiles.Empty() {\n\t\tfor _, v := range t.VarFiles.Options {\n\t\t\tcommand = append(command, \"-var-file=\"+v)\n\t\t}\n\t}\n\n\treturn command\n}", "func LogMsg(moduleName string, a string, args ...interface{}) {\n\tmsg := fmt.Sprintf(a, args...)\n\tnow := time.Now()\n\tlog.Printf(\"%s %s: %s\", Time2string(now), moduleName, msg)\n}", "func TestErrorLogWithLinenumberAndFilename(t *testing.T) {\n\tvar b strings.Builder\n\ttMock := mock.NewTMock()\n\texpect := texp.Expect(tMock, conf.OutputTo(&b))\n\n\texpect(false).ToBeTrue(\"\")\n\t_, filename, linenum, _ := runtime.Caller(0)\n\texpLog := 
fmt.Sprintf(\"Test: Test\\nTrace: %s:%v\\nError: \\n\", path.Base(filename), linenum-1)\n\tif b.String() != expLog {\n\t\tt.Errorf(\"Expecting %s, got %s\", expLog, b.String())\n\t}\n}", "func (state *BuildState) logResult(result *BuildResult) {\n\tresult.Time = time.Now()\n\tstate.progress.internalResults <- result\n\tif result.Status.IsFailure() {\n\t\tstate.progress.failed.SetTrue()\n\t\tif result.Status == TargetBuildFailed {\n\t\t\tstate.progress.buildFailed.SetTrue()\n\t\t} else if result.Status == TargetTestFailed {\n\t\t\tstate.progress.testFailed.SetTrue()\n\t\t}\n\t}\n}", "func (ad *AgentRunCommandReplyType) constructMessage(result *contracts.DocumentResult) (*mgsContracts.AgentMessage, error) {\n\tlog := ad.context.Log()\n\tappConfig := ad.context.AppConfig()\n\tagentInfo := contracts.AgentInfo{\n\t\tLang: appConfig.Os.Lang,\n\t\tName: appConfig.Agent.Name,\n\t\tVersion: appConfig.Agent.Version,\n\t\tOs: appConfig.Os.Name,\n\t\tOsVersion: appConfig.Os.Version,\n\t}\n\treplyPayload := runcommand.FormatPayload(log, result.LastPlugin, agentInfo, result.PluginResults)\n\tcommandTopic := utils.GetTopicFromDocResult(result.ResultType, result.RelatedDocumentType)\n\treturn utils.GenerateAgentJobReplyPayload(log, ad.replyId, result.MessageID, replyPayload, commandTopic)\n}", "func BuildSendFiatMsg(from sdk.AccAddress, to sdk.AccAddress, pegHash sdk.PegHash, amount int64) sdk.Msg {\n\n\tsendFiat := bank.NewSendFiat(from, to, pegHash, amount)\n\tmsg := bank.NewMsgBankSendFiats([]bank.SendFiat{sendFiat})\n\treturn msg\n}", "func MakeLogLineMsg(cat LogCat, msgData LogLineParsedMsg) LogLineParsed {\n\th, m, s := time.Now().Clock()\n\tllp := LogLineParsed{\n\t\tCat: cat,\n\t\tBody: \"\",\n\t\tMsg: &msgData,\n\t\tStampSeconds: h*60*60 + m*60 + s,\n\t}\n\n\tllp.UpdateBody()\n\treturn llp\n}", "func (c *TestCommand) build() (string, error) {\n\tpath := \"go\" // go must be in $PATH\n\t// -c compiles without running the test\n\targs := append([]string{\"test\", \"-c\"})\n\tcmd := exec.Command(path, args...)\n\tpkgDir := c.config.Wd()\n\tcmd.Dir = pkgDir\n\tcmd.Env = CrossEnv()\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\terr := cmd.Run()\n\t_, pkgName := filepath.Split(pkgDir)\n\tfileName := filepath.Join(pkgDir, pkgName+\".test\")\n\treturn fileName, err\n}", "func (e WakeupEvent) LogMessage() string {\n\tif util.InTestMode() {\n\t\treturn \"scheduled consistency check\"\n\t}\n\treturn \"\"\n}", "func (j *JobLog) createComponents() {\n\tj.jobsInfo = tview.NewTextView().\n\t\tSetDynamicColors(true)\n\tj.jobsInfo.SetBorder(true).\n\t\tSetBorderColor(tcell.ColorYellow)\n\n\tj.logBox = tview.NewTextView().\n\t\tSetDynamicColors(true)\n\tj.logBox.SetBorder(true).\n\t\tSetTitle(\"Log\")\n\n\tj.usage = tview.NewTextView().\n\t\tSetDynamicColors(true)\n\n\t// Create the layout.\n\tj.layout = tview.NewFlex().\n\t\tSetDirection(tview.FlexRow).\n\t\tAddItem(j.jobsInfo, 0, 1, false).\n\t\tAddItem(j.logBox, 0, 5, true).\n\t\tAddItem(j.usage, 1, 1, false)\n}", "func (r *ConfigAuditReportReconciler) buildCommand(workloadInfo string) string {\n\tworkloadInfos := strings.Split(workloadInfo, \"|\")\n\tworkloadType := workloadInfos[0]\n\tworkloadName := workloadInfos[1]\n\n\treturn \"starboard -n \" + r.NamespaceWatched + \" get report \" + workloadType + \"/\" + workloadName + \" > \" + buildReportName(workloadType, workloadName)\n}", "func BuildMessage(content string) *Message {\n msg := new(Message)\n fields := strings.Split(content, \"][\")\n for idx, field := range fields {\n s := 
strings.Trim(field, \"[]\")\n switch idx {\n case 0:\n msg.Priority = s\n case 1:\n msg.Status = s\n case 2:\n msg.Endpoint = s\n case 3:\n case 4:\n msg.Content = s\n case 5:\n l := strings.Split(s, \" \")\n t := l[1:]\n ts := strings.Join(t, \"T\")\n msg.Timestamp = ts\n default:\n }\n }\n return msg\n}", "func BuildBvsMsg(senderAccount sdk.AccAddress, sender string, recp string, asset *types.BvsAsset) sdk.Msg {\n\treturn &MsgBvs{\n\t\tSenderAccount: senderAccount,\n\t\tSender: sender,\n\t\tRecipient: recp,\n\t\tAsset: *asset,\n\t}\n}", "func fakeCommitMsgConfig(t *testing.T) *config.CommitMsgConfig {\n\tc := &config.CommitMsgConfig{\n\t\tBugProject: fakeBugProject,\n\t\tChildLogUrlTmpl: \"https://fake-child-log/{{.RollingFrom}}..{{.RollingTo}}\",\n\t\tCqExtraTrybots: []string{\"some-trybot\"},\n\t\tCqDoNotCancelTrybots: true,\n\t\tExtraFooters: []string{\"My-Footer: BlahBlah\", \"My-Other-Footer: Blah\"},\n\t\tIncludeLog: true,\n\t\tIncludeRevisionCount: true,\n\t\tIncludeTbrLine: true,\n\t\tIncludeTests: true,\n\t\tTemplate: &config.CommitMsgConfig_BuiltIn_{\n\t\t\tBuiltIn: config.CommitMsgConfig_DEFAULT,\n\t\t},\n\t}\n\t// Sanity check.\n\trequire.NoError(t, c.Validate())\n\treturn c\n}", "func (client *BuildServiceClient) getBuildResultLogCreateRequest(ctx context.Context, resourceGroupName string, serviceName string, buildServiceName string, buildName string, buildResultName string, options *BuildServiceClientGetBuildResultLogOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/buildServices/{buildServiceName}/builds/{buildName}/results/{buildResultName}/getLogFileUrl\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif serviceName == \"\" {\n\t\treturn nil, errors.New(\"parameter serviceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{serviceName}\", url.PathEscape(serviceName))\n\tif buildServiceName == \"\" {\n\t\treturn nil, errors.New(\"parameter buildServiceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{buildServiceName}\", url.PathEscape(buildServiceName))\n\tif buildName == \"\" {\n\t\treturn nil, errors.New(\"parameter buildName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{buildName}\", url.PathEscape(buildName))\n\tif buildResultName == \"\" {\n\t\treturn nil, errors.New(\"parameter buildResultName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{buildResultName}\", url.PathEscape(buildResultName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-01-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func constructGrid(log logrus.FieldLogger, group *configpb.TestGroup, cols []InflatedColumn, issues map[string][]string) *statepb.Grid {\n\t// Add the columns into a grid message\n\tvar grid 
statepb.Grid\n\trows := map[string]*statepb.Row{} // For fast target => row lookup\n\tfailsOpen := int(group.NumFailuresToAlert)\n\tpassesClose := int(group.NumPassesToDisableAlert)\n\tif failsOpen > 0 && passesClose == 0 {\n\t\tpassesClose = 1\n\t}\n\n\tfor _, col := range cols {\n\t\tappendColumn(&grid, rows, col)\n\t}\n\n\tdropEmptyRows(log, &grid, rows)\n\n\tfor name, row := range rows {\n\t\trow.Issues = append(row.Issues, issues[name]...)\n\t\tissues := make(map[string]bool, len(row.Issues))\n\t\tfor _, i := range row.Issues {\n\t\t\tissues[i] = true\n\t\t}\n\t\trow.Issues = make([]string, 0, len(issues))\n\t\tfor i := range issues {\n\t\t\trow.Issues = append(row.Issues, i)\n\t\t}\n\t\tsort.SliceStable(row.Issues, func(i, j int) bool {\n\t\t\t// Largest issues at the front of the list\n\t\t\treturn !sortorder.NaturalLess(row.Issues[i], row.Issues[j])\n\t\t})\n\t}\n\n\talertRows(grid.Columns, grid.Rows, failsOpen, passesClose)\n\tsort.SliceStable(grid.Rows, func(i, j int) bool {\n\t\treturn sortorder.NaturalLess(grid.Rows[i].Name, grid.Rows[j].Name)\n\t})\n\n\tfor _, row := range grid.Rows {\n\t\tdel := true\n\t\tfor _, up := range row.UserProperty {\n\t\t\tif up != \"\" {\n\t\t\t\tdel = false\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif del {\n\t\t\trow.UserProperty = nil\n\t\t}\n\t\tsort.SliceStable(row.Metric, func(i, j int) bool {\n\t\t\treturn sortorder.NaturalLess(row.Metric[i], row.Metric[j])\n\t\t})\n\t\tsort.SliceStable(row.Metrics, func(i, j int) bool {\n\t\t\treturn sortorder.NaturalLess(row.Metrics[i].Name, row.Metrics[j].Name)\n\t\t})\n\t}\n\treturn &grid\n}", "func failureLog(testName, function string, args map[string]interface{}, startTime time.Time, alert, message string, err error) *log.Entry {\n\t// calculate the test case duration\n\tduration := time.Since(startTime)\n\tvar fields log.Fields\n\t// log with the fields as per mint\n\tif err != nil {\n\t\tfields = log.Fields{\n\t\t\t\"name\": \"minio-go: \" + testName, \"function\": function, \"args\": args,\n\t\t\t\"duration\": duration.Nanoseconds() / 1000000, \"status\": \"FAIL\", \"alert\": alert, \"message\": message, \"error\": err,\n\t\t}\n\t} else {\n\t\tfields = log.Fields{\n\t\t\t\"name\": \"minio-go: \" + testName, \"function\": function, \"args\": args,\n\t\t\t\"duration\": duration.Nanoseconds() / 1000000, \"status\": \"FAIL\", \"alert\": alert, \"message\": message,\n\t\t}\n\t}\n\treturn log.WithFields(cleanEmptyEntries(fields))\n}", "func RunBuild(opts Options, flags ...Flag) (comm.LogChan, comm.EventChan, comm.ExitChan) {\n\n\tvar unitConfig = opts.UnitConfig\n\tvar contextDir = opts.ContextDir\n\n\tvar log = make(chan comm.LogEntry, 1)\n\tvar event = make(chan comm.Event, 1)\n\tvar exit = make(chan error)\n\n\tgo func() {\n\t\tvar err error\n\n\t\tif unitConfig == nil {\n\t\t\texit <- errors.New(\"unit config may not be nil\")\n\t\t\treturn\n\t\t}\n\n\t\tparser := p.NewParser(p.NewParserOptions{\n\t\t\tContextDir: contextDir,\n\t\t\tLog: log,\n\t\t\tEvent: event,\n\t\t})\n\t\tcommandSequence := parser.Parse(unitConfig)\n\n\t\tbuilder := b.NewBuilder(b.NewBuilderOptions{\n\t\t\tContextDir: contextDir,\n\t\t\tLog: log,\n\t\t\tEvent: event,\n\t\t})\n\t\tbuilder.KeepTemporaryTag = shouldKeepTemporaryTag(flags)\n\t\tif err = builder.BuildCommandSequence(commandSequence); err != nil {\n\t\t\texit <- err\n\t\t\treturn\n\t\t}\n\n\t\texit <- nil\n\t}()\n\n\treturn log, event, exit\n}", "func logMsg(format string, a ...interface{}) {\n\tmsg := fmt.Sprintf(format, 
a...)\n\tlog.Println(msg)\n\tdiscord.ChannelMessageSend(logChannel, msg)\n}", "func TestMessages(t *testing.T) {\n\tt.Parallel()\n\tdefer logging.HideLogs(t)()\n\n\tnewWait := func(exitStatus uint32) *wait.Wait {\n\t\treturn &wait.Wait{\n\t\t\tShell: &shell.Shell{\n\t\t\t\tCheckStmt: \"test\",\n\t\t\t\tStatus: &shell.CommandResults{\n\t\t\t\t\tExitStatus: exitStatus,\n\t\t\t\t},\n\t\t\t},\n\t\t\tRetrier: &wait.Retrier{},\n\t\t}\n\t}\n\n\tt.Run(\"passed message\", func(t *testing.T) {\n\t\twait := newWait(0)\n\t\tassert.Equal(t, 1, len(wait.Messages()))\n\n\t\tt.Run(\"failed messages\", func(t *testing.T) {\n\t\t\tassert.Regexp(t, regexp.MustCompile(\"^Passed after\"), wait.Messages()[0])\n\t\t})\n\t})\n\n\tt.Run(\"failed messages\", func(t *testing.T) {\n\t\twait := newWait(1)\n\t\tassert.Equal(t, 2, len(wait.Messages()))\n\n\t\tt.Run(\"failed after\", func(t *testing.T) {\n\t\t\tassert.Regexp(t, regexp.MustCompile(\"^Failed after\"), wait.Messages()[0])\n\t\t})\n\n\t\tt.Run(\"last attempt\", func(t *testing.T) {\n\t\t\tassert.Regexp(t, regexp.MustCompile(\"^Last attempt\"), wait.Messages()[1])\n\t\t})\n\t})\n}", "func (builder testBuilder) Build(config *s2iapi.Config) (*s2iapi.Result, error) {\n\treturn nil, builder.buildError\n}", "func taskFailureSubject(ctx AlertContext) string {\n\tsubj := &bytes.Buffer{}\n\tfailed := []string{}\n\tfor _, test := range ctx.Task.LocalTestResults {\n\t\tif test.Status == evergreen.TestFailedStatus {\n\t\t\tfailed = append(failed, cleanTestName(test.TestFile))\n\t\t}\n\t}\n\n\tswitch {\n\tcase ctx.Task.Details.TimedOut:\n\t\tsubj.WriteString(\"Task Timed Out: \")\n\tcase len(failed) == 1:\n\t\tsubj.WriteString(\"Test Failure: \")\n\tcase len(failed) > 1:\n\t\tsubj.WriteString(\"Test Failures: \")\n\tcase ctx.Task.Details.Description == task.AgentHeartbeat:\n\t\tsubj.WriteString(\"Task System Failure: \")\n\tcase ctx.Task.Details.Type == evergreen.CommandTypeSystem:\n\t\tsubj.WriteString(\"Task System Failure: \")\n\tcase ctx.Task.Details.Type == evergreen.CommandTypeSetup:\n\t\tsubj.WriteString(\"Task Setup Failure: \")\n\tdefault:\n\t\tsubj.WriteString(\"Task Failed: \")\n\t}\n\n\tfmt.Fprintf(subj, \"%s on %s \", ctx.Task.DisplayName, ctx.Build.DisplayName)\n\n\t// include test names if <= 4 failed, otherwise print two plus the number remaining\n\tif len(failed) > 0 {\n\t\tsubj.WriteString(\"(\")\n\t\tif len(failed) <= 4 {\n\t\t\tsubj.WriteString(strings.Join(failed, \", \"))\n\t\t} else {\n\t\t\tfmt.Fprintf(subj, \"%s, %s, +%v more\", failed[0], failed[1], len(failed)-2)\n\t\t}\n\t\tsubj.WriteString(\") \")\n\t}\n\n\tfmt.Fprintf(subj, \"// %s @ %s\", ctx.ProjectRef.DisplayName, ctx.Version.Revision[0:8])\n\treturn subj.String()\n}", "func (p Plugin) Message(repo Repo, build Build) []string {\n\treturn []string{fmt.Sprintf(\"[%s] <%s> (%s)『%s』by %s\",\n\t\tbuild.Status,\n\t\tbuild.Link,\n\t\tbuild.Branch,\n\t\tbuild.Message,\n\t\tbuild.Author,\n\t)}\n}", "func generateLogs(cmd *cobra.Command, args []string) {\n\tif linesTotal <= 0 {\n\t\tklog.Fatalf(\"Invalid total number of lines: %d\", linesTotal)\n\t}\n\n\tif duration <= 0 {\n\t\tklog.Fatalf(\"Invalid duration: %v\", duration)\n\t}\n\n\tdelay := duration / time.Duration(linesTotal)\n\n\tticker := time.NewTicker(delay)\n\tdefer ticker.Stop()\n\tfor id := 0; id < linesTotal; id++ {\n\t\tklog.Info(generateLogLine(id))\n\t\t<-ticker.C\n\t}\n}", "func buildFailureLabel(labelAndArgs ...interface{}) string {\n\n\tif len(labelAndArgs) == 0 {\n\t\treturn \"\"\n\t}\n\n\treturn 
fmt.Sprintf(labelAndArgs[0].(string), labelAndArgs[1:]...) + \"\\n\"\n}", "func TestVoltBuildGitNoVimRepos(t *testing.T) {\n\ttestBuildMatrix(t, voltBuildGitNoVimRepos)\n}", "func testSomeLogMethod(t *testing.T, fn LogMethod, level string, expectOutput bool) {\n\tr, w := io.Pipe()\n\tdefer r.Close()\n\tlog.InitWithWriter(w)\n\n\t// Generate log message\n\trs := randomString()\n\tgo func(fn LogMethod, rs string, w io.WriteCloser) {\n\t\tfn(rs)\n\t\tw.Close()\n\t}(fn, rs, w)\n\n\t// Check we got the message\n\tvar output []byte = make([]byte, 1024)\n\t_, readErr := r.Read(output)\n\tif readErr != nil && readErr != io.EOF {\n\t\tt.Fatalf(\"Cannot read log output from io.Pipe: %v\", readErr)\n\t}\n\tif readErr == io.EOF {\n\t\tif expectOutput {\n\t\t\t// This is what we wanted\n\t\t\tt.Fatalf(\"Got EOF when output was expected\")\n\t\t} else {\n\t\t\treturn\n\t\t}\n\t}\n\tt.Logf(\"Log output: <<<%s>>>\", string(output))\n\tif !strings.Contains(string(output), rs) {\n\t\tt.Error(\"Log output did not have message\")\n\t}\n\tif !strings.Contains(string(output), level) {\n\t\tt.Error(\"Log output did not have expected level\")\n\t}\n}", "func TestLogging(t *testing.T) {\n\ttmpOut := bytes.Buffer{}\n\n\tlogger := log.New()\n\tlogger.Out = &tmpOut\n\tlogger.Level = log.TraceLevel\n\tlogger.Date = \"\"\n\n\tlogger.Printf(\"Prolly worked\")\n\tif tmpOut.String() != \"Prolly worked\\n\" {\n\t\tt.Errorf(\"Failed to Printf - '%s'\", tmpOut.String())\n\t}\n\ttmpOut.Reset()\n\n\tlogger.Trace(\"Prolly worked\")\n\tif tmpOut.String() != \"TRACE Prolly worked\\n\" {\n\t\tt.Errorf(\"Failed to Printf - '%s'\", tmpOut.String())\n\t}\n\ttmpOut.Reset()\n\n\tlogger.Debug(\"Prolly worked\")\n\tif tmpOut.String() != \"DEBUG Prolly worked\\n\" {\n\t\tt.Errorf(\"Failed to Printf - '%s'\", tmpOut.String())\n\t}\n\ttmpOut.Reset()\n\n\tlogger.Info(\"Prolly worked\")\n\tif tmpOut.String() != \" INFO Prolly worked\\n\" {\n\t\tt.Errorf(\"Failed to Printf - '%s'\", tmpOut.String())\n\t}\n\ttmpOut.Reset()\n\n\tlogger.Warn(\"Prolly worked\")\n\tif tmpOut.String() != \" WARN Prolly worked\\n\" {\n\t\tt.Errorf(\"Failed to Printf - '%s'\", tmpOut.String())\n\t}\n\ttmpOut.Reset()\n\n\tlogger.Error(\"Prolly worked\")\n\tif tmpOut.String() != \"ERROR Prolly worked\\n\" {\n\t\tt.Errorf(\"Failed to Printf - '%s'\", tmpOut.String())\n\t}\n\ttmpOut.Reset()\n\n\tlogger.Fatal(\"Prolly worked\")\n\tif tmpOut.String() != \"FATAL Prolly worked\\n\" {\n\t\tt.Errorf(\"Failed to Printf - '%s'\", tmpOut.String())\n\t}\n\ttmpOut.Reset()\n\n\tlogger.Date = \"2006-01-02 15:04:05.00000\"\n\tlogger.Trace(\"Prolly worked\")\n\tlogger.Debug(\"Prolly worked\")\n\tlogger.Info(\"Prolly worked\")\n\tlogger.Warn(\"Prolly worked\")\n\tlogger.Error(\"Prolly worked\")\n\tlogger.Fatal(\"Prolly worked\")\n}", "func TestBuilder_Build(t *testing.T) {\n\tb := newDockerBuilder(t)\n\n\tctx := context.Background()\n\tw := new(bytes.Buffer)\n\n\t_, err := b.Build(ctx, w, builder.BuildOptions{\n\t\tRepository: \"remind101/acme-inc\",\n\t\tBranch: \"master\",\n\t\tSha: \"827fecd2d36ebeaa2fd05aa8ef3eed1e56a8cd57\",\n\t})\n\tassert.NoError(t, err)\n\n\tif !regexp.MustCompile(`Successfully built`).MatchString(w.String()) {\n\t\tt.Log(w.String())\n\t\tt.Fatal(\"Expected image to be built\")\n\t}\n}", "func main() {\n\tlog.Printf(\"Build var 'version' is: %s\", version)\n\tlog.Printf(\"Build var 'time' is: %s\", buildDate)\n\tcmd.Execute()\n}", "func main() {\n\tlog.Println(\"TEST CASE 1:\")\n\ttest_case_1()\n\tlog.Println(\"\\n\\n\")\n\n\tlog.Println(\"TEST CASE 
2:\")\n\ttest_case_2()\n\tlog.Println(\"\\n\\n\")\n\n\tlog.Println(\"TEST CASE 3:\")\n\ttest_case_3()\n\tlog.Println(\"\\n\\n\")\n\n\tlog.Println(\"TEST CASE 4:\")\n\ttest_case_4()\n\tlog.Println(\"\\n\\n\")\n}", "func (state *BuildState) LogTestResult(target *BuildTarget, status BuildResultStatus, results *TestSuite, coverage *TestCoverage, err error, format string, args ...interface{}) {\n\tstate.logResult(&BuildResult{\n\t\tLabel: target.Label,\n\t\ttarget: target,\n\t\tStatus: status,\n\t\tErr: err,\n\t\tDescription: fmt.Sprintf(format, args...),\n\t\tTests: *results,\n\t})\n\tstate.progress.mutex.Lock()\n\tdefer state.progress.mutex.Unlock()\n\tstate.Coverage.Aggregate(coverage)\n}", "func newLogMessage(e *events.Envelope) *Event {\n\tvar m = e.GetLogMessage()\n\tvar r = LabelSet{\n\t\t\"cf_app_id\": m.GetAppId(),\n\t\t\"cf_origin\": \"firehose\",\n\t\t\"deployment\": e.GetDeployment(),\n\t\t\"event_type\": e.GetEventType().String(),\n\t\t\"job\": e.GetJob(),\n\t\t\"job_index\": e.GetIndex(),\n\t\t\"message_type\": m.GetMessageType().String(),\n\t\t\"origin\": e.GetOrigin(),\n\t\t\"source_instance\": m.GetSourceInstance(),\n\t\t\"source_type\": m.GetSourceType(),\n\t}\n\tmsg := string(m.GetMessage())\n\treturn &Event{\n\t\tLabels: r,\n\t\tMsg: msg,\n\t}\n}", "func NewLogMessage(m string) LogMessage {\n\treturn LogMessage{\n\t\tts: time.Now(),\n\t\tmessage: m,\n\t}\n}", "func GenerateHTMLReport(totalTestTime, testDate string, testSummary []TestOverview, testSuiteDetails map[string]TestSuiteDetails) {\n\ttotalPassedTests := 0\n\ttotalFailedTests := 0\n\ttotalSkippedTests := 0\n\ttemplates := make([]template.HTML, 0)\n\tfor _, testSuite := range testSuiteDetails {\n\t\ttotalPassedTests = totalPassedTests + testSuite.PassedTests\n\t\ttotalFailedTests = totalFailedTests + testSuite.FailedTests\n\t\ttotalSkippedTests = totalSkippedTests + testSuite.SkippedTests\n\t\t// display testSuiteName\n\t\thtmlString := template.HTML(\"<div type=\\\"button\\\" class=\\\"collapsible\\\">\\n\")\n\t\tpackageInfoTemplateString := template.HTML(\"\")\n\n\t\tpackageInfoTemplateString = \"<div>{{.testsuiteName}}</div>\" + \"\\n\" + \"<div>Run Time: {{.elapsedTime}}m</div> \" + \"\\n\"\n\t\tpackageInfoTemplate, err := template.New(\"packageInfoTemplate\").Parse(string(packageInfoTemplateString))\n\t\tif err != nil {\n\t\t\tlog.Println(\"error parsing package info template\", err)\n\t\t\tos.Exit(1)\n\t\t}\n\n\t\tvar processedPackageTemplate bytes.Buffer\n\t\terr = packageInfoTemplate.Execute(&processedPackageTemplate, map[string]string{\n\t\t\t\"testsuiteName\": testSuite.TestSuiteName + \"_\" + OS,\n\t\t\t\"elapsedTime\": fmt.Sprintf(\"%.2f\", testSuite.ElapsedTime),\n\t\t})\n\t\tif err != nil {\n\t\t\tlog.Println(\"error applying package info template: \", err)\n\t\t\tos.Exit(1)\n\t\t}\n\n\t\tif testSuite.Status == \"pass\" {\n\t\t\tpackageInfoTemplateString = \"<div class=\\\"collapsibleHeading packageCardLayout successBackgroundColor \\\">\" +\n\t\t\t\ttemplate.HTML(processedPackageTemplate.Bytes()) + \"</div>\"\n\t\t} else if testSuite.Status == \"fail\" {\n\t\t\tpackageInfoTemplateString = \"<div class=\\\"collapsibleHeading packageCardLayout failBackgroundColor \\\">\" +\n\t\t\t\ttemplate.HTML(processedPackageTemplate.Bytes()) + \"</div>\"\n\t\t} else {\n\t\t\tpackageInfoTemplateString = \"<div class=\\\"collapsibleHeading packageCardLayout skipBackgroundColor \\\">\" +\n\t\t\t\ttemplate.HTML(processedPackageTemplate.Bytes()) + \"</div>\"\n\t\t}\n\n\t\thtmlString = htmlString + \"\\n\" + 
packageInfoTemplateString\n\t\ttestInfoTemplateString := template.HTML(\"\")\n\n\t\t// display testCases\n\t\tfor _, pt := range testSummary {\n\t\t\ttestHTMLTemplateString := template.HTML(\"\")\n\t\t\tif len(pt.TestCases) == 0 {\n\t\t\t\tlog.Println(\"Test run failed for \", pt.TestSuiteName, \"no testcases were executed\")\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif pt.TestSuiteName == testSuite.TestSuiteName {\n\t\t\t\tif testSuite.FailedTests == 0 {\n\t\t\t\t\ttestHTMLTemplateString = \"<div type=\\\"button\\\" class=\\\"collapsible \\\">\" +\n\t\t\t\t\t\t\"\\n\" + \"<div class=\\\"collapsibleHeading testCardLayout successBackgroundColor \\\">\" +\n\t\t\t\t\t\t\"<div>+ {{.testName}}</div>\" + \"\\n\" + \"<div>{{.elapsedTime}}</div>\" + \"\\n\" +\n\t\t\t\t\t\t\"</div>\" + \"\\n\" +\n\t\t\t\t\t\t\"<div class=\\\"collapsibleHeadingContent\\\">\"\n\t\t\t\t} else if testSuite.FailedTests > 0 {\n\t\t\t\t\ttestHTMLTemplateString = \"<div type=\\\"button\\\" class=\\\"collapsible \\\">\" +\n\t\t\t\t\t\t\"\\n\" + \"<div class=\\\"collapsibleHeading testCardLayout failBackgroundColor \\\">\" +\n\t\t\t\t\t\t\"<div>+ {{.testName}}</div>\" + \"\\n\" + \"<div>{{.elapsedTime}}</div>\" + \"\\n\" +\n\t\t\t\t\t\t\"</div>\" + \"\\n\" +\n\t\t\t\t\t\t\"<div class=\\\"collapsibleHeadingContent\\\">\"\n\t\t\t\t} else if testSuite.SkippedTests > 0 {\n\t\t\t\t\ttestHTMLTemplateString = \"<div type=\\\"button\\\" class=\\\"collapsible \\\">\" +\n\t\t\t\t\t\t\"\\n\" + \"<div class=\\\"collapsibleHeading testCardLayout skipBackgroundColor \\\">\" +\n\t\t\t\t\t\t\"<div>+ {{.testName}}</div>\" + \"\\n\" + \"<div>{{.elapsedTime}}</div>\" + \"\\n\" +\n\t\t\t\t\t\t\"</div>\" + \"\\n\" +\n\t\t\t\t\t\t\"<div class=\\\"collapsibleHeadingContent\\\">\"\n\t\t\t\t}\n\t\t\t\ttestTemplate, err := template.New(\"Test\").Parse(string(testHTMLTemplateString))\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Println(\"error parsing tests template: \", err)\n\t\t\t\t\tos.Exit(1)\n\t\t\t\t}\n\t\t\t\tvar processedTestTemplate bytes.Buffer\n\t\t\t\terr = testTemplate.Execute(&processedTestTemplate, map[string]string{\n\t\t\t\t\t\"testName\": \"TestCases\",\n\t\t\t\t})\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Println(\"error applying test template: \", err)\n\t\t\t\t\tos.Exit(1)\n\t\t\t\t}\n\n\t\t\t\ttestHTMLTemplateString = template.HTML(processedTestTemplate.Bytes())\n\t\t\t\ttestCaseHTMLTemplateString := template.HTML(\"\")\n\n\t\t\t\tfor _, tC := range pt.TestCases {\n\t\t\t\t\ttestCaseHTMLTemplateString = \"<div>{{.testName}}</div>\" + \"\\n\" + \"<div>{{.elapsedTime}}m</div>\"\n\t\t\t\t\ttestCaseTemplate, err := template.New(\"testCase\").Parse(string(testCaseHTMLTemplateString))\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tlog.Println(\"error parsing test case template: \", err)\n\t\t\t\t\t\tos.Exit(1)\n\t\t\t\t\t}\n\n\t\t\t\t\tvar processedTestCaseTemplate bytes.Buffer\n\t\t\t\t\terr = testCaseTemplate.Execute(&processedTestCaseTemplate, map[string]string{\n\t\t\t\t\t\t\"testName\": tC.TestCaseName,\n\t\t\t\t\t\t\"elapsedTime\": fmt.Sprintf(\"%f\", tC.ElapsedTime),\n\t\t\t\t\t})\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tlog.Println(\"error applying test case template: \", err)\n\t\t\t\t\t\tos.Exit(1)\n\t\t\t\t\t}\n\n\t\t\t\t\tif tC.Status == \"passed\" {\n\t\t\t\t\t\ttestCaseHTMLTemplateString = \"<div class=\\\"testCardLayout successBackgroundColor \\\">\" + template.HTML(processedTestCaseTemplate.Bytes()) + \"</div>\"\n\n\t\t\t\t\t} else if tC.Status == \"failed\" {\n\t\t\t\t\t\ttestCaseHTMLTemplateString = \"<div class=\\\"testCardLayout 
failBackgroundColor \\\">\" + template.HTML(processedTestCaseTemplate.Bytes()) + \"</div>\"\n\n\t\t\t\t\t} else {\n\t\t\t\t\t\ttestCaseHTMLTemplateString = \"<div class=\\\"testCardLayout skipBackgroundColor \\\">\" + template.HTML(processedTestCaseTemplate.Bytes()) + \"</div>\"\n\t\t\t\t\t}\n\t\t\t\t\ttestHTMLTemplateString = testHTMLTemplateString + \"\\n\" + testCaseHTMLTemplateString\n\t\t\t\t}\n\t\t\t\ttestHTMLTemplateString = testHTMLTemplateString + \"\\n\" + \"</div>\" + \"\\n\" + \"</div>\"\n\t\t\t\ttestInfoTemplateString = testInfoTemplateString + \"\\n\" + testHTMLTemplateString\n\t\t\t}\n\t\t}\n\t\thtmlString = htmlString + \"\\n\" + \"<div class=\\\"collapsibleHeadingContent\\\">\\n\" + testInfoTemplateString + \"\\n\" + \"</div>\"\n\t\thtmlString = htmlString + \"\\n\" + \"</div>\"\n\t\ttemplates = append(templates, htmlString)\n\t}\n\treportTemplate := template.New(\"report-template.html\")\n\treportTemplateData, err := Asset(\"report-template.html\")\n\tif err != nil {\n\t\tlog.Println(\"error retrieving report-template.html: \", err)\n\t\tos.Exit(1)\n\t}\n\n\treport, err := reportTemplate.Parse(string(reportTemplateData))\n\tif err != nil {\n\t\tlog.Println(\"error parsing report-template.html: \", err)\n\t\tos.Exit(1)\n\t}\n\n\tvar processedTemplate bytes.Buffer\n\ttype templateData struct {\n\t\tHTMLElements []template.HTML\n\t\tFailedTests int\n\t\tPassedTests int\n\t\tSkippedTests int\n\t\tTotalTestTime string\n\t\tTestDate string\n\t}\n\n\terr = report.Execute(&processedTemplate,\n\t\t&templateData{\n\t\t\tHTMLElements: templates,\n\t\t\tFailedTests: totalFailedTests,\n\t\t\tPassedTests: totalPassedTests,\n\t\t\tSkippedTests: totalSkippedTests,\n\t\t\tTotalTestTime: totalTestTime,\n\t\t\tTestDate: testDate,\n\t\t},\n\t)\n\tif err != nil {\n\t\tlog.Println(\"error applying report-template.html: \", err)\n\t\tos.Exit(1)\n\t}\n\thtmlReport = strings.Split(fileName, \".\")[0] + \"_results.html\"\n\tbucketName = strings.Split(htmlReport, \"_\")[0] + \"-results\"\n\tfmt.Println(bucketName)\n\terr = ioutil.WriteFile(htmlReport, processedTemplate.Bytes(), 0644)\n\tif err != nil {\n\t\tlog.Println(\"error writing report.html file: \", err)\n\t\tos.Exit(1)\n\t}\n}", "func (m *MockBuilder) Build() log.Logger {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Build\")\n\tret0, _ := ret[0].(log.Logger)\n\treturn ret0\n}", "func (o *GetBuildLogsOptions) getProwBuildLog(kubeClient kubernetes.Interface, tektonClient tektonclient.Interface, jxClient versioned.Interface, ns string, tektonEnabled bool) error {\n\tif o.CurrentFolder {\n\t\tcurrentDirectory, err := os.Getwd()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tgitRepository, err := gits.NewGitCLI().Info(currentDirectory)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\to.BuildFilter.Repository = gitRepository.Name\n\t\to.BuildFilter.Owner = gitRepository.Organisation\n\t}\n\n\tvar err error\n\n\tif o.TektonLogger == nil {\n\t\to.TektonLogger = &logs.TektonLogger{\n\t\t\tKubeClient: kubeClient,\n\t\t\tTektonClient: tektonClient,\n\t\t\tJXClient: jxClient,\n\t\t\tNamespace: ns,\n\t\t\tFailIfPodFails: o.FailIfPodFails,\n\t\t}\n\t}\n\tvar waitableCondition bool\n\tf := func() error {\n\t\twaitableCondition, err = o.getTektonLogs(kubeClient, tektonClient, jxClient, ns)\n\t\treturn err\n\t}\n\n\terr = f()\n\tif err != nil {\n\t\tif o.Wait && waitableCondition {\n\t\t\tlog.Logger().Info(\"The selected pipeline didn't start, let's wait a bit\")\n\t\t\terr := util.Retry(o.WaitForPipelineDuration, f)\n\t\t\tif err != nil 
{\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\treturn err\n\t}\n\treturn nil\n}", "func (p *Project) LogProject() {\n\tlogMessage := `\t\tProject information\n\tProjectID: \t\t %v\n\tProjectName: %v\n\tIcon: \t\t\t %v\n\tMember: \t\t %v\n\tDescription: \t %v\n\t\tStructure \n\t%v`\n\tfmt.Printf(logMessage, p.ProjectID, p.ProjectName, p.Icon, p.Member, p.Description, p.ProjectStructure)\n\tfmt.Println(\"\")\n}", "func MakeLogString(ll LogsLevels, goroutineID, errorMessage string, err error) string {\n\terrorString := \"[\" + ll.String() + \"] \"\n\tif goroutineID != \"\" {\n\t\terrorString += \"(\" + goroutineID + \") \"\n\t}\n\terrorString += errorMessage\n\tif err != nil {\n\t\terrorString += \" Error: \" + err.Error()\n\t}\n\n\treturn errorString\n}", "func TestVoltBuildStaticNoVimRepos(t *testing.T) {\n\ttestBuildMatrix(t, voltBuildStaticNoVimRepos)\n}", "func BuildMessage(url string, params map[string]string) string {\n\ti := 0;\n\tkeys := make([]string,len(params));\n\tfor k,_ := range params {\n\t\tkeys[i] = k;\n\t\ti = i + 1;\n\t}\n\tsort.SortStrings(keys);\n\n\tj := 0;\n\tmss := make([]string,len(params));\n\tfor k := range keys {\n\t\tmss[j] = urllib.Urlquote(keys[k]) + \"=\" + urllib.Urlquote(params[keys[k]]);\n\t\tj = j + 1;\n\t}\n\tms := strings.Join(mss, \"&\");\n\tlog.Stderrf(\"ms:%s\", ms);\n\n\tm := strings.Join([]string{\"GET\", urllib.Urlquote(url), urllib.Urlquote(ms)}, \"&\");\n\tlog.Stderrf(\"m:%s\", m);\n\n\treturn m;\n}", "func createLog(\n\t_ context.Context,\n\tset receiver.CreateSettings,\n\tcfg component.Config,\n\tconsumer consumer.Logs,\n) (receiver.Logs, error) {\n\toCfg := cfg.(*Config)\n\tr, err := receivers.GetOrAdd(oCfg, func() (*otlpReceiver, error) {\n\t\treturn newOtlpReceiver(oCfg, set)\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err = r.Unwrap().registerLogsConsumer(consumer); err != nil {\n\t\treturn nil, err\n\t}\n\treturn r, nil\n}", "func printTest(args *Args, isJustTest bool) {\n\tmessage := fmt.Sprintf(`%s %s configuration file %s test is `, appname, appversion, args.ConfigFile)\n\n\tdefer func() {\n\t\tif err := recover(); err != nil {\n\t\t\tfmt.Printf(\"%s wrong !!!\\n> %s\\n\", message, err)\n\t\t\tos.Exit(0)\n\t\t}\n\t}()\n\n\t// Get config file content to struct\n\tcfg := config.GetConfig(args.ConfigFile)\n\n\t// check statistic redis source\n\tif \"redis\" == strings.ToLower(cfg.Statistic.SourceType) {\n\t\toredis.GetInstancePanic(cfg.Statistic.RedisSource)\n\t}\n\n\tenabledQueueCount := 0\n\t// sort the DelayOnFailure array\n\tfor _, r := range cfg.Redis {\n\t\toredis.GetInstancePanic(r.Config)\n\t\tfor _, n := range r.Queues {\n\t\t\tif n.IsEnabled {\n\t\t\t\tenabledQueueCount++\n\t\t\t}\n\t\t}\n\t}\n\n\tfor _, r := range cfg.RabbitMQ {\n\t\tr.Config.GetConnectionPanic()\n\t\tfor _, n := range r.Queues {\n\t\t\tif n.IsEnabled {\n\t\t\t\tenabledQueueCount++\n\t\t\t}\n\t\t}\n\t}\n\n\tif enabledQueueCount < 1 {\n\t\tpanic(`There has no enabled queue, please check configure file \"IsEnabled\" fields for every queue`)\n\t}\n\n\t// if -t\n\tif isJustTest {\n\t\tfmt.Printf(\"%s ok\\n\", message)\n\t\tos.Exit(0)\n\t}\n}", "func (ts *Tester) Build() error {\n\t// no-op\n\treturn nil\n}", "func (t *SpecTest) String() string { return strings.Join(t.descstack, \" \") }", "func (t *tester) runTests(ctx context.Context, builderType string, info *buildInfo) builderResult {\n\tlog.Printf(\"%s: creating buildlet\", builderType)\n\tc, err := createBuildletWithRetry(ctx, t.coordinator, builderType)\n\tif err != nil {\n\t\treturn 
builderResult{builderType: builderType, err: fmt.Errorf(\"failed to create buildlet: %s\", err)}\n\t}\n\tbuildletName := c.RemoteName()\n\tlog.Printf(\"%s: created buildlet (%s)\", builderType, buildletName)\n\tdefer func() {\n\t\tif err := c.Close(); err != nil {\n\t\t\tlog.Printf(\"%s: unable to close buildlet %q: %s\", builderType, buildletName, err)\n\t\t} else {\n\t\t\tlog.Printf(\"%s: destroyed buildlet\", builderType)\n\t\t}\n\t}()\n\n\tbuildConfig, ok := dashboard.Builders[builderType]\n\tif !ok {\n\t\tlog.Printf(\"%s: unknown builder type\", builderType)\n\t\treturn builderResult{builderType: builderType, err: errors.New(\"unknown builder type\")}\n\t}\n\tbootstrapURL := buildConfig.GoBootstrapURL(buildenv.Production)\n\t// Assume if bootstrapURL == \"\" the buildlet is already bootstrapped\n\tif bootstrapURL != \"\" {\n\t\tif err := c.PutTarFromURL(ctx, bootstrapURL, \"go1.4\"); err != nil {\n\t\t\tlog.Printf(\"%s: failed to bootstrap buildlet: %s\", builderType, err)\n\t\t\treturn builderResult{builderType: builderType, err: fmt.Errorf(\"failed to bootstrap buildlet: %s\", err)}\n\t\t}\n\t}\n\n\tsuffix := make([]byte, 4)\n\trand.Read(suffix)\n\n\tvar output io.Writer\n\tvar logURL string\n\n\tif t.gcs != nil {\n\t\tgcsBucket, gcsObject := *gcsBucket, fmt.Sprintf(\"%s-%x/%s\", info.revision, suffix, builderType)\n\t\tgcsWriter, err := newLiveWriter(ctx, t.gcs.Bucket(gcsBucket).Object(gcsObject))\n\t\tif err != nil {\n\t\t\tlog.Printf(\"%s: failed to create log writer: %s\", builderType, err)\n\t\t\treturn builderResult{builderType: builderType, err: fmt.Errorf(\"failed to create log writer: %s\", err)}\n\t\t}\n\t\tdefer func() {\n\t\t\tif err := gcsWriter.Close(); err != nil {\n\t\t\t\tlog.Printf(\"%s: failed to flush GCS writer: %s\", builderType, err)\n\t\t\t}\n\t\t}()\n\t\tlogURL = \"https://storage.cloud.google.com/\" + path.Join(gcsBucket, gcsObject)\n\t\toutput = gcsWriter\n\t} else {\n\t\toutput = &localWriter{buildletName}\n\t}\n\n\twork, err := c.WorkDir(ctx)\n\tif err != nil {\n\t\tlog.Printf(\"%s: failed to retrieve work dir: %s\", builderType, err)\n\t\treturn builderResult{builderType: builderType, err: fmt.Errorf(\"failed to get work dir: %s\", err)}\n\t}\n\n\tenv := append(buildConfig.Env(), \"GOPATH=\"+work+\"/gopath\", \"GOROOT_FINAL=\"+dashboard.GorootFinal(buildConfig.GOOS()), \"GOROOT=\"+work+\"/go\")\n\t// Because we are unable to determine the internal GCE hostname of the\n\t// coordinator, we cannot use the same GOPROXY proxy that the public TryBots\n\t// use to get around the disabled network. 
Instead of using that proxy\n\t// proxy, we instead wait to disable the network until right before we\n\t// actually execute the tests, and manually download module dependencies\n\t// using \"go mod download\" if we are testing a subrepo branch.\n\tvar disableNetwork bool\n\tfor i, v := range env {\n\t\tif v == \"GO_DISABLE_OUTBOUND_NETWORK=1\" {\n\t\t\tenv = append(env[:i], env[i+1:]...)\n\t\t\tdisableNetwork = true\n\t\t\tbreak\n\t\t}\n\t}\n\tdirName := \"go\"\n\n\tif info.isSubrepo() {\n\t\tdirName = info.branch\n\n\t\t// fetch and build go at master first\n\t\tif err := c.PutTar(ctx, bytes.NewReader(info.goArchive), \"go\"); err != nil {\n\t\t\tlog.Printf(\"%s: failed to upload change archive: %s\", builderType, err)\n\t\t\treturn builderResult{builderType: builderType, err: fmt.Errorf(\"failed to upload change archive: %s\", err)}\n\t\t}\n\t\tif err := c.Put(ctx, strings.NewReader(\"devel \"+info.revision), \"go/VERSION\", 0644); err != nil {\n\t\t\tlog.Printf(\"%s: failed to upload VERSION file: %s\", builderType, err)\n\t\t\treturn builderResult{builderType: builderType, err: fmt.Errorf(\"failed to upload VERSION file: %s\", err)}\n\t\t}\n\n\t\tcmd, args := \"go/\"+buildConfig.MakeScript(), buildConfig.MakeScriptArgs()\n\t\tremoteErr, execErr := c.Exec(ctx, cmd, buildlet.ExecOpts{\n\t\t\tOutput: output,\n\t\t\tExtraEnv: append(env, \"GO_DISABLE_OUTBOUND_NETWORK=0\"),\n\t\t\tArgs: args,\n\t\t\tOnStartExec: func() {\n\t\t\t\tlog.Printf(\"%s: starting make.bash %s\", builderType, logURL)\n\t\t\t},\n\t\t})\n\t\tif execErr != nil {\n\t\t\tlog.Printf(\"%s: failed to execute make.bash: %s\", builderType, execErr)\n\t\t\treturn builderResult{builderType: builderType, err: fmt.Errorf(\"failed to execute make.bash: %s\", err)}\n\t\t}\n\t\tif remoteErr != nil {\n\t\t\tlog.Printf(\"%s: make.bash failed: %s\", builderType, remoteErr)\n\t\t\treturn builderResult{builderType: builderType, err: fmt.Errorf(\"make.bash failed: %s\", remoteErr)}\n\t\t}\n\t}\n\n\tif err := c.PutTar(ctx, bytes.NewReader(info.changeArchive), dirName); err != nil {\n\t\tlog.Printf(\"%s: failed to upload change archive: %s\", builderType, err)\n\t\treturn builderResult{builderType: builderType, err: fmt.Errorf(\"failed to upload change archive: %s\", err)}\n\t}\n\n\tif !info.isSubrepo() {\n\t\tif err := c.Put(ctx, strings.NewReader(\"devel \"+info.revision), \"go/VERSION\", 0644); err != nil {\n\t\t\tlog.Printf(\"%s: failed to upload VERSION file: %s\", builderType, err)\n\t\t\treturn builderResult{builderType: builderType, err: fmt.Errorf(\"failed to upload VERSION file: %s\", err)}\n\t\t}\n\t}\n\n\tvar cmd string\n\tvar args []string\n\tif info.isSubrepo() {\n\t\tcmd, args = \"go/bin/go\", []string{\"test\", \"./...\"}\n\t} else {\n\t\tcmd, args = \"go/\"+buildConfig.AllScript(), buildConfig.AllScriptArgs()\n\t}\n\topts := buildlet.ExecOpts{\n\t\tOutput: output,\n\t\tExtraEnv: env,\n\t\tArgs: args,\n\t\tOnStartExec: func() {\n\t\t\tlog.Printf(\"%s: starting tests %s\", builderType, logURL)\n\t\t},\n\t}\n\tif info.isSubrepo() {\n\t\topts.Dir = dirName\n\n\t\tremoteErr, execErr := c.Exec(ctx, \"go/bin/go\", buildlet.ExecOpts{\n\t\t\tArgs: []string{\"mod\", \"download\"},\n\t\t\tExtraEnv: append(env, \"GO_DISABLE_OUTBOUND_NETWORK=0\"),\n\t\t\tDir: dirName,\n\t\t\tOutput: output,\n\t\t\tOnStartExec: func() {\n\t\t\t\tlog.Printf(\"%s: downloading modules %s\", builderType, logURL)\n\t\t\t},\n\t\t})\n\t\tif execErr != nil {\n\t\t\tlog.Printf(\"%s: failed to execute go mod download: %s\", builderType, execErr)\n\t\t\treturn 
builderResult{builderType: builderType, err: fmt.Errorf(\"failed to execute go mod download: %s\", err)}\n\t\t}\n\t\tif remoteErr != nil {\n\t\t\tlog.Printf(\"%s: go mod download failed: %s\", builderType, remoteErr)\n\t\t\treturn builderResult{builderType: builderType, err: fmt.Errorf(\"go mod download failed: %s\", remoteErr)}\n\t\t}\n\t}\n\tif disableNetwork {\n\t\topts.ExtraEnv = append(opts.ExtraEnv, \"GO_DISABLE_OUTBOUND_NETWORK=1\")\n\t}\n\tremoteErr, execErr := c.Exec(ctx, cmd, opts)\n\tif execErr != nil {\n\t\tlog.Printf(\"%s: failed to execute tests: %s\", builderType, execErr)\n\t\treturn builderResult{builderType: builderType, err: fmt.Errorf(\"failed to execute all.bash: %s\", err)}\n\t}\n\tif remoteErr != nil {\n\t\tlog.Printf(\"%s: tests failed: %s\", builderType, remoteErr)\n\t\treturn builderResult{builderType: builderType, logURL: logURL, passed: false}\n\t}\n\tlog.Printf(\"%s: tests succeeded\", builderType)\n\treturn builderResult{builderType: builderType, logURL: logURL, passed: true}\n}", "func toLog(context string, str string) {\n\tnow := time.Now()\n\n\tfmt.Println(\"[\" + now.Format(time.RFC822) + \"] \" + context + \": \" + str)\n}", "func testSyslogMessageDelivery(t *testing.T, ti tInfo, dummyObjRef *cluster.Node, messages map[chan string][]struct {\n\tSubstrs []string\n\tMsgFormat monitoring.MonitoringExportFormat\n}) {\n\n\tvar m sync.Mutex\n\twg := new(sync.WaitGroup)\n\twg.Add(1) // events recorder\n\n\t// define list of events to be recorded\n\trecordEvents := []*struct {\n\t\teventType eventtypes.EventType\n\t\tmessage string\n\t\tobjRef interface{}\n\t\trepeat int // number of times to repeat the event\n\t}{\n\t\t{eventtypes.SERVICE_STARTED, fmt.Sprintf(\"(tenant:%s) test %s started on %s\", dummyObjRef.Tenant, t.Name(), dummyObjRef.GetKind()), *dummyObjRef, 10},\n\t\t{eventtypes.SERVICE_RUNNING, fmt.Sprintf(\"(tenant:%s) test %s running on %s\", dummyObjRef.Tenant, t.Name(), dummyObjRef.GetKind()), *dummyObjRef, 10},\n\t\t{eventtypes.SERVICE_UNRESPONSIVE, fmt.Sprintf(\"(tenant:%s) test %s unresponsive on %s\", dummyObjRef.Tenant, t.Name(), dummyObjRef.GetKind()), *dummyObjRef, 15},\n\t\t{eventtypes.SERVICE_STOPPED, fmt.Sprintf(\"(tenant:%s) test %s stopped on %s\", dummyObjRef.Tenant, t.Name(), dummyObjRef.GetKind()), *dummyObjRef, 11},\n\n\t\t{eventtypes.ELECTION_STARTED, fmt.Sprintf(\"(tenant:%s) dummy election: election started %s\", dummyObjRef.Tenant, t.Name()), *dummyObjRef, 10},\n\t\t{eventtypes.LEADER_ELECTED, fmt.Sprintf(\"(tenant:%s) dummy election: leader elected %s\", dummyObjRef.Tenant, t.Name()), *dummyObjRef, 10},\n\t\t{eventtypes.LEADER_CHANGED, fmt.Sprintf(\"(tenant:%s) dummy election: leader changed %s\", dummyObjRef.Tenant, t.Name()), *dummyObjRef, 15},\n\t\t{eventtypes.LEADER_LOST, fmt.Sprintf(\"(tenant:%s) dummy election: leader lost %s\", dummyObjRef.Tenant, t.Name()), *dummyObjRef, 11},\n\t\t{eventtypes.ELECTION_STOPPED, fmt.Sprintf(\"(tenant:%s) dummy election: election stopped %s\", dummyObjRef.Tenant, t.Name()), *dummyObjRef, 15},\n\n\t\t// events in non default tenant\n\t\t{eventtypes.SERVICE_STARTED, fmt.Sprintf(\"(tenant:%s) test %s started\", globals.DefaultTenant, t.Name()), nil, 10},\n\t\t{eventtypes.SERVICE_RUNNING, fmt.Sprintf(\"(tenant:%s) test %s running\", globals.DefaultTenant, t.Name()), nil, 10},\n\t\t{eventtypes.SERVICE_UNRESPONSIVE, fmt.Sprintf(\"(tenant:%s) test %s unresponsive\", globals.DefaultTenant, t.Name()), nil, 15},\n\t\t{eventtypes.SERVICE_STOPPED, fmt.Sprintf(\"(tenant:%s) test %s stopped\", 
globals.DefaultTenant, t.Name()), nil, 11},\n\t}\n\n\t// start recorder\n\trecorderEventsDir, err := ioutil.TempDir(\"\", t.Name())\n\tAssertOk(t, err, \"failed to create recorder events directory\")\n\tdefer os.RemoveAll(recorderEventsDir)\n\tgo func() {\n\t\tdefer wg.Done()\n\n\t\tevtsRecorder, err := recorder.NewRecorder(&recorder.Config{\n\t\t\tComponent: uuid.NewV4().String(),\n\t\t\tEvtsProxyURL: ti.evtProxyServices.EvtsProxy.RPCServer.GetListenURL(),\n\t\t\tBackupDir: recorderEventsDir}, ti.logger)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"failed to create recorder, err: %v\", err)\n\t\t\treturn\n\t\t}\n\t\tti.recorders.Lock()\n\t\tti.recorders.list = append(ti.recorders.list, evtsRecorder)\n\t\tti.recorders.Unlock()\n\n\t\t// record events\n\t\tfor i := range recordEvents {\n\t\t\tif objRef, ok := recordEvents[i].objRef.(cluster.Node); ok {\n\t\t\t\tobjRef.ObjectMeta.Name = CreateAlphabetString(5)\n\t\t\t\trecordEvents[i].objRef = &objRef\n\t\t\t}\n\t\t\tfor j := 0; j < recordEvents[i].repeat; j++ {\n\t\t\t\tevtsRecorder.Event(recordEvents[i].eventType, recordEvents[i].message, recordEvents[i].objRef)\n\t\t\t}\n\t\t}\n\n\t\t// wait for the batch interval\n\t\ttime.Sleep(ti.batchInterval + 10*time.Millisecond)\n\t\t// resend the events again after batch interval, this should increase the hits but not recreate the alerts as per our alert policy\n\t\t// thus, no alert export for these events.\n\t\tfor i := range recordEvents {\n\t\t\tevtsRecorder.Event(recordEvents[i].eventType, recordEvents[i].message, recordEvents[i].objRef)\n\t\t}\n\t}()\n\n\tfor messageCh, expectedMessages := range messages {\n\t\tcloseMsgCh := make(chan struct{})\n\n\t\t// ensure all the alerts are exported to the given syslog(UDP/TCP) server in the respective format.\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\n\t\t\tfor {\n\t\t\t\tselect {\n\t\t\t\tcase <-closeMsgCh:\n\t\t\t\t\treturn\n\t\t\t\tcase msg, ok := <-messageCh:\n\t\t\t\t\tif !ok {\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\n\t\t\t\t\tm.Lock()\n\t\t\t\t\tfor i := 0; i < len(expectedMessages); i++ {\n\t\t\t\t\t\tif len(expectedMessages[i].Substrs) > 0 {\n\t\t\t\t\t\t\tmatch := true\n\t\t\t\t\t\t\tfor _, substr := range expectedMessages[i].Substrs {\n\t\t\t\t\t\t\t\tmatch = match && strings.Contains(msg, substr)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tif match && syslog.ValidateSyslogMessage(expectedMessages[i].MsgFormat, msg) {\n\t\t\t\t\t\t\t\texpectedMessages = append(expectedMessages[:i], expectedMessages[i+1:]...)\n\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tm.Unlock()\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\n\t\tAssertEventually(t,\n\t\t\tfunc() (bool, interface{}) {\n\t\t\t\tm.Lock()\n\t\t\t\tdefer m.Unlock()\n\t\t\t\tif len(expectedMessages) != 0 {\n\t\t\t\t\treturn false, fmt.Sprintf(\"pending: %v\", len(expectedMessages))\n\t\t\t\t}\n\t\t\t\treturn true, nil\n\t\t\t}, \"did not receive all the expected syslog messages\", \"200ms\", \"10s\")\n\n\t\tclose(closeMsgCh)\n\t}\n\n\twg.Wait()\n}", "func (x *XcChaincode) buildTurnOutMessage(fromAccount string, toPlatform string, toAccount string, value *big.Int, now string) []byte {\n\tstate := turnOutMessage{fromAccount, value, toPlatform, toAccount, now}\n\tstateJson, _ := json.Marshal(state)\n\treturn stateJson\n}", "func (c *Client) BuildCreateRequest(ctx context.Context, v interface{}) (*http.Request, error) {\n\tu := &url.URL{Scheme: c.scheme, Host: c.host, Path: CreateLogPath()}\n\treq, err := http.NewRequest(\"POST\", u.String(), nil)\n\tif err != nil {\n\t\treturn nil, 
goahttp.ErrInvalidURL(\"log\", \"create\", u.String(), err)\n\t}\n\tif ctx != nil {\n\t\treq = req.WithContext(ctx)\n\t}\n\n\treturn req, nil\n}", "func getCostEstimationLogs(ctx context.Context, client *tfe.Client, r *tfe.Run) error {\n\tif r.CostEstimate == nil {\n\t\treturn nil\n\t}\n\tmsgPrefix := \"Cost estimation\"\n\tstarted := time.Now()\n\tupdated := started\n\tfor i := 0; ; i++ {\n\t\t// select {\n\t\t// case <-stopCtx.Done():\n\t\t// \treturn stopCtx.Err()\n\t\t// case <-cancelCtx.Done():\n\t\t// \treturn cancelCtx.Err()\n\t\t// case <-time.After(backoff(backoffMin, backoffMax, i)):\n\t\t// }\n\n\t\t// Retrieve the cost estimate to get its current status.\n\t\tce, err := client.CostEstimates.Read(ctx, r.CostEstimate.ID)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// If the run is canceled or errored, but the cost-estimate still has\n\t\t// no result, there is nothing further to render.\n\t\tif ce.Status != tfe.CostEstimateFinished {\n\t\t\tif r.Status == tfe.RunCanceled || r.Status == tfe.RunErrored {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\n\t\t// checking if i == 0 so as to avoid printing this starting horizontal-rule\n\t\t// every retry, and that it only prints it on the first (i=0) attempt.\n\t\tif i == 0 {\n\t\t\tfmt.Println(\"------------------------------------------------------------------------\")\n\t\t}\n\n\t\tswitch ce.Status {\n\t\tcase tfe.CostEstimateFinished:\n\t\t\tdelta, err := strconv.ParseFloat(ce.DeltaMonthlyCost, 64)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tsign := \"+\"\n\t\t\tif delta < 0 {\n\t\t\t\tsign = \"-\"\n\t\t\t}\n\n\t\t\tdeltaRepr := strings.Replace(ce.DeltaMonthlyCost, \"-\", \"\", 1)\n\n\t\t\tfmt.Println(msgPrefix + \":\")\n\t\t\tfmt.Printf(\"Resources: %d of %d estimated\", ce.MatchedResourcesCount, ce.ResourcesCount)\n\t\t\tfmt.Printf(\" $%s/mo %s$%s\", ce.ProposedMonthlyCost, sign, deltaRepr)\n\n\t\t\tif len(r.PolicyChecks) == 0 && r.HasChanges {\n\t\t\t\tfmt.Println(\"------------------------------------------------------------------------\")\n\t\t\t}\n\n\t\t\t// if b.CLI != nil {\n\t\t\t// \tb.CLI.Output(b.Colorize().Color(msgPrefix + \":\"))\n\t\t\t// \tb.CLI.Output(b.Colorize().Color(fmt.Sprintf(\"Resources: %d of %d estimated\", ce.MatchedResourcesCount, ce.ResourcesCount)))\n\t\t\t// \tb.CLI.Output(b.Colorize().Color(fmt.Sprintf(\" $%s/mo %s$%s\", ce.ProposedMonthlyCost, sign, deltaRepr)))\n\n\t\t\t// \tif len(r.PolicyChecks) == 0 && r.HasChanges && op.Type == backend.OperationTypeApply {\n\t\t\t// \t\tb.CLI.Output(\"------------------------------------------------------------------------\")\n\t\t\t// \t}\n\t\t\t// }\n\n\t\t\treturn nil\n\t\tcase tfe.CostEstimatePending, tfe.CostEstimateQueued:\n\t\t\t// Check if 30 seconds have passed since the last update.\n\t\t\tcurrent := time.Now()\n\t\t\tif i == 0 || current.Sub(updated).Seconds() > 30 {\n\t\t\t\tupdated = current\n\t\t\t\telapsed := \"\"\n\n\t\t\t\t// Calculate and set the elapsed time.\n\t\t\t\tif i > 0 {\n\t\t\t\t\telapsed = fmt.Sprintf(\n\t\t\t\t\t\t\" (%s elapsed)\", current.Sub(started).Truncate(30*time.Second))\n\t\t\t\t}\n\t\t\t\tfmt.Println(msgPrefix + \":\")\n\t\t\t\tfmt.Println(\"Waiting for cost estimate to complete...\" + elapsed)\n\t\t\t}\n\t\t\tcontinue\n\t\tcase tfe.CostEstimateSkippedDueToTargeting:\n\t\t\tfmt.Println(msgPrefix + \":\")\n\t\t\tfmt.Println(\"Not available for this plan, because it was created with the -target 
option.\")\n\t\t\tfmt.Println(\"------------------------------------------------------------------------\")\n\t\t\treturn nil\n\t\tcase tfe.CostEstimateErrored:\n\t\t\tfmt.Println(msgPrefix + \" errored.\")\n\t\t\tfmt.Println(\"------------------------------------------------------------------------\")\n\t\t\treturn nil\n\t\tcase tfe.CostEstimateCanceled:\n\t\t\treturn errors.New(msgPrefix + \" canceled.\")\n\t\tdefault:\n\t\t\treturn errors.New(\"Unknown or unexpected cost estimate state: \" + string(ce.Status))\n\t\t}\n\t}\n}", "func finalMessage(l *Logger, logCat LogCat, v ...interface{}) string {\n\tbase := baseMessage(l, logCat)\n\tmsg := fmt.Sprint(v...)\n\twrappedMsg := fmt.Sprintf(`message=\"%s\"`, msg)\n\n\treturn base + \", \" + wrappedMsg\n}", "func (p *BuildParams) String() string {\n\tstr := fmt.Sprintf(\"BUILD-%s-%d\", p.Project, *p.Definition)\n\n\tif p.Branch != nil {\n\t\tstr = fmt.Sprintf(\"%s-%s\", str, *p.Branch)\n\t}\n\n\treturn str\n}", "func makeMessage(data Payload, job IJob) message {\n\treturn message{\n\t\tId: messageId(uuid.New().String()),\n\t\tAddress: job.Config().getAddr().md5(),\n\t\tQueue: job.Queue().Name,\n\t\tPayload: data,\n\t\tState: messageSend,\n\t\tCreateAt: time.Now(),\n\t}\n}", "func writeLog(msg ...interface{}) {\n\tlogLocker.Lock()\n\tdefer logLocker.Unlock()\n\tif *confVerbose {\n\t\tcolor.Green(fmt.Sprint(time.Now().Format(\"02_01_06-15.04.05\"), \"[WRITE] ->\", msg))\n\t}\n}", "func Msg(logger log.Logger, message string, keyvals ...interface{}) error {\n\tprepended := make([]interface{}, 0, len(keyvals)+2)\n\tprepended = append(prepended, structure.MessageKey, message)\n\tprepended = append(prepended, keyvals...)\n\treturn logger.Log(prepended...)\n}", "func generateMessages(n int) []Message {\n\tvar msgs []Message\n\tfor i := 0; i < n; i++ {\n\t\tmsgs = append(msgs, Message{\n\t\t\tID: rand.Int63(),\n\t\t\tName: \"The name of the job. 
Could be anything\",\n\t\t\tRef: \"Reference to some root / parent object\",\n\t\t\tDescription: \"Message to test queues\",\n\t\t\tParams: map[string]string{\n\t\t\t\t\"Attempts\": \"18\",\n\t\t\t\t\"State\": \"Colorado\",\n\t\t\t\t\"MaxDeviation\": \"46\",\n\t\t\t},\n\t\t})\n\t}\n\treturn msgs\n}", "func newPrintExprWithMessage(msg string) *dst.CallExpr {\n\treturn newPrintExprWithArgs([]dst.Expr{\n\t\t&dst.BasicLit{\n\t\t\tKind: token.STRING,\n\t\t\tValue: `\"` + msg + `\\n\"`,\n\t\t},\n\t})\n}", "func (srv *server) build(messages []string) {\n\tsrv.query = fmt.Sprintf(\"INSERT INTO %s VALUES %s;\", tableName, strings.Join(messages, \" ,\"))\n}", "func PrnLog(msg string, colorCode string, noNewLine bool, noAddTime bool) {\n\tif len(colorCode) == 0 {\n\t\tcolorCode = \"yellow\"\n\t}\n\n\tif colorCode != \"yellow\" {\n\t\tcolor.Unset()\n\t}\n\tif lastColorCode != colorCode {\n\t\tlastColorCode = colorCode\n\n\t\tswitch colorCode {\n\t\tcase \"yellow\":\n\t\t\tcolor.Set(color.FgYellow)\n\t\tcase \"green\":\n\t\t\tcolor.Set(color.FgGreen)\n\t\tcase \"red\":\n\t\t\tcolor.Set(color.FgRed)\n\t\tcase \"white\":\n\t\t\tcolor.Set(color.FgWhite)\n\t\tdefault:\n\t\t\tcolor.Set(color.FgYellow)\n\t\t}\n\t}\n\n\tif !noAddTime {\n\t\tmsg = FormatDate(time.Now(), \"yyyy-mm-dd HH:MM:SS\") + \" - \" + msg\n\t}\n\n\tif noNewLine {\n\t\t//fmt.Printf(msg)\n\t\t//log.Printf(msg)\n\t\t//log.Print(msg)\n\t\tfmt.Print(msg)\n\t} else {\n\t\tfmt.Println(msg)\n\t\t//fmt.Println(msg)\n\t\t//log.Println(msg)\n\t\t//log.Print(msg + \"\\n\")\n\t}\n\n\t//agrego la traza de tiempo solo a la salida al archivo de logs.\n\t//if !NoAddTime {\n\t// msg = time.Now().Format(\"02/01/2006 15:04:05\") + \" \" + msg\n\t//}\n\n\t//color.Unset()\n\n\t//esta validacion es para controlar el salto de linea adentro del arreglo que va al log.\n\t// if ($script:nonewlineFlag -eq $false){\n\t// \t $script:lineas += $msg\n\t// \tif ($NoNewLine){\n\t// \t $script:nonewlineFlag = $true;\n\t// \t}\n\t// } else {\n\t// \t$script:lineas[$script:lineas.Count-1] += $msg\n\t// \tif ($NoNewLine -eq $false){\n\t// \t $script:nonewlineFlag = $false;\n\t// \t}\n\t// }\n}", "func Mock(tgt module.DeliveryTarget, globalChecks []module.Check) *MsgPipeline {\n\treturn &MsgPipeline{\n\t\tmsgpipelineCfg: msgpipelineCfg{\n\t\t\tglobalChecks: globalChecks,\n\t\t\tperSource: map[string]sourceBlock{},\n\t\t\tdefaultSource: sourceBlock{\n\t\t\t\tperRcpt: map[string]*rcptBlock{},\n\t\t\t\tdefaultRcpt: &rcptBlock{\n\t\t\t\t\ttargets: []module.DeliveryTarget{tgt},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}", "func (mail *Mail) BuildMessage() string {\n\tmessage := \"\"\n\tmessage += fmt.Sprintf(\"From: %s\\r\\n\", senderID)\n\tif len(mail.toIds) > 0 {\n\t\tmessage += fmt.Sprintf(\"To: %s\\r\\n\", strings.Join(mail.toIds, \";\"))\n\t}\n\n\tmessage += fmt.Sprintf(\"Subject: %s\\r\\n\", mail.subject)\n\tmessage += fmt.Sprintf(\"Content-Type: text/html; charset=utf-8\")\n\tmessage += \"\\r\\n\" + mail.body\n\n\treturn message\n}", "func generateLogLine(id int) string {\n\tmethod := httpMethods[rand.Intn(len(httpMethods))]\n\tnamespace := namespaces[rand.Intn(len(namespaces))]\n\n\tpodName := rand.String(rand.IntnRange(3, 5))\n\turl := fmt.Sprintf(\"/api/v1/namespaces/%s/pods/%s\", namespace, podName)\n\tstatus := rand.IntnRange(200, 600)\n\n\treturn fmt.Sprintf(\"%d %s %s %d\", id, method, url, status)\n}" ]
[ "0.57305515", "0.532833", "0.52610326", "0.5235716", "0.51883453", "0.5136516", "0.5134061", "0.5091816", "0.50882214", "0.5044602", "0.5035794", "0.50196993", "0.49808282", "0.49630845", "0.49628553", "0.49331522", "0.49271595", "0.48909366", "0.48870495", "0.48639286", "0.48531088", "0.4832801", "0.48234797", "0.48212475", "0.4783635", "0.47527218", "0.47247797", "0.4719681", "0.47130632", "0.47006032", "0.46961194", "0.46948305", "0.46827805", "0.466588", "0.4646734", "0.46454918", "0.4644258", "0.46379808", "0.4635368", "0.46158743", "0.46139213", "0.4601347", "0.460033", "0.45882952", "0.45857912", "0.45801255", "0.4579664", "0.45727718", "0.45683706", "0.45676088", "0.45642227", "0.4557643", "0.4546957", "0.45348847", "0.45321238", "0.45241654", "0.45235503", "0.4520909", "0.45178074", "0.45142314", "0.45113605", "0.4511273", "0.4506106", "0.44976056", "0.44972306", "0.4495688", "0.44901454", "0.4489377", "0.44814813", "0.44809842", "0.4463142", "0.44594038", "0.44537485", "0.44492", "0.44424483", "0.441888", "0.44119158", "0.44073895", "0.43993175", "0.43950173", "0.4388368", "0.4387317", "0.43865803", "0.43821624", "0.43817082", "0.4373422", "0.43637618", "0.43618378", "0.43608013", "0.43602747", "0.4359773", "0.43553302", "0.4349206", "0.43477222", "0.43467546", "0.43436402", "0.43435615", "0.43338755", "0.43324775", "0.43304986" ]
0.61156356
0
fibonacci is a function that returns a function that returns an int.
func fibonacci() func() int {
	first, second := 0, 1
	return func() int {
		ret := first
		first, second = second, first+second
		return ret
	}
}
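A minimal driver sketch for the closure above; the package main wrapper is an assumption added for runnability and is not part of the stored snippet:

package main

import "fmt"

// Each call to the returned closure advances the sequence by one step.
func main() {
	f := fibonacci()
	for i := 0; i < 10; i++ {
		fmt.Print(f(), " ") // 0 1 1 2 3 5 8 13 21 34
	}
	fmt.Println()
}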
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func fibonacci() func() int {\n\n\treturn func() int {\n\t\tif x == -1 {\n\t\t\tx = 0\n\t\t\treturn x\n\t\t}\n\t\tif y == -1 {\n\t\t\ty = 1\n\t\t\treturn y\n\t\t}\n\n\t\tvar cur int = x + y\n\t\tx = y\n\t\ty = cur\n\t\treturn cur\n\t}\n\n}", "func fibonacci() func() int {\n\tterm_1, term_2 := 0, 1\n\treturn func () int {\n\t\tto_return := term_1\n\t\t\n\t\tterm_1 = term_2\n\t\tterm_2 = term_2 + to_return\n\t\n\t\treturn to_return\n\t}\n}", "func fibonacci() func() int {\n\tx0 := 0\n\tx1 := 0\n\treturn func() int {\n\t\txn := x0 + x1\n\t\tif xn == 0 {\n\t\t\tx1++\n\t\t\treturn xn\n\t\t} else {\n\t\t\tx0 = x1\n\t\t\tx1 = xn\n\t\t\treturn xn\n\t\t}\n\t}\n}", "func fibonacci() func() int {\n\ta:= 0\n\tb:= 1\n\treturn func() int {\n\t\tx := a+b\n\t\ta = b\n\t\tb = x\n\t\treturn x\n\t}\n}", "func fibonacci() func() int {\n\titeracao := 0\n\tprox := 0\n\tprimeiro := 0\n\tsegundo := 1\n\n\treturn func() int {\n\t\tif iteracao <= 1 {\n\t\t\tprox = iteracao\n\t\t\titeracao += 1\n\t\t} else {\n\t\t\tprox = primeiro + segundo\n\t\t\tprimeiro = segundo\n\t\t\tsegundo = prox\n\t\t}\n\t\treturn prox\n\t}\n}", "func fibonacci() func() int {\n\tv := 0\n\tprev := 0\n\treturn func() int {\n\t\tif v == 0 {\n\t\t\tv = 1\n\t\t\treturn prev\n\t\t}\n\t\tans := v + prev\n\t\tprev = v\n\t\tv = ans\n\t\treturn prev\n\t}\n}", "func fibonacci() func() int {\n\tcount := 0\n\thold := 0\n\tprevVal := 0\n\tnextVal := 1\n\treturn func() int {\n\t\tif count == 0 {\n\t\t\tcount++\n\t\t\treturn 0\n\t\t}\n\n\t\tif count == 1 {\n\t\t\tcount++\n\t\t\treturn 1\n\t\t}\n\n\t\thold = nextVal\n\t\tnextVal = prevVal + nextVal\n\t\tprevVal = hold\n\t\treturn nextVal\n\t}\n}", "func fibonacci() func() int {\n\tx, y := 0, 1\n\treturn func() int {\n\t\tx, y = y, x+y\n\t\treturn x\n\t}\n}", "func fibonacci() func(int) int {\n\tfirst := 0\n\tsecond := 1\n\treturn func(x int) int {\n\t\tnext := first + second\n\t\tfirst = second\n\t\tsecond = next\n\t\treturn\tnext\n\t}\n}", "func fibonacci() func() int64 {\n\tfirst, second := int64(0), int64(1)\n\treturn func() int64 {\n\t\tret := first\n\t\tfirst, second = second, first+second\n\t\treturn ret\n\t}\n}", "func fibonacci() func() int {\n\tx := 0\n\ty := 1\n\treturn func() int {\n\t\tx,y = y,x+y\n\t\treturn x\n\t}\n}", "func fibaonacci() func() int {\n\tindex := 0\n\tfn0 := 0\n\tfn1 := 0\n\tfn2 := 0\n\t\n\treturn func() int {\n\t\tif index == 0 {\n\t\t\tfn0 = 0;\n\t\t} else if index == 1 {\n\t\t\tfn0 = 1;\n\t\t} else if index > 1 {\n\t\t\tfn2 = fn1;\n\t\t\tfn1 = fn0;\n\t\t\tfn0 = fn1 + fn2\n\t\t}\n\t\t//fmt.Println(\"Index=\", index, \"fn1=\", fn1, \"fn2=\", fn2)\n\t\tindex += 1\n\t\treturn fn0\n\t}\n}", "func fibonacci() func() int {\n\tx := 0\n\ty := 1\n\treturn func() int {\n\t\tx,y = y, x+y\n\t\treturn x\n\t}\n}", "func fibonacci() func() int {\r\n\ta := make([]int, 0)\r\n\ti := -1\r\n\treturn func() int {\r\n\t\ti++\r\n\t\tif i == 0 {\r\n\t\t\ta = append(a, 0)\r\n\t\t} else if i == 1 {\r\n\t\t\ta = append(a, 1)\r\n\t\t} else {\r\n\t\t\ta = append(a, a[i-2]+a[i-1])\r\n\t\t}\r\n\t\treturn a[i]\r\n\t}\r\n}", "func fibonacci() func() int {\n prev1 := 0\n prev2 := 0\n next := 0\n return func() int {\n next = prev1 + prev2\n if prev1 == 0 {\n prev1++\n } else {\n prev1 = prev2\n }\n prev2 = next\n return next\n }\n}", "func fibonacci() func() int {\n\tn := -1 \t\n\treturn func () int {\n\t\tn++\t\n\t\tif n == 0{\n\t\t\treturn 0\n\t\t}else if n == 1 {\n\t\t\treturn 1\n\t\t}else{\n\t\t\ta := make([]int, n+1)\n\t\t \ta[0] = 0\n\t\t\ta[1] = 1\n\t\t\tfor i:=2; i<=n; i++ {\n\t\t\t\ta[i] = a[i-1] + 
a[i-2]\n\t\t\t}\n\t\t\treturn a[n]\n\t\t}\n\t}\n}", "func fibonacci() func(int) int {\n\tsum :=0\n\treturn func(i int) int {\n\t\tif i < 0 {\n\t\t\treturn 0\n\t\t}\n\t\tswitch i {\n\t\t\tcase 0:{\n\t\t\t\treturn 0\n\t\t\t}\n\t\t\tcase 1: {\n\t\t\t\treturn 1\n\t\t\t} \n\t\t\tdefault:{\n\t\t\t\tf :=fibonacci()\n\t\t\t\tsum = f(i-1) + f(i-2)\n\t\t\t\treturn sum\n\t\t\t}\n\t\t}\n\t}\n}", "func fibonacci() func() int {\n fib0 := 0\n fib1 := 1\n return func() int {\n result := fib0\n next := fib0 + fib1\n fib0 = fib1\n fib1 = next\n return result\n }\n}", "func Fibonacci() func() int {\n\ta, b := 0, 1\n\treturn func() int {\n\t\ta, b = b, a+b\n\t\treturn a\n\t}\n}", "func fibonacci() func() int {\n\tvar fib func (n int) int\n n := 0\n fib = func(n int) int {\n if n == 0 {\n return 0\n } else if n == 1 {\n return 1\n } else {\n return fib(n-1) + fib(n-2)\n }\n }\n return func() int {\n acc := fib(n)\n\t\tn++\n\t\treturn acc \n }\n}", "func fibonacci() func() int {\n\tp1, p2, p3 := 0, 1, 1\n\tcount := 0\n\treturn func() int {\n\t\tcount++\n\t\tif count == 1 {\n\t\t\treturn p1\n\t\t} else if count == 2 {\n\t\t\treturn p2\n\t\t} else {\n\t\t\tp3 = p1 + p2\n\t\t\tp1, p2 = p2, p3\n\t\t\treturn p3\n\t\t}\n\t}\n}", "func fibonacci() func() int {\n\txN := 0\n\tyN := 1\n\tfN := 0\n\tcnt := 0\n\t\n\treturn func() int {\n\t\tif cnt <= 0 {\n\t\t\tcnt++\n\t\t\treturn xN\n\t\t} else if cnt <= 1 {\n\t\t\tcnt++\n\t\t\treturn yN\n\t\t} else {\n\t\t\tcnt++\n\t\t\t// tracing the iterations, the values rotate around,\n\t\t\t// shift through the variables..\n\t\t\tfN = xN /*0, 1, 1, 2, 3, */ + yN /*1, 1, 2, 3, 5*/\n\t\t\t\n\t\t\txN = yN /*1, 1, 2, 3, 5, */\n\t\t\tyN = fN /*1, 2, 3, 5, 8, */\n\t\t\t\n\t\t\treturn fN /*1, 2, 3, 5, 8, ...*/\n\t\t}\n\t}\n}", "func fibonacci() func() int {\n\tvar pre, prepre int = 0, 0\n\treturn func() (next int) {\n\t\tif prepre == 0 && pre == 0 {\n\t\t\tprepre = 1\n\t\t\treturn 0\n\t\t}\n\t\tnext = pre + prepre\n\t\tprepre, pre = pre, next\n\t\treturn\n\t}\n}", "func fibonacci() func() int {\n\tcount := 0\n\tfib := 3\n\tfib_minus_one := 2\n\tfib_minus_two := 1\n\treturn func() int {\n\t\tif count > 2 {\n\t\t\tfib = fib_minus_one + fib_minus_two\n\t\t\tfib_minus_two = fib_minus_one\n\t\t\tfib_minus_one = fib\n\t\t\treturn fib\n\t\t}\n\t\tif count == 2 {\n\t\t\tcount++\n\t\t\treturn 2\n\t\t}\n\t\tcount++\n\t\treturn 1\n\t}\n}", "func fibonacci() func() int {\n\tvar last []int\n\treturn func() int {\n\t\tswitch len(last) {\n\t\tcase 0:\n\t\t\tlast = append(last, 0)\n\t\tcase 1:\n\t\t\tlast = append(last, 1)\n\t\tdefault:\n\t\t\tlast[0], last[1] = last[1], last[0]+last[1]\n\t\t}\n\t\treturn last[len(last)-1]\n\t}\n}", "func fibonacci() func() int {\n\tfirst_call, second_call := true, true\n\tcurr, last := 1, 0\n\treturn func() int {\n\t\tif first_call {\n\t\t\tfirst_call = false\n\t\t\treturn 0\n\t\t} else if second_call {\n\t\t\tsecond_call = false\n\t\t\treturn 1\n\t\t} else {\n\t\t\taux := curr\n\t\t\tcurr += last\n\t\t\tlast = aux\n\t\t\treturn curr\n\t\t}\n\t}\n}", "func Fibonacci() func() int {\n\tx, y := 1, 1\n\treturn func() int {\n\t\tx, y = y, x+y\n\t\treturn x\n\t}\n}", "func fibonacci(n int) func(int) int {\r\n\t\t//Initierar en variabel till en funtion av int\r\n\t\tx := func(n int) int{\r\n\t\t\t//returnar fib talen till x\r\n\t\t\treturn fib(n)\r\n\t\t}\r\n\treturn x\r\n}", "func fibonacci() func() int {\n\ts := []int{0, 1}\n\tvar i int\n\tinc := func() {\n\t\ti++\n\t}\n\treturn func() int {\n\t\tdefer inc()\n\t\tif i >= 2 {\n\t\t\ts = append(s, 
s[i-2]+s[i-1])\n\t\t}\n\t\treturn s[i]\n\t}\n}", "func fibonacci() func() int64 {\n\t/*\n\t\t関数の中で宣言された変数はローカル変数といい、呼び出しを終了する際に破棄される\n\t\tしかし、クロージャから呼び出されている変数はローカル変数であっても、破棄されずに保持される\n\t\t関数の中で呼び出されるローカル変数がすべて捕捉されるわけではなく、クロージャの中で参照されている変数のみが捕捉される\n\t\tgoにはジェネレータ(自身の内部の状態を保持し、呼び出されるたびに現在の状態から導き出される処理結果を返す)の機能はないが、クロージャを用いることでジェネレータの役割を実現できる\n\t\t※クロージャを別に新しく生成した際は、現在の内部の状態は共有されずに新しく生成される\n\t*/\n\tvar s int64\n\tvar next int64\n\t//var b bool\n\tvar work int64\n\t//var work2 int64\n\treturn func() int64 {\n\t\t/*\n\t\t\tif (s + next) == 0 {\n\t\t\t\tnext = 1\n\t\t\t} else if (s + next) == 1 {\n\t\t\t\ts = next\n\t\t\t} else {\n\t\t\t\tif b == true {\n\t\t\t\t\tif work == 0 {\n\t\t\t\t\t\tnext = next + s\n\t\t\t\t\t} else {\n\t\t\t\t\t\tnext = work\n\t\t\t\t\t}\n\t\t\t\t\twork = next + s\n\t\t\t\t\ts = next\n\t\t\t\t} else {\n\t\t\t\t\tb = true\n\t\t\t\t}\n\t\t\t}\n\t\t*/\n\t\twork = s //ワークスペースに現在のフィボナッチ数の値を退避\n\t\ts = s + next //現在のフィボナッチ数にn-1番目の数値を足す\n\t\tnext = work //n-1番目のフィボナッチ数を次に足す値として代入\n\t\tif s == 0 {\n\t\t\tnext += 1\n\t\t\treturn s\n\t\t} else if s == 1 {\n\t\t\treturn s\n\t\t} else {\n\t\t\ts -= 1\n\t\t\tfibonacci()\n\t\t\ts -= 1\n\t\t\tfibonacci()\n\t\t\ts += 2\n\t\t}\n\t\treturn s\n\t}\n}", "func fibonacci() func() int {\n\tvar first, second, x int\n\n\treturn func() int {\n\t\tvar temp int\n\t\tvar fib_series int\n\t\tx += 1\n\t\tif x == 1 {\n\t\t\tfib_series = 0\n\t\t\tfirst = fib_series\n\t\t} else if x == 2 {\n\t\t\tfib_series = 1\n\n\t\t} else {\n\t\t\tfib_series = first + second\n\n\t\t}\n\t\ttemp = first\n\t\tfirst = fib_series\n\t\tsecond = temp\n\n\t\treturn fib_series\n\t}\n\n}", "func fibonacci() func() int {\n x, y := 0, 1\n i := 0\n return func() int{\n if i!=0{\n x, y = y, x+y\n }\n i++\n return x\n }\n}", "func fibonacci(n int) int {\n\tif n == 0 {\n\t\treturn 0\n\t}\n\tif n == 1 {\n\t\treturn 1\n\t}\n\treturn fibonacci(n-1) + fibonacci(n-2)\n}", "func Fibonacci() func() *big.Int {\n\tx := big.NewInt(-1)\n\ty := big.NewInt(1)\n\n\treturn func() (ret *big.Int) {\n\t\tret = x\n\t\tx.Add(x, y)\n\t\tx, y = y, x\n\t\treturn\n\t}\n}", "func fibonacchi() func() int {\n\tfirst := 0\n\tsecond := 1\n\treturn func() int {\n\t\tres := first\n\t\tfirst, second = second, first+second\n\t\treturn res\n\t}\n}", "func fibonacci() func() int {\n arr := make([]int, 0)\n var i int\n\n return func() int {\n switch i {\n case 0:\n arr = append(arr, 1)\n case 1:\n arr = append(arr, 1)\n default:\n arr = append(arr, arr[i-1] + arr[i-2])\n }\n i++\n return arr[len(arr) - 1]\n }\n}", "func Fibonacci(n int) (f int) {\n\tif n == 0 {\n\t\treturn 0\n\t} else if n == 1 {\n\t\treturn 1\n\t} else {\n\t\tf = Fibonacci(n-2) + Fibonacci(n-1)\n\t}\n\treturn\n}", "func fibonacci() func() int {\n\ti := 0\n\treturn func() int {\n\t\tdefer (func() {\n\t\t\ti += 1\n\t\t})()\n\t\treturn fibRecursive(i)\n\t}\n}", "func fib(n int) int {\r\n\tif (n == 0){\t\r\n\t\treturn int(0)\r\n\t}else if (n == 1){\r\n\t\treturn int(1)\r\n\t}else{\r\n\t\treturn fib(n - 1) + fib(n -2)\r\n\t}\r\n}", "func fibonacci() func() int {\n\tfib := 0\n\t//Defining func that returns an int\n\tclosure_func := func () int {\n\t\tfmt.Println(\"Closure of fibbonaci of \", fib)\n\t\tif fib <= 0{\n\t\t\tfmt.Println(\"fibbonaci of: \", fib, \"is : \", 0)\n\t\t\t// Increase for next iteration\n\t\t\tfib++\n\t\t\treturn 0\n\t\t}\n\t\tif fib == 1{\n\t\t\tfmt.Println(\"fibbonaci of: \", fib, \"is : \", 1)\n\t\t\t// Increase for next iteration\n\t\t\tfib++\n\t\t\treturn 1\n\t\t}\n\t\t// Increase for next iteration\n\t\tfib++\n\t\tvar a = 
0\n\t\tb := 1\n\t\tc := 0\n\t\tfor i:= 1; i<fib; i++ {\n\t\t\tprint(i)\n\t\t\tc = a + b\n\t\t\ta = b\n\t\t\tb = c\n\t\t}\n\t\tfmt.Println(\"fibbonaci of: \", fib, \"is : \", c)\n\t\t// Return fibonacci\n\t\treturn c\n\t}//end closure\n\treturn closure_func\n}", "func fibonacci01() func() int {\n\tfN, xN := 1, 0\n\treturn func() int {\n\t\tfN, xN = xN, fN+xN\n\t\treturn fN\n\t}\n}", "func fib() func() int {\n\ta, b := 0, 1\n\treturn func() int {\n\t\tdefer func() { a, b = b, a+b }()\n\t\treturn a\n\t}\n}", "func Fibonacci(number int) int {\n\tif number == 0 {\n\t\treturn 1\n\t} else if number == 1 {\n\t\treturn number\n\t} else {\n\t\treturn Fibonacci(number-2) + Fibonacci(number-1)\n\t}\n}", "func Fibonacci(n int) {\n\t// fibonacci1(n)\n\tfibonacci3(n)\n}", "func fib(n int) int{\n if n == 0 {\n return n\n } else if n == 1 {\n return 1\n } else {\n return fib(n - 1) + fib(n - 2)\n }\n}", "func fib(n int) int {\n\tif n == 0 || n == 1 {\n\t\treturn 1\n\t}\n\n\treturn fib(n-1) + fib(n-2)\n}", "func fib(n int) int {\n\tif n == 0 || n == 1 {\n\t\treturn n\n\t}\n\treturn fib(n-1) + fib(n-2)\n}", "func Fibonacci(number int) int64 {\n\n\tif number == 0 {\n\t\treturn 0\n\t}\n\n\tif number == 1 {\n\t\treturn 1\n\t}\n\n\treturn Fibonacci(number-1) + Fibonacci(number-2)\n}", "func Fibonacci(n int) int {\n\ta0, a1 := 1, 1\n\tfor ; 1 < n; n-- {\n\t\ta0, a1 = a1, a0+a1\n\t}\n\n\treturn a1\n}", "func fibonacci() func() int {\n a := 0\n b := 1\n c := 0\n return func() int {\n c, a, b = a, b, a+b\n return c\n }\n}", "func Fibonacci(n int) int {\n\tif n > 0 && n <= 2 {\n\t\treturn 1\n\t} else if n ==0 {\n\t\treturn 0\n\t}\n\treturn Fibonacci(n-1) + Fibonacci(n-2)\n}", "func fib() func() int {\n a, b := 1, 1\n return func() int {\n a, b = b, a + b\n return b\n }\n}", "func fib(x uint) uint {\n\tif x == 0 {\n\t\treturn 0\n\t} else if x == 1 {\n\t\treturn 1\n\t}\n\treturn fib(x-1) + fib(x-2)\n}", "func fibonacci(n int) (res int) {\n\tvar i_1, i_2 int;\n\tfor i:=0; i<=n; i++ {\n\t\tif i<= 1{\n\t\t\ti_1 = 1;\n\t\t\ti_2 = 1;\n\t\t\tres = 1;\n\t\t} else {\n\t\t\tres = i_1+i_2;\n\t\t}\n\t\ti_2, i_1 = i_1, res;\n\t}\n\treturn;\n}", "func Fibonacci( n int ) float64 {\n\tif n == 0 {\n\t\treturn 0.0\n\t} else if n == 1 {\n\t\treturn 1.0\n\t} else {\n\t\treturn Fibonacci(n-1) + Fibonacci(n-2)\n\t}\n}", "func fibo(n int) int {\n if n < 2 {\n return 1\n }\n return fibo(n-2) + fibo(n-1)\n}", "func fibbonaci() func() int {\n\tn, v := 1, 0\n\treturn func() int {\n\t\tn, v = v, n+v\n\t\treturn n\n\t}\n}", "func fib(n int64) int64 {\n\n\tif n <= 0 {\n\t\treturn 0\n\t}\n\n\tif n <= 2 {\n\t\treturn 1\n\t}\n\n\treturn fib(n-1) + fib(n-2)\n}", "func Fibonacci(index int) int {\n\tif index == 1 || index == 2 {\n\t\treturn 1\n\t}\n\tif index > 2 && index < 25 {\n\t\treturn Fibonacci(index-2) + Fibonacci(index-1)\n\t}\n\tif index < 0 {\n\t\treturn -1\n\t}\n\n\treturn 0\n}", "func Fibonacci(number int) int {\n\tif number <= 0 {\n\t\treturn 0\n\t}\n\n\tn2 := 0\n\tn1 := 0\n\tcurrent := 1\n\n\tfor i := 1; i < number; i++ {\n\t\tn2 = n1\n\t\tn1 = current\n\t\tcurrent = n2 + n1\n\t}\n\treturn current\n}", "func main() {\r\n f := fibonacci(0)\r\n for i := 0; i < 10; i++ {\r\n fmt.Println(f(i))\r\n }\r\n}", "func Fibonacci(lastNumberIndex int) int {\n\tsequence := []int{1, 1}\n\n\tfor len(sequence) < lastNumberIndex {\n\t\tsequence = append(sequence, nextFibonacciElement(sequence))\n\t}\n\n\treturn sequence[len(sequence)-1]\n}", "func fib(n int64) (int64, int64) {\n\tif n < 2 {\n\t\tfmt.Printf(\"n=%v, f1=%v, f2=%v \\n\", n, 1, 1)\n\t\treturn 1, 
1\n\t}\n\tf1, f2 := fib(n-1)\n\tfmt.Printf(\"n=%v, f1=%v, f2=%v \\n\", n, f1, f2)\n\treturn f2, f1+f2\n}", "func fibonacci(n int) int {\n current, prev := 0, 1\n for i := 0; i < n; i++ {\n current, prev = current + prev, current\n }\n return current\n}", "func fibonacci(k int) int {\n\tif k <= 1 {\n\t\treturn 1\n\t}\n\treturn fibonacci(k-1) + fibonacci(k - 2)\n}", "func Fibonacci(n int64) []int64 {\n\tif n == 0 {\n\t\treturn []int64{}\n\t}\n\tif n == 1 {\n\t\treturn []int64{1}\n\t}\n\tif n == 2 {\n\t\treturn []int64{1, 1}\n\t}\n\tprevious := Fibonacci(n - 1)\n\treturn append(previous, previous[n - 2] + previous[n - 3])\n}", "func (h *Handler) GetFibonacci(ctx echo.Context) error {\n\tn, err := strconv.ParseUint(ctx.Param(\"n\"), 10, 64)\n\tif n == 0 {\n\t\treturn errs.New(errs.InvalidParameter, \"'n' is not a natural number\", err)\n\t}\n\tnumbers, err := h.f.Numbers(uint(n))\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn ctx.JSON(200, numbers)\n}", "func fib(n int) int {\n\tif scope := fibTracer.MaybeScope(\"wrapper\"); scope != nil {\n\t\tscope.Open(n)\n\t\tr := tracedFib(n, scope)\n\t\tscope.CloseCall(r)\n\t\treturn r\n\t}\n\treturn untracedFib(n)\n}", "func fib(N int) int {\n\tif N <= 1 {\n\t\treturn N\n\t}\n\ta := make([]int, N+1)\n\ta[0] = 0\n\ta[1] = 1\n\tfor i := 2; i <= N; i++ {\n\t\ta[i] = a[i-1] + a[i-2]\n\t}\n\treturn a[N]\n}", "func FibonacciMemoization(n int) int {\n\tvar MemoizedArray = []int{1,1}\n\tif n == 0 || n == 1 {\n\t\treturn 1\n\t}\n\tfor i:=2;i<n+1;i++ {\n\t\tMemoizedArray = append(MemoizedArray,MemoizedArray[i-2]+MemoizedArray[i-1])\t\n\t}\n\treturn MemoizedArray[n]\n}", "func fib(N int) int {\r\n\tif N < 2 {\r\n\t\treturn N\r\n\t}\r\n\ta := 0\r\n\tb := 1\r\n\tfor i := 2; i <= N; i++ {\r\n\t\ta, b = b, a+b\r\n\t}\r\n\treturn b\r\n}", "func Fibonacci(n int) []int {\n\tfibonacci := make([]int, n, n)\n\tfor i := range fibonacci {\n\t\tif i == 0 || i == 1 { // base cases\n\t\t\tfibonacci[i] = 1\n\t\t\tcontinue\n\t\t}\n\t\tfibonacci[i] = fibonacci[i-1] + fibonacci[i-2]\n\t}\n\treturn fibonacci\n}", "func fibonacci(n int, c chan int) {\n\tx, y := 0, 1\n\tfor i := 0; i < n; i++ {\n\t\tc <- x\n\t\tx, y = y, x+y\n\t}\n\tclose(c)\n}", "func TestFibinacciNumber() {\n\tfmt.Printf(\"5th Fibonacci is ---> %d\\n\", fibonacciNumber(5))\n\tfmt.Printf(\"6th Fibonacci is ---> %d\\n\", fibonacciNumber(6))\n\tfmt.Printf(\"7th Fibonacci is ---> %d\\n\", fibonacciNumber(7))\n}", "func main() {\n //fmt.Println(sum_of_even_fibs_up_to(25))\n //=> 10\n\n fmt.Println(sum_of_even_fibs_up_to(4000000))\n //=> ?? 
\n}", "func MemoizedFibonacci(n int) int {\n\treturn memoizedFibonacci(n)\n}", "func fibGen2(a int) int {\n\t//if a < 2 {\n\t//\treturn a\n\t//}\n\t//for the sake of negative fib:\n\tif a == 0 || a == 1 {\n\t\treturn a\n\t}\n\treturn fibGen2(a-1) + fibGen2(a-2)\n}", "func NewFibonacci(address common.Address, backend bind.ContractBackend) (*Fibonacci, error) {\n\tcontract, err := bindFibonacci(address, backend, backend, backend)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Fibonacci{FibonacciCaller: FibonacciCaller{contract: contract}, FibonacciTransactor: FibonacciTransactor{contract: contract}, FibonacciFilterer: FibonacciFilterer{contract: contract}}, nil\n}", "func TestFibonacciNumber(t *testing.T) {\n\tcases := []struct {\n\t\tinput int\n\t\texpect int\n\t}{\n\t\t{\n\t\t\tinput: 1,\n\t\t\texpect: 1,\n\t\t},\n\t\t{\n\t\t\tinput: 3,\n\t\t\texpect: 2,\n\t\t},\n\t\t{\n\t\t\tinput: 4,\n\t\t\texpect: 3,\n\t\t},\n\t}\n\tfor _, c := range cases {\n\t\tif fib(c.input) != c.expect {\n\t\t\tt.Fail()\n\t\t}\n\t}\n}", "func ExFibonacciClosure() {\n\tfmt.Println(\"Please read and implement :)\")\n}", "func SolveFibonacciRecursive(n int64) int64 {\n\tif n < 0 {\n\t\tpanic(\"invalid index\")\n\t}\n\tif n <= 1 {\n\t\treturn n\n\t}\n\treturn SolveFibonacciRecursive(n-1) + SolveFibonacciRecursive(n-2)\n}", "func Fibonacci_approx( n float64 ) float64 {\n\treturn ( math.Pow( 1 + math.Sqrt(5), n ) - math.Pow( 1 - math.Sqrt(5), n ) ) / ( math.Pow(2,n) * math.Sqrt(5) )\n}", "func bindFibonacci(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) {\n\tparsed, err := abi.JSON(strings.NewReader(FibonacciABI))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn bind.NewBoundContract(address, parsed, caller, transactor, filterer), nil\n}", "func fibonacci(n int, c chan int) {\n x, y := 0, 1\n for i := 0; i < n; i++ {\n c <- x\n x, y = y, x+y\n }\n close(c)\n}", "func main() {\n\n\tvar n int\n\t_, _ = fmt.Scanf(\"%d\\n\", &n)\n\n\tcache := initCache(n)\n\tfmt.Println(fibonacci(n, cache))\n}", "func fibonacci(c, quit chan int) {\n\tx, y := 0, 1\n\tfor {\n\t\tselect {\n\t\tcase c <- x: // can be chosen as long as c doesn't have a value\n\t\t\tx, y = y, x+y\n\t\tcase <-quit: // can be chosen when quit has some value\n\t\t\tfmt.Println(\"quit\")\n\t\t\treturn\n\t\t}\n\t}\n}", "func Fibonacci(duration time.Duration) *Officer {\n\to := Officer{\n\t\tBlock: func(t Try) {\n\t\t\tvar pre int64\n\t\t\tvar cur int64\n\t\t\tvar i uint\n\t\t\tfor pre, cur, i = 0, 1, 0; i < t.Count; i++ {\n\t\t\t\tpre = cur\n\t\t\t\tcur = pre + cur\n\t\t\t}\n\t\t\ttime.Sleep(duration * time.Duration(pre))\n\t\t},\n\t}\n\treturn &o\n}", "func fibRecur(n int) int {\n\tif n == 0 {\n\t\treturn 0\n\t}\n\n\tif n <= 2 {\n\t\treturn 1\n\t}\n\n\treturn fibRecur(n-1) + fibRecur(n-2)\n}", "func TestFibonacci(t *testing.T) {\n\tfor _, tt := range fibonacciTests {\n\t\tt.Logf(\"Calculating Fibonacci number for %d\", tt.input)\n\t\tactual, err := function.Fibonacci(tt.input, function.DefaultMax)\n\t\tif tt.err && err == nil {\n\t\t\tt.Errorf(\"Fibonacci(%d): expected error\", tt.input)\n\t\t\tcontinue\n\t\t}\n\t\tif !tt.err && err != nil {\n\t\t\tt.Errorf(\"Fibonacci(%d): expected no error. 
Received '%v'\", tt.input, err)\n\t\t\tcontinue\n\t\t}\n\t\tif actual != tt.expected {\n\t\t\tt.Errorf(\"Fibonacci(%d): expected %d, actual %d\", tt.input, tt.expected, actual)\n\t\t\tcontinue\n\t\t}\n\t}\n}", "func GetFibonacciSequence(conf *config.Config) gin.HandlerFunc {\n\tfib := fibonacci.New(conf.InitFibCacheSize, conf.MaxFibCacheSize)\n\treturn func(c *gin.Context) {\n\t\tnumberStr := c.Param(\"n\")\n\t\tnumber, err := strconv.Atoi(numberStr)\n\t\tif err != nil {\n\t\t\tRespondWithError(c, http.StatusBadRequest, InvalidNumberErrorCode,\n\t\t\t\tfmt.Sprintf(\"invalid number '%s'\", numberStr))\n\t\t\treturn\n\t\t}\n\n\t\tif number < 0 {\n\t\t\tRespondWithError(c, http.StatusBadRequest, NegativeNumberErrorCode,\n\t\t\t\tfmt.Sprintf(\"negative number '%s', only accepts number >= 0\", numberStr))\n\t\t\treturn\n\t\t}\n\n\t\tif number > conf.MaxFibInput {\n\t\t\tRespondWithError(c, http.StatusBadRequest, OverLimitNumberErrorCode,\n\t\t\t\tfmt.Sprintf(\"number '%s' is too big, only accepts number <= %d\", numberStr, conf.MaxFibInput))\n\t\t\treturn\n\t\t}\n\n\t\tlist := fib.GetSequence(number)\n\t\tRespondWithStatus(c, http.StatusOK, list)\n\t}\n}", "func Fibo(a int) int {\n\tif a <= 1 {\n\t\treturn a\n\t}\n\n\treturn Fibo(a-1) + Fibo(a-2)\n}", "func fibonacci(c, quit chan int) {\n\tx, y := 0, 1\n\tfor {\n\t\tselect {\n\t\tcase c <- x:\n\t\t\tx, y = y, x+y\n\t\tcase <-quit:\n\t\t\tfmt.Println(\"quit\")\n\t\t\treturn\n\t\tdefault:\n\t\t\tfmt.Println(\" .\")\n\t\t\ttime.Sleep(50 * time.Millisecond)\n\t\t}\n\t}\n}", "func fibo(c int8) []int8 {\n\tres := []int8{0}\n\tif c == 0 {return res}\n\tif c == 1 {\n\t\tres = append(res, c)\n\t}\n\treturn res\n}", "func calculateFibonacci(n int) []int {\n\t//We must set the first two seed values\n\tslice := []int{0, 1}\n\n\t//If we're calculating more than 2 vals, perform the calculation.\n\t//Otherwise, simply return a subset of the first two seeds.\n\tif n >= 2 {\n\t\tfor i := 2; i < n; i++ {\n\t\t\tnewVal := slice[i-1] + slice[i-2]\n\t\t\tslice = append(slice, newVal)\n\t\t}\n\t} else {\n\t\treturn slice[:n]\n\t}\n\treturn slice\n}", "func RecursiveFib(n int) int {\n\tif n < 2 {\n\t\treturn n\n\t}\n\treturn RecursiveFib(n-2) + RecursiveFib(n-1)\n}", "func (_Fibonacci *FibonacciTransactor) GenerateFib(opts *bind.TransactOpts, n *big.Int) (*types.Transaction, error) {\n\treturn _Fibonacci.contract.Transact(opts, \"generateFib\", n)\n}", "func RecursiveFib(num int) int {\n\tif num == 0 {\n\t\treturn 0\n\t}\n\tif num == 1 {\n\t\treturn 1\n\t}\n\treturn RecursiveFib(num-1) + RecursiveFib(num-2)\n}", "func NthFibonacci(pos int) int {\n\tc := FibonacciGen()\n\tf := make([]int, pos)\n\tfor i := 0; i <= pos; i++ {\n\t\tf = append(f, <-c)\n\t}\n\treturn f[len(f)-1]\n}", "func NewFibonacciCaller(address common.Address, caller bind.ContractCaller) (*FibonacciCaller, error) {\n\tcontract, err := bindFibonacci(address, caller, nil, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &FibonacciCaller{contract: contract}, nil\n}", "func (_Fibonacci *FibonacciSession) GenerateFib(n *big.Int) (*types.Transaction, error) {\n\treturn _Fibonacci.Contract.GenerateFib(&_Fibonacci.TransactOpts, n)\n}" ]
[ "0.8847999", "0.8757891", "0.87546825", "0.8742662", "0.8730003", "0.870885", "0.86387646", "0.8622545", "0.86220956", "0.86146945", "0.86084855", "0.85591084", "0.85559744", "0.8555152", "0.8547645", "0.8536301", "0.85228187", "0.85178524", "0.8498509", "0.84956574", "0.8494247", "0.8482248", "0.847035", "0.84429264", "0.8438078", "0.84131455", "0.83982104", "0.83582354", "0.83199954", "0.8264451", "0.8209773", "0.8189342", "0.8188591", "0.81853294", "0.80659187", "0.8045534", "0.8007905", "0.7927877", "0.7912563", "0.7910764", "0.78945297", "0.7871988", "0.78054965", "0.77891314", "0.7773056", "0.7770501", "0.7765899", "0.77577937", "0.7745123", "0.77318573", "0.7729581", "0.772577", "0.7649317", "0.7630438", "0.76018244", "0.757601", "0.7565312", "0.7563053", "0.7546456", "0.7543404", "0.7479417", "0.73533404", "0.7294914", "0.7265546", "0.7216187", "0.71538925", "0.7072339", "0.70045674", "0.6915924", "0.6910003", "0.69049275", "0.68917656", "0.6888679", "0.6840753", "0.6793118", "0.67152995", "0.67033476", "0.6689592", "0.6682793", "0.6671349", "0.6659959", "0.6647117", "0.660667", "0.65696394", "0.6530256", "0.65267617", "0.65203536", "0.651208", "0.6487263", "0.6473924", "0.6467542", "0.64615303", "0.6337343", "0.6330229", "0.6323983", "0.63237786", "0.6315483", "0.63072693", "0.6303858", "0.62494516" ]
0.86712015
6
NewDeployment converts BOSH deployment information into a deployment view for the dashboard
func NewDeployment(configTier config.Tier, configSlot config.Slot, boshDeployment *data.Deployment) (deployment *Deployment) {
	tierName := configTier.Name
	slotName := configSlot.Name
	name := fmt.Sprintf("%s / %s - %s", tierName, slotName, boshDeployment.Name)

	releases := make([]DisplayNameVersion, len(boshDeployment.Releases))
	for releaseIndex := range releases {
		boshRelease := boshDeployment.Releases[releaseIndex]
		releases[releaseIndex] = DisplayNameVersion{
			Name:         boshRelease.Name,
			Version:      boshRelease.Version,
			DisplayClass: "icon-minus blue",
		}
	}

	stemcells := make([]DisplayNameVersion, len(boshDeployment.Stemcells))
	for stemcellIndex := range stemcells {
		boshStemcell := boshDeployment.Stemcells[stemcellIndex]
		stemcells[stemcellIndex] = DisplayNameVersion{
			Name:         boshStemcell.Name,
			Version:      boshStemcell.Version,
			DisplayClass: "icon-minus blue",
		}
	}

	extraData := []Data{}
	for _, dataChunk := range boshDeployment.ExtraData {
		for _, dataChunkItem := range dataChunk.Data {
			displayClass := "icon-minus blue"
			if dataChunkItem.Indicator == "down" {
				displayClass = "icon-arrow-down red"
			}
			if dataChunkItem.Indicator == "up" {
				displayClass = "icon-arrow-up green"
			}
			dataItem := Data{
				Label:        dataChunkItem.Label,
				Value:        dataChunkItem.Value,
				DisplayClass: displayClass,
			}
			extraData = append(extraData, dataItem)
		}
	}
	if len(extraData) == 0 {
		dataItem := Data{
			Label:        "backup status",
			Value:        "unknown",
			DisplayClass: "icon-arrow-down red",
		}
		extraData = append(extraData, dataItem)
	}

	deployment = &Deployment{
		Name:      name,
		Releases:  releases,
		Stemcells: stemcells,
		ExtraData: extraData,
	}
	return
}
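A minimal usage sketch for the converter above, assuming config.Tier and config.Slot are structs exposing the Name field the function reads, and that data.Deployment can be built with just Name (its Releases, Stemcells, and ExtraData then default to empty, which triggers the backup-status fallback):

// Illustrative only; field shapes are inferred from what NewDeployment reads.
tier := config.Tier{Name: "production"}
slot := config.Slot{Name: "blue"}
bosh := &data.Deployment{Name: "cf"}

view := NewDeployment(tier, slot, bosh)
fmt.Println(view.Name)               // "production / blue - cf"
fmt.Println(view.ExtraData[0].Value) // "unknown" (the backup-status fallback)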
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func newDeployment(apployment *appscodev1alpha1.Apployment) *appsv1.Deployment {\n\tlabels := map[string]string{\n\t\t\"app\": \"Appscode\",\n\t\t\"controller\": apployment.Name,\n\t}\n\treturn &appsv1.Deployment{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: apployment.Spec.ApploymentName,\n\t\t\tNamespace: apployment.Namespace,\n\t\t\tOwnerReferences: []metav1.OwnerReference{\n\t\t\t\t*metav1.NewControllerRef(apployment, appscodev1alpha1.SchemeGroupVersion.WithKind(\"Apployment\")),\n\t\t\t},\n\t\t},\n\t\tSpec: appsv1.DeploymentSpec{\n\t\t\tReplicas: apployment.Spec.Replicas,\n\t\t\tSelector: &metav1.LabelSelector{\n\t\t\t\tMatchLabels: labels,\n\t\t\t},\n\t\t\tTemplate: corev1.PodTemplateSpec{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tLabels: labels,\n\t\t\t\t},\n\t\t\t\tSpec: corev1.PodSpec{\n\t\t\t\t\tContainers: []corev1.Container{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: apployment.Name,\n\t\t\t\t\t\t\tImage: apployment.Spec.Image,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}", "func newDeployment() *appsv1.Deployment {\n\tvar replicas int32 = 1\n\treturn &appsv1.Deployment{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tNamespace: tNs,\n\t\t\tName: tName,\n\t\t\tLabels: map[string]string{\n\t\t\t\tapplicationNameLabelKey: tName,\n\t\t\t},\n\t\t\tOwnerReferences: []metav1.OwnerReference{tOwnerRef},\n\t\t},\n\t\tSpec: appsv1.DeploymentSpec{\n\t\t\tReplicas: &replicas,\n\t\t\tSelector: &metav1.LabelSelector{MatchLabels: map[string]string{applicationNameLabelKey: tName}},\n\t\t\tTemplate: corev1.PodTemplateSpec{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tLabels: map[string]string{\n\t\t\t\t\t\tdashboardLabelKey: dashboardLabelValue,\n\t\t\t\t\t\teventSourceLabelKey: eventSourceLabelValue,\n\t\t\t\t\t\tapplicationNameLabelKey: tName,\n\t\t\t\t\t\tapplicationLabelKey: tName,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tSpec: corev1.PodSpec{\n\t\t\t\t\tContainers: []corev1.Container{{\n\t\t\t\t\t\tImage: tImg,\n\t\t\t\t\t\tPorts: []corev1.ContainerPort{{\n\t\t\t\t\t\t\tName: portName,\n\t\t\t\t\t\t\tContainerPort: tPort,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tName: metricsPortName,\n\t\t\t\t\t\t\t\tContainerPort: metricsPort,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tName: adapterContainerName,\n\t\t\t\t\t\tEnv: tEnvVars,\n\t\t\t\t\t\tReadinessProbe: &corev1.Probe{\n\t\t\t\t\t\t\tHandler: corev1.Handler{\n\t\t\t\t\t\t\t\tHTTPGet: &corev1.HTTPGetAction{\n\t\t\t\t\t\t\t\t\tPath: adapterHealthEndpoint,\n\t\t\t\t\t\t\t\t\tPort: intstr.FromInt(adapterPort),\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}", "func newDeployment(ctx context.Context, c *vim25.Client, params *types.VMDeployParams, l log.Logger) (*Deployment, error) { //nolint: unparam\n\td := newSimpleDeployment(c, params, l)\n\n\t// step 1. choose Datacenter and folder\n\tif err := d.chooseDatacenter(ctx, params.Datacenter); err != nil {\n\t\terr = errors.Wrap(err, \"Could not choose datacenter\")\n\t\tl.Log(\"err\", err)\n\t\treturn nil, err\n\t}\n\n\tif err := d.chooseFolder(ctx, params.Folder); err != nil {\n\t\terr = errors.Wrap(err, \"Could not choose folder\")\n\t\tl.Log(\"err\", err)\n\t\treturn nil, err\n\t}\n\n\t// step 2. 
choose computer resource\n\tresType := params.ComputerResources.Type\n\tresPath := params.ComputerResources.Path\n\tif err := d.chooseComputerResource(ctx, resType, resPath); err != nil {\n\t\terr = errors.Wrap(err, \"Could not choose Computer Resource\")\n\t\tl.Log(\"err\", err)\n\t\treturn nil, err\n\t}\n\n\t// step 3. Choose datastore cluster or single datastore\n\tdsType := params.Datastores.Type\n\tdsNames := params.Datastores.Names\n\tif err := d.chooseDatastore(ctx, dsType, dsNames); err != nil {\n\t\terr = errors.Wrap(err, \"Could not choose datastore\")\n\t\tl.Log(\"err\", err)\n\t\treturn nil, err\n\t}\n\n\treturn d, nil\n}", "func (pm *PipelineManager) newDeployment(pipeline *api.Pipeline) *appsv1.Deployment {\n\tlbls := pipeLabels(pipeline)\n\n\tdeployment := &appsv1.Deployment{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: pipeline.Name,\n\t\t\tNamespace: pipeline.Namespace,\n\t\t\tOwnerReferences: []metav1.OwnerReference{\n\t\t\t\t*metav1.NewControllerRef(pipeline, api.SchemeGroupVersion.WithKind(api.PipelineResourceKind)),\n\t\t\t},\n\t\t\tLabels: lbls,\n\t\t},\n\t\tSpec: appsv1.DeploymentSpec{\n\t\t\tSelector: &metav1.LabelSelector{\n\t\t\t\tMatchLabels: lbls,\n\t\t\t},\n\t\t\tStrategy: appsv1.DeploymentStrategy{\n\t\t\t\tType: appsv1.RecreateDeploymentStrategyType,\n\t\t\t},\n\t\t\tMinReadySeconds: 10,\n\t\t\tTemplate: corev1.PodTemplateSpec{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tLabels: lbls,\n\t\t\t\t},\n\t\t\t\tSpec: corev1.PodSpec{\n\t\t\t\t\tRestartPolicy: corev1.RestartPolicyAlways,\n\t\t\t\t\tVolumes: []corev1.Volume{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: \"config\",\n\t\t\t\t\t\t\tVolumeSource: corev1.VolumeSource{\n\t\t\t\t\t\t\t\tConfigMap: &corev1.ConfigMapVolumeSource{\n\t\t\t\t\t\t\t\t\tLocalObjectReference: corev1.LocalObjectReference{Name: pipeline.Name},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tContainers: []corev1.Container{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: \"gravity\",\n\t\t\t\t\t\t\tImage: pipeline.Spec.Image,\n\t\t\t\t\t\t\tCommand: pipeline.Spec.Command,\n\t\t\t\t\t\t\tPorts: []corev1.ContainerPort{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tName: \"http\",\n\t\t\t\t\t\t\t\t\tContainerPort: containerPort,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tLivenessProbe: &corev1.Probe{\n\t\t\t\t\t\t\t\tHandler: corev1.Handler{\n\t\t\t\t\t\t\t\t\tHTTPGet: &corev1.HTTPGetAction{\n\t\t\t\t\t\t\t\t\t\tPort: intstr.FromString(\"http\"),\n\t\t\t\t\t\t\t\t\t\tPath: \"/healthz\",\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\tInitialDelaySeconds: 10,\n\t\t\t\t\t\t\t\tTimeoutSeconds: 5,\n\t\t\t\t\t\t\t\tPeriodSeconds: 10,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tVolumeMounts: []corev1.VolumeMount{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tName: \"config\",\n\t\t\t\t\t\t\t\t\tMountPath: \"/etc/gravity\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tResources: corev1.ResourceRequirements{ //TODO from tps config or metrics\n\t\t\t\t\t\t\t\tRequests: corev1.ResourceList{\n\t\t\t\t\t\t\t\t\t\"cpu\": resource.MustParse(\"100m\"),\n\t\t\t\t\t\t\t\t\t\"memory\": resource.MustParse(\"150M\"),\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tif pipeline.Spec.Paused {\n\t\tdeployment.Spec.Replicas = int32Ptr(0)\n\t} else {\n\t\tdeployment.Spec.Replicas = int32Ptr(1)\n\t}\n\treturn deployment\n}", "func newDeployment(deploymentName string, replicas int32, podLabels map[string]string, containerName, image string, strategyType appsv1.DeploymentStrategyType) 
*appsv1.Deployment {\n\tzero := int64(0)\n\treturn &appsv1.Deployment{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: deploymentName,\n\t\t\tLabels: podLabels,\n\t\t},\n\t\tSpec: appsv1.DeploymentSpec{\n\t\t\tReplicas: &replicas,\n\t\t\tSelector: &metav1.LabelSelector{MatchLabels: podLabels},\n\t\t\tStrategy: appsv1.DeploymentStrategy{\n\t\t\t\tType: strategyType,\n\t\t\t},\n\t\t\tTemplate: corev1.PodTemplateSpec{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tLabels: podLabels,\n\t\t\t\t},\n\t\t\t\tSpec: corev1.PodSpec{\n\t\t\t\t\tTerminationGracePeriodSeconds: &zero,\n\t\t\t\t\tContainers: []corev1.Container{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: containerName,\n\t\t\t\t\t\t\tImage: image,\n\t\t\t\t\t\t\tSecurityContext: &corev1.SecurityContext{},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}", "func CreateDeployment(c *gin.Context) {\n\n\tbanzaiUtils.LogInfo(banzaiConstants.TagCreateDeployment, \"Start create deployment\")\n\n\t// --- [ Get cluster ] --- //\n\tbanzaiUtils.LogInfo(banzaiConstants.TagCreateDeployment, \"Get cluster\")\n\tcloudCluster, err := cloud.GetClusterFromDB(c)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tbanzaiUtils.LogInfo(banzaiConstants.TagCreateDeployment, \"Get cluster succeeded\")\n\n\tbanzaiUtils.LogInfo(banzaiConstants.TagCreateDeployment, \"Bind json into DeploymentType struct\")\n\tvar deployment DeploymentType\n\tif err := c.BindJSON(&deployment); err != nil {\n\t\tbanzaiUtils.LogInfo(banzaiConstants.TagCreateDeployment, \"Bind failed\")\n\t\tbanzaiUtils.LogInfo(banzaiConstants.TagCreateDeployment, \"Required field is empty.\"+err.Error())\n\t\tcloud.SetResponseBodyJson(c, http.StatusBadRequest, gin.H{\n\t\t\tcloud.JsonKeyStatus: http.StatusBadRequest,\n\t\t\tcloud.JsonKeyMessage: \"Required field is empty\",\n\t\t\tcloud.JsonKeyError: err,\n\t\t})\n\t\treturn\n\t}\n\n\tbanzaiUtils.LogDebug(banzaiConstants.TagCreateDeployment, fmt.Sprintf(\"Creating chart %s with version %s and release name %s\", deployment.Name, deployment.Version, deployment.ReleaseName))\n\tprefix := viper.GetString(\"dev.chartpath\")\n\tchartPath := path.Join(prefix, deployment.Name)\n\n\tvar values []byte = nil\n\tif deployment.Values != \"\" {\n\t\tparsedJSON, err := yaml.Marshal(deployment.Values)\n\t\tif err != nil {\n\t\t\tbanzaiUtils.LogError(banzaiConstants.TagCreateDeployment, \"Can't parse Values:\", err)\n\t\t}\n\t\tvalues, err = yaml.JSONToYAML(parsedJSON)\n\t\tif err != nil {\n\t\t\tbanzaiUtils.LogError(banzaiConstants.TagCreateDeployment, \"Can't convert JSON to YAML:\", err)\n\t\t\treturn\n\t\t}\n\t}\n\t// --- [ Get K8S Config ] --- //\n\tkubeConfig, err := cloud.GetK8SConfig(cloudCluster, c)\n\tif err != nil {\n\t\treturn\n\t}\n\tbanzaiUtils.LogInfo(banzaiConstants.TagCreateDeployment, \"Getting K8S Config Succeeded\")\n\n\tbanzaiUtils.LogDebug(banzaiConstants.TagCreateDeployment, \"Custom values:\", string(values))\n\tbanzaiUtils.LogInfo(banzaiConstants.TagCreateDeployment, \"Create deployment\")\n\trelease, err := helm.CreateDeployment(chartPath, deployment.ReleaseName, values, kubeConfig)\n\tif err != nil {\n\t\tbanzaiUtils.LogWarn(banzaiConstants.TagCreateDeployment, \"Error during create deployment.\", err.Error())\n\t\tcloud.SetResponseBodyJson(c, http.StatusNotFound, gin.H{\n\t\t\tcloud.JsonKeyStatus: http.StatusNotFound,\n\t\t\tcloud.JsonKeyMessage: fmt.Sprintf(\"%s\", err),\n\t\t})\n\t\treturn\n\t} else {\n\t\tbanzaiUtils.LogInfo(banzaiConstants.TagCreateDeployment, \"Create deployment succeeded\")\n\t}\n\n\treleaseName := 
release.Release.Name\n\treleaseNotes := release.Release.Info.Status.Notes\n\n\tbanzaiUtils.LogDebug(banzaiConstants.TagCreateDeployment, \"Release name:\", releaseName)\n\tbanzaiUtils.LogDebug(banzaiConstants.TagCreateDeployment, \"Release notes:\", releaseNotes)\n\n\t//Get ingress with deployment prefix TODO\n\t//Get local ingress address?\n\tendpoint, err := cloud.GetK8SEndpoint(cloudCluster, c)\n\tif err != nil {\n\t\tcloud.SetResponseBodyJson(c, http.StatusInternalServerError, gin.H{\n\t\t\tcloud.JsonKeyStatus: http.StatusInternalServerError,\n\t\t\tcloud.JsonKeyMessage: fmt.Sprintf(\"%s\", err),\n\t\t})\n\t\treturn\n\t}\n\n\tdeploymentUrl := fmt.Sprintf(\"http://%s:30080/zeppelin/\", endpoint)\n\tnotify.SlackNotify(fmt.Sprintf(\"Deployment Created: %s\", deploymentUrl))\n\tcloud.SetResponseBodyJson(c, http.StatusCreated, gin.H{\n\t\tcloud.JsonKeyStatus: http.StatusCreated,\n\t\tcloud.JsonKeyMessage: fmt.Sprintf(\"%s\", err),\n\t\tcloud.JsonKeyReleaseName: releaseName,\n\t\tcloud.JsonKeyUrl: deploymentUrl,\n\t\tcloud.JsonKeyNotes: releaseNotes,\n\t})\n\treturn\n}", "func CreateDeployment(c *gin.Context) {\n\tlog := logger.WithFields(logrus.Fields{\"tag\": constants.TagCreateDeployment})\n\tparsedRequest, err := parseCreateUpdateDeploymentRequest(c)\n\tif err != nil {\n\t\tlog.Error(err.Error())\n\t\tc.JSON(http.StatusBadRequest, htype.ErrorResponse{\n\t\t\tCode: http.StatusBadRequest,\n\t\t\tMessage: \"Error during parsing request!\",\n\t\t\tError: errors.Cause(err).Error(),\n\t\t})\n\t\treturn\n\t}\n\trelease, err := helm.CreateDeployment(parsedRequest.deploymentName,\n\t\tparsedRequest.deploymentReleaseName, parsedRequest.values, parsedRequest.kubeConfig,\n\t\tparsedRequest.clusterName)\n\tif err != nil {\n\t\t//TODO distinguish error codes\n\t\tlog.Errorf(\"Error during create deployment. 
%s\", err.Error())\n\t\tc.JSON(http.StatusBadRequest, htype.ErrorResponse{\n\t\t\tCode: http.StatusBadRequest,\n\t\t\tMessage: \"Error creating deployment\",\n\t\t\tError: err.Error(),\n\t\t})\n\t\treturn\n\t}\n\tlog.Info(\"Create deployment succeeded\")\n\n\treleaseName := release.Release.Name\n\treleaseNotes := release.Release.Info.Status.Notes\n\n\tlog.Debug(\"Release name: \", releaseName)\n\tlog.Debug(\"Release notes: \", releaseNotes)\n\tresponse := htype.CreateUpdateDeploymentResponse{\n\t\tReleaseName: releaseName,\n\t\tNotes: releaseNotes,\n\t}\n\tc.JSON(http.StatusCreated, response)\n\treturn\n}", "func newDeployment(name, ns string, replicas int32) *apps.Deployment {\n\treturn &apps.Deployment{\n\t\tTypeMeta: metav1.TypeMeta{\n\t\t\tKind: \"Deployment\",\n\t\t\tAPIVersion: \"apps/v1\",\n\t\t},\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tNamespace: ns,\n\t\t\tName: name,\n\t\t},\n\t\tSpec: apps.DeploymentSpec{\n\t\t\tReplicas: &replicas,\n\t\t\tSelector: &metav1.LabelSelector{MatchLabels: testLabels()},\n\t\t\tStrategy: apps.DeploymentStrategy{\n\t\t\t\tType: apps.RollingUpdateDeploymentStrategyType,\n\t\t\t\tRollingUpdate: new(apps.RollingUpdateDeployment),\n\t\t\t},\n\t\t\tTemplate: v1.PodTemplateSpec{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tLabels: testLabels(),\n\t\t\t\t},\n\t\t\t\tSpec: v1.PodSpec{\n\t\t\t\t\tContainers: []v1.Container{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: fakeContainerName,\n\t\t\t\t\t\t\tImage: fakeImage,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}", "func newHiveDeployment(cr *v1alpha1.Hive) *appsv1.Deployment {\n\tlabels := map[string]string{\n\t\t\"app\": \"hive-operator\",\n\t}\n\treplicas := cr.Spec.Size\n\t//deployment present in apps/v1 not corev1\n\t//need metav1 for including the TypeMeta, ObjectMeta\n\treturn &appsv1.Deployment{\n\t\tTypeMeta: metav1.TypeMeta{\n\t\t\tKind: \"Deployment\",\n\t\t\tAPIVersion: \"apps/v1\",\n\t\t},\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: \"hive-deployment\",\n\t\t\tNamespace: cr.Namespace,\n\t\t\tOwnerReferences: []metav1.OwnerReference{\n\t\t\t\t*metav1.NewControllerRef(cr, schema.GroupVersionKind{\n\t\t\t\t\tGroup: v1alpha1.SchemeGroupVersion.Group,\n\t\t\t\t\tVersion: v1alpha1.SchemeGroupVersion.Version,\n\t\t\t\t\tKind: \"Hive\",\n\t\t\t\t}),\n\t\t\t},\n\t\t\tLabels: labels,\n\t\t},\n\t\tSpec: appsv1.DeploymentSpec{\n\t\t\tReplicas: &replicas,\n\t\t\tSelector: &metav1.LabelSelector{\n\t\t\t\tMatchLabels: labels,\n\t\t\t},\n\t\t\tTemplate: corev1.PodTemplateSpec{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tLabels: labels,\n\t\t\t\t},\n\t\t\t\tSpec: corev1.PodSpec{\n\t\t\t\t\tContainers: []corev1.Container{{\n\t\t\t\t\t\tImage: \"luksa/kubia:v2\",\n\t\t\t\t\t\tName: \"hive-operator\",\n\t\t\t\t\t}},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}", "func toDeployment(s latest.ServiceConfig, objectMeta metav1.ObjectMeta, podTemplate apiv1.PodTemplateSpec, labelSelector map[string]string, original appsv1.Deployment) *appsv1.Deployment {\n\trevisionHistoryLimit := int32(3)\n\tdep := original.DeepCopy()\n\tdep.ObjectMeta = objectMeta\n\tdep.Spec.Replicas = toReplicas(s.Deploy.Replicas)\n\tdep.Spec.RevisionHistoryLimit = &revisionHistoryLimit\n\tdep.Spec.Template = forceRestartPolicy(podTemplate, apiv1.RestartPolicyAlways)\n\tdep.Spec.Strategy = toDeploymentStrategy(s, original.Spec.Strategy)\n\tdep.Spec.Selector = &metav1.LabelSelector{\n\t\tMatchLabels: labelSelector,\n\t}\n\treturn dep\n}", "func createDeployment(k *kabanerov1alpha1.Kabanero, clientset *kubernetes.Clientset, c 
client.Client, name string, image string, env []corev1.EnvVar, envFrom []corev1.EnvFromSource, livenessProbe *corev1.Probe, reqLogger logr.Logger) error {\n\tcl := clientset.AppsV1().Deployments(k.ObjectMeta.Namespace)\n\n\t// Check if the Deployment resource already exists.\n\tdInstance := &appsv1.Deployment{}\n\terr := c.Get(context.Background(), types.NamespacedName{\n\t\tName: name,\n\t\tNamespace: k.ObjectMeta.Namespace}, dInstance)\n\n\tdeploymentExists := true\n\tif err != nil {\n\t\tif apierrors.IsNotFound(err) == false {\n\t\t\treturn err\n\t\t}\n\n\t\t// The deployment does not already exist. Create one.\n\t\tdeploymentExists = false\n\n\t\t// Gather Kabanero operator ownerReference information.\n\t\townerRef, err := getOwnerReference(k, c, reqLogger)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// Initialize the deployment\n\t\tvar repCount int32 = 1\n\t\tdInstance = &appsv1.Deployment{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: name,\n\t\t\t\tOwnerReferences: []metav1.OwnerReference{\n\t\t\t\t\t{\n\t\t\t\t\t\tAPIVersion: ownerRef.APIVersion,\n\t\t\t\t\t\tKind: ownerRef.Kind,\n\t\t\t\t\t\tName: ownerRef.Name,\n\t\t\t\t\t\tUID: ownerRef.UID,\n\t\t\t\t\t\tController: ownerRef.Controller,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tSpec: appsv1.DeploymentSpec{\n\t\t\t\tReplicas: &repCount,\n\t\t\t\tSelector: &metav1.LabelSelector{\n\t\t\t\t\tMatchLabels: map[string]string{\n\t\t\t\t\t\t\"app\": name,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tTemplate: corev1.PodTemplateSpec{\n\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\tLabels: map[string]string{\n\t\t\t\t\t\t\t\"app\": name,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tSpec: corev1.PodSpec{\n\t\t\t\t\t\tServiceAccountName: name,\n\t\t\t\t\t\tContainers: []corev1.Container{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tName: name,\n\t\t\t\t\t\t\t\tImagePullPolicy: \"Always\",\n\t\t\t\t\t\t\t\tPorts: []corev1.ContainerPort{\n\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\tContainerPort: 9443,\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t}\n\n\t// Here we update the things that can change. 
In the future we could\n\t// consider re-applying all the fields in case someone hand-edited the\n\t// deployment object in an incompatible way.\n\tdInstance.Spec.Template.Spec.Containers[0].Env = env\n\tdInstance.Spec.Template.Spec.Containers[0].EnvFrom = envFrom\n\tdInstance.Spec.Template.Spec.Containers[0].Image = image\n\tdInstance.Spec.Template.Spec.Containers[0].LivenessProbe = livenessProbe\n\n\tif deploymentExists == false {\n\t\treqLogger.Info(fmt.Sprintf(\"createDeployment: Deployment for create: %v\", dInstance))\n\n\t\t_, err = cl.Create(dInstance)\n\t} else {\n\t\treqLogger.Info(fmt.Sprintf(\"createDeployment: Deployment for update: %v\", dInstance))\n\n\t\t_, err = cl.Update(dInstance)\n\t}\n\n\treturn err\n}", "func newDeployment(t *testing.T, procUpdates func(ProcessUpdate), kubeClient kubernetes.Interface) *Deployment {\n\tcompList, err := config.NewComponentList(\"../test/data/componentlist.yaml\")\n\tassert.NoError(t, err)\n\tconfig := &config.Config{\n\t\tCancelTimeout: cancelTimeout,\n\t\tQuitTimeout: quitTimeout,\n\t\tBackoffInitialIntervalSeconds: 1,\n\t\tBackoffMaxElapsedTimeSeconds: 1,\n\t\tLog: logger.NewLogger(true),\n\t\tComponentList: compList,\n\t}\n\tcore := newCore(config, &overrides.Builder{}, kubeClient, procUpdates)\n\treturn &Deployment{core}\n}", "func Create(deployment *Deployment) (*Deployment, error) {\n\targs := []string{\n\t\t\"deployment-manager\",\n\t\t\"deployments\",\n\t\t\"create\",\n\t\tdeployment.config.Name,\n\t\t\"--config\",\n\t\tdeployment.configFile,\n\t\t\"--project\",\n\t\tdeployment.config.Project,\n\t}\n\t_, err := runGCloud(args...)\n\tif err != nil {\n\t\tlog.Printf(\"Failed to create deployment: %v, error: %v\", deployment, err)\n\t\treturn nil, err\n\t}\n\toutputs, err := GetOutputs(deployment.config.Name, deployment.config.Project)\n\tif err != nil {\n\t\tlog.Printf(\"Failed to get outputs for deployment: %v, error: %v\", deployment, err)\n\t\treturn nil, err\n\t}\n\tdeployment.Outputs = outputs\n\treturn deployment, nil\n}", "func createDeployment(cluster *client.VanClient, annotations map[string]string) (*v1.Deployment, error) {\n\tname := \"nginx\"\n\treplicas := int32(1)\n\tdep := &v1.Deployment{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: name,\n\t\t\tNamespace: cluster.Namespace,\n\t\t\tAnnotations: annotations,\n\t\t\tLabels: map[string]string{\n\t\t\t\t\"app\": name,\n\t\t\t},\n\t\t},\n\t\tSpec: v1.DeploymentSpec{\n\t\t\tReplicas: &replicas,\n\t\t\tSelector: &metav1.LabelSelector{\n\t\t\t\tMatchLabels: map[string]string{\n\t\t\t\t\t\"app\": name,\n\t\t\t\t},\n\t\t\t},\n\t\t\tTemplate: corev1.PodTemplateSpec{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tLabels: map[string]string{\n\t\t\t\t\t\t\"app\": name,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tSpec: corev1.PodSpec{\n\t\t\t\t\tContainers: []corev1.Container{\n\t\t\t\t\t\t{Name: \"nginx\", Image: \"quay.io/skupper/nginx-unprivileged:stable-alpine\", Ports: []corev1.ContainerPort{{Name: \"web\", ContainerPort: 8080}}, ImagePullPolicy: corev1.PullIfNotPresent},\n\t\t\t\t\t},\n\t\t\t\t\tRestartPolicy: corev1.RestartPolicyAlways,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\t// Deploying resource\n\tdep, err := cluster.KubeClient.AppsV1().Deployments(cluster.Namespace).Create(context.TODO(), dep, metav1.CreateOptions{})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Wait for deployment to be ready\n\tdep, err = kube.WaitDeploymentReadyReplicas(dep.Name, cluster.Namespace, 1, cluster.KubeClient, timeout, interval)\n\tif err != nil {\n\t\treturn nil, 
err\n\t}\n\n\treturn dep, nil\n}", "func (d *Deployment) Create(deploymentCreate ReqDeploymentCreate) (deployment *entitys.RespDeploymentCreate, err error) {\n\tdeployment = &entitys.RespDeploymentCreate{}\n\tvar data []byte\n\tbody := bytes.NewBuffer(data)\n\tw := multipart.NewWriter(body)\n\n\tif err = w.WriteField(\"deployment-name\", deploymentCreate.DeploymentName); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif deploymentCreate.EnableDuplicateFiltering != nil {\n\t\tif err = w.WriteField(\"enable-duplicate-filtering\", strconv.FormatBool(*deploymentCreate.EnableDuplicateFiltering)); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tif deploymentCreate.DeployChangedOnly != nil {\n\t\tif err = w.WriteField(\"deploy-changed-only\", strconv.FormatBool(*deploymentCreate.DeployChangedOnly)); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tif deploymentCreate.DeploymentSource != nil {\n\t\tif err = w.WriteField(\"deployment-source\", *deploymentCreate.DeploymentSource); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tresource := deploymentCreate.Resources\n\tvar fw io.Writer\n\n\tdefer func(resource *os.File) {\n\t\terr := resource.Close()\n\t\tif err != nil {\n\n\t\t}\n\t}(resource)\n\tif fw, err = w.CreateFormFile(\"data\", resource.Name()); err != nil {\n\t\treturn nil, err\n\t}\n\tif _, err = io.Copy(fw, resource); err != nil {\n\t\treturn nil, err\n\t}\n\tif err := w.Close(); err != nil {\n\t\treturn nil, err\n\t}\n\tres, err := d.client.do(http.MethodPost, \"/deployment/create\", map[string]string{}, body, w.FormDataContentType())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = d.client.readJsonResponse(res, deployment)\n\n\treturn deployment, err\n}", "func newDeploymentForCR(cr *tesseractv1alpha1.OutgoingPortal) *appsv1.Deployment {\n\tvar replicas int32 = 1\n\n\treturn &appsv1.Deployment{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: cr.Name + \"-portal\",\n\t\t\tNamespace: cr.Namespace,\n\t\t\tLabels: map[string]string{\n\t\t\t\t\"app\": cr.Name + \"-portal\",\n\t\t\t},\n\t\t},\n\t\tSpec: appsv1.DeploymentSpec{\n\t\t\tReplicas: &replicas,\n\t\t\tSelector: &metav1.LabelSelector{\n\t\t\t\tMatchLabels: map[string]string{\n\t\t\t\t\t\"app\": cr.Name + \"-portal\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tTemplate: corev1.PodTemplateSpec{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tLabels: map[string]string{\n\t\t\t\t\t\t\"app\": cr.Name + \"-portal\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tSpec: corev1.PodSpec{\n\t\t\t\t\tContainers: []corev1.Container{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: \"busybox\",\n\t\t\t\t\t\t\tImage: \"envoyproxy/envoy:v1.10.0\",\n\t\t\t\t\t\t\tPorts: []corev1.ContainerPort{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tName: \"proxy\",\n\t\t\t\t\t\t\t\t\tContainerPort: 80,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tName: \"admin\",\n\t\t\t\t\t\t\t\t\tContainerPort: 8001,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tCommand: []string{\n\t\t\t\t\t\t\t\t\"/usr/local/bin/envoy\",\n\t\t\t\t\t\t\t\t\"-c\",\n\t\t\t\t\t\t\t\t\"/config/envoy.yaml\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tVolumeMounts: []corev1.VolumeMount{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tName: \"config\",\n\t\t\t\t\t\t\t\t\tMountPath: \"/config\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tName: \"secret\",\n\t\t\t\t\t\t\t\t\tMountPath: \"/secret\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tVolumes: []corev1.Volume{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: \"config\",\n\t\t\t\t\t\t\tVolumeSource: 
corev1.VolumeSource{\n\t\t\t\t\t\t\t\tConfigMap: &corev1.ConfigMapVolumeSource{\n\t\t\t\t\t\t\t\t\tLocalObjectReference: corev1.LocalObjectReference{\n\t\t\t\t\t\t\t\t\t\tName: cr.Name + \"-portal\",\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: \"secret\",\n\t\t\t\t\t\t\tVolumeSource: corev1.VolumeSource{\n\t\t\t\t\t\t\t\tSecret: &corev1.SecretVolumeSource{\n\t\t\t\t\t\t\t\t\tSecretName: cr.Name + \"-portal\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}", "func deploymentCommand(_ *cobra.Command, _ []string) error {\n\tnodePort := varIntNodePort\n\thome := varStringHome\n\tremote := varStringRemote\n\tbranch := varStringBranch\n\tif len(remote) > 0 {\n\t\trepo, _ := util.CloneIntoGitHome(remote, branch)\n\t\tif len(repo) > 0 {\n\t\t\thome = repo\n\t\t}\n\t}\n\n\tif len(home) > 0 {\n\t\tpathx.RegisterGoctlHome(home)\n\t}\n\n\t// 0 to disable the nodePort type\n\tif nodePort != 0 && (nodePort < basePort || nodePort > portLimit) {\n\t\treturn errors.New(\"nodePort should be between 30000 and 32767\")\n\t}\n\n\ttext, err := pathx.LoadTemplate(category, deployTemplateFile, deploymentTemplate)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tout, err := pathx.CreateIfNotExist(varStringO)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer out.Close()\n\n\tif varIntTargetPort == 0 {\n\t\tvarIntTargetPort = varIntPort\n\t}\n\n\tt := template.Must(template.New(\"deploymentTemplate\").Parse(text))\n\terr = t.Execute(out, Deployment{\n\t\tName: varStringName,\n\t\tNamespace: varStringNamespace,\n\t\tImage: varStringImage,\n\t\tSecret: varStringSecret,\n\t\tReplicas: varIntReplicas,\n\t\tRevisions: varIntRevisions,\n\t\tPort: varIntPort,\n\t\tTargetPort: varIntTargetPort,\n\t\tNodePort: nodePort,\n\t\tUseNodePort: nodePort > 0,\n\t\tRequestCpu: varIntRequestCpu,\n\t\tRequestMem: varIntRequestMem,\n\t\tLimitCpu: varIntLimitCpu,\n\t\tLimitMem: varIntLimitMem,\n\t\tMinReplicas: varIntMinReplicas,\n\t\tMaxReplicas: varIntMaxReplicas,\n\t\tServiceAccount: varStringServiceAccount,\n\t\tImagePullPolicy: varStringImagePullPolicy,\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfmt.Println(color.Green.Render(\"Done.\"))\n\treturn nil\n}", "func generatorDeployment(c *configuration.Config, a *api.Apicurito) (dep client.Object) {\n\t// Define a new deployment\n\tname := DefineGeneratorName(a)\n\tdeployLabels := map[string]string{\n\t\t\"app\": \"apicurito\",\n\t\t\"component\": name,\n\t\t\"com.company\": \"Red_Hat\",\n\t\t\"rht.prod_name\": \"Red_Hat_Integration\",\n\t\t\"rht.prod_ver\": version.ShortVersion(),\n\t\t\"rht.comp\": \"Fuse\",\n\t\t\"rht.comp_ver\": version.ShortVersion(),\n\t\t\"rht.subcomp\": name,\n\t\t\"rht.subcomp_t\": \"infrastructure\",\n\t}\n\tdep = &appsv1.Deployment{\n\t\tTypeMeta: metav1.TypeMeta{\n\t\t\tAPIVersion: \"apps/v1\",\n\t\t\tKind: \"Deployment\",\n\t\t},\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: name,\n\t\t\tNamespace: a.Namespace,\n\t\t\tOwnerReferences: []metav1.OwnerReference{\n\t\t\t\t*metav1.NewControllerRef(a, schema.GroupVersionKind{\n\t\t\t\t\tGroup: api.SchemeGroupVersion.Group,\n\t\t\t\t\tVersion: api.SchemeGroupVersion.Version,\n\t\t\t\t\tKind: a.Kind,\n\t\t\t\t}),\n\t\t\t},\n\t\t},\n\n\t\tSpec: appsv1.DeploymentSpec{\n\t\t\tReplicas: &a.Spec.Size,\n\t\t\tSelector: &metav1.LabelSelector{\n\t\t\t\tMatchLabels: labelComponent(name),\n\t\t\t},\n\t\t\tStrategy: appsv1.DeploymentStrategy{\n\t\t\t\tType: 
appsv1.RollingUpdateDeploymentStrategyType,\n\t\t\t},\n\t\t\tTemplate: corev1.PodTemplateSpec{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tLabels: deployLabels,\n\t\t\t\t},\n\t\t\t\tSpec: corev1.PodSpec{\n\t\t\t\t\tContainers: []corev1.Container{{\n\t\t\t\t\t\tImage: c.GeneratorImage,\n\t\t\t\t\t\tImagePullPolicy: corev1.PullIfNotPresent,\n\t\t\t\t\t\tName: name,\n\t\t\t\t\t\tPorts: []corev1.ContainerPort{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tContainerPort: 8080,\n\t\t\t\t\t\t\t\tName: \"http\",\n\t\t\t\t\t\t\t\tProtocol: corev1.ProtocolTCP,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tContainerPort: 8181,\n\t\t\t\t\t\t\t\tName: \"health\",\n\t\t\t\t\t\t\t\tProtocol: corev1.ProtocolTCP,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tContainerPort: 9779,\n\t\t\t\t\t\t\t\tName: \"prometheus\",\n\t\t\t\t\t\t\t\tProtocol: corev1.ProtocolTCP,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tContainerPort: 8778,\n\t\t\t\t\t\t\t\tName: \"jolokia\",\n\t\t\t\t\t\t\t\tProtocol: corev1.ProtocolTCP,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tLivenessProbe: &corev1.Probe{\n\t\t\t\t\t\t\tFailureThreshold: 3,\n\t\t\t\t\t\t\tInitialDelaySeconds: 180,\n\t\t\t\t\t\t\tPeriodSeconds: 10,\n\t\t\t\t\t\t\tSuccessThreshold: 1,\n\t\t\t\t\t\t\tTimeoutSeconds: 1,\n\t\t\t\t\t\t\tHandler: corev1.Handler{\n\t\t\t\t\t\t\t\tTCPSocket: &corev1.TCPSocketAction{\n\t\t\t\t\t\t\t\t\tPort: intstr.FromString(\"http\"),\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tReadinessProbe: &corev1.Probe{\n\t\t\t\t\t\t\tFailureThreshold: 3,\n\t\t\t\t\t\t\tInitialDelaySeconds: 10,\n\t\t\t\t\t\t\tPeriodSeconds: 10,\n\t\t\t\t\t\t\tSuccessThreshold: 1,\n\t\t\t\t\t\t\tTimeoutSeconds: 1,\n\t\t\t\t\t\t\tHandler: corev1.Handler{\n\t\t\t\t\t\t\t\tTCPSocket: &corev1.TCPSocketAction{\n\t\t\t\t\t\t\t\t\tPort: intstr.FromString(\"http\"),\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t}},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\treturn\n}", "func NewDeployment(ctx *plugin.Context, target *Target, prev *Snapshot, plan *Plan, source Source,\n\tlocalPolicyPackPaths []string, preview bool, backendClient BackendClient,\n) (*Deployment, error) {\n\tcontract.Requiref(ctx != nil, \"ctx\", \"must not be nil\")\n\tcontract.Requiref(target != nil, \"target\", \"must not be nil\")\n\tcontract.Requiref(source != nil, \"source\", \"must not be nil\")\n\n\tif err := migrateProviders(target, prev, source); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Produce a map of all old resources for fast access.\n\t//\n\t// NOTE: we can and do mutate prev.Resources, olds, and depGraph during execution after performing a refresh. See\n\t// deploymentExecutor.refresh for details.\n\toldResources, olds, err := buildResourceMap(prev, preview)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Build the dependency graph for the old resources.\n\tdepGraph := graph.NewDependencyGraph(oldResources)\n\n\t// Create a goal map for the deployment.\n\tnewGoals := &goalMap{}\n\n\t// Create a resource map for the deployment.\n\tnewResources := &resourceMap{}\n\n\t// Create a new builtin provider. This provider implements features such as `getStack`.\n\tbuiltins := newBuiltinProvider(backendClient, newResources)\n\n\t// Create a new provider registry. 
Although we really only need to pass in any providers that were present in the\n\t// old resource list, the registry itself will filter out other sorts of resources when processing the prior state,\n\t// so we just pass all of the old resources.\n\treg := providers.NewRegistry(ctx.Host, preview, builtins)\n\n\treturn &Deployment{\n\t\tctx: ctx,\n\t\ttarget: target,\n\t\tprev: prev,\n\t\tplan: plan,\n\t\tolds: olds,\n\t\tsource: source,\n\t\tlocalPolicyPackPaths: localPolicyPackPaths,\n\t\tpreview: preview,\n\t\tdepGraph: depGraph,\n\t\tproviders: reg,\n\t\tgoals: newGoals,\n\t\tnews: newResources,\n\t\tnewPlans: newResourcePlan(target.Config),\n\t}, nil\n}", "func showDeployment(c *cli.Context, w io.Writer) error {\n\tid, err := getDeploymentId(c)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tclient.Photonclient, err = client.GetClient(c)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdeployment, err := client.Photonclient.Deployments.Get(id)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvms, err := client.Photonclient.Deployments.GetVms(id)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar data []VM_NetworkIPs\n\n\tfor _, vm := range vms.Items {\n\t\tnetworks, err := getVMNetworks(vm.ID, c)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tipAddr := \"N/A\"\n\t\tfor _, nt := range networks {\n\t\t\tnetwork := nt.(map[string]interface{})\n\t\t\tif len(network) != 0 && network[\"network\"] != nil {\n\t\t\t\tif val, ok := network[\"ipAddress\"]; ok && val != nil {\n\t\t\t\t\tipAddr = val.(string)\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\n\t\t}\n\t\tdata = append(data, VM_NetworkIPs{vm, ipAddr})\n\t}\n\tif utils.NeedsFormatting(c) {\n\t\tutils.FormatObject(deployment, w, c)\n\t} else if c.GlobalIsSet(\"non-interactive\") {\n\t\timageDataStores := getCommaSeparatedStringFromStringArray(deployment.ImageDatastores)\n\t\tsecurityGroups := getCommaSeparatedStringFromStringArray(deployment.Auth.SecurityGroups)\n\n\t\tfmt.Printf(\"%s\\t%s\\t%s\\t%t\\t%s\\t%s\\t%t\\t%s\\n\", deployment.ID, deployment.State,\n\t\t\timageDataStores, deployment.UseImageDatastoreForVms, deployment.SyslogEndpoint,\n\t\t\tdeployment.NTPEndpoint, deployment.LoadBalancerEnabled,\n\t\t\tdeployment.LoadBalancerAddress)\n\n\t\tfmt.Printf(\"%t\\t%s\\t%s\\t%d\\t%s\\n\", deployment.Auth.Enabled, deployment.Auth.Endpoint,\n\t\t\tdeployment.Auth.Tenant, deployment.Auth.Port, securityGroups)\n\n\t} else {\n\t\tsyslogEndpoint := deployment.SyslogEndpoint\n\t\tif len(deployment.SyslogEndpoint) == 0 {\n\t\t\tsyslogEndpoint = \"-\"\n\t\t}\n\t\tntpEndpoint := deployment.NTPEndpoint\n\t\tif len(deployment.NTPEndpoint) == 0 {\n\t\t\tntpEndpoint = \"-\"\n\t\t}\n\n\t\tfmt.Printf(\"\\n\")\n\t\tfmt.Printf(\"Deployment ID: %s\\n\", deployment.ID)\n\t\tfmt.Printf(\" State: %s\\n\", deployment.State)\n\t\tfmt.Printf(\"\\n Image Datastores: %s\\n\", deployment.ImageDatastores)\n\t\tfmt.Printf(\" Use image datastore for vms: %t\\n\", deployment.UseImageDatastoreForVms)\n\t\tfmt.Printf(\"\\n Syslog Endpoint: %s\\n\", syslogEndpoint)\n\t\tfmt.Printf(\" Ntp Endpoint: %s\\n\", ntpEndpoint)\n\t\tfmt.Printf(\"\\n LoadBalancer:\\n\")\n\t\tfmt.Printf(\" Enabled: %t\\n\", deployment.LoadBalancerEnabled)\n\t\tif deployment.LoadBalancerEnabled {\n\t\t\tfmt.Printf(\" Address: %s\\n\", deployment.LoadBalancerAddress)\n\t\t}\n\n\t\tfmt.Printf(\"\\n Auth:\\n\")\n\t\tfmt.Printf(\" Enabled: %t\\n\", deployment.Auth.Enabled)\n\t\tif deployment.Auth.Enabled {\n\t\t\tfmt.Printf(\" Endpoint: %s\\n\", deployment.Auth.Endpoint)\n\t\t\tfmt.Printf(\" Tenant: %s\\n\", 
deployment.Auth.Tenant)\n\t\t\tfmt.Printf(\" Port: %d\\n\", deployment.Auth.Port)\n\t\t\tfmt.Printf(\" Securitygroups: %v\\n\", deployment.Auth.SecurityGroups)\n\t\t}\n\t}\n\n\tif deployment.Stats != nil {\n\t\tstats := deployment.Stats\n\t\tif c.GlobalIsSet(\"non-interactive\") {\n\t\t\tfmt.Printf(\"%t\\t%s\\t%d\\n\", stats.Enabled, stats.StoreEndpoint, stats.StorePort)\n\t\t} else if !utils.NeedsFormatting(c) {\n\n\t\t\tfmt.Printf(\"\\n Stats:\\n\")\n\t\t\tfmt.Printf(\" Enabled: %t\\n\", stats.Enabled)\n\t\t\tif stats.Enabled {\n\t\t\t\tfmt.Printf(\" Store Endpoint: %s\\n\", stats.StoreEndpoint)\n\t\t\t\tfmt.Printf(\" Store Port: %d\\n\", stats.StorePort)\n\t\t\t}\n\t\t}\n\t} else {\n\t\tif c.GlobalIsSet(\"non-interactive\") {\n\t\t\tfmt.Printf(\"\\n\")\n\t\t}\n\t}\n\n\tif deployment.Migration != nil {\n\t\tmigration := deployment.Migration\n\t\tif c.GlobalIsSet(\"non-interactive\") {\n\t\t\tfmt.Printf(\"%d\\t%d\\t%d\\t%d\\t%d\\n\", migration.CompletedDataMigrationCycles, migration.DataMigrationCycleProgress,\n\t\t\t\tmigration.DataMigrationCycleSize, migration.VibsUploaded, migration.VibsUploading+migration.VibsUploaded)\n\t\t} else if !utils.NeedsFormatting(c) {\n\t\t\tfmt.Printf(\"\\n Migration status:\\n\")\n\t\t\tfmt.Printf(\" Completed data migration cycles: %d\\n\", migration.CompletedDataMigrationCycles)\n\t\t\tfmt.Printf(\" Current data migration cycles progress: %d / %d\\n\", migration.DataMigrationCycleProgress,\n\t\t\t\tmigration.DataMigrationCycleSize)\n\t\t\tfmt.Printf(\" VIB upload progress: %d / %d\\n\", migration.VibsUploaded, migration.VibsUploading+migration.VibsUploaded)\n\t\t}\n\t} else {\n\t\tif c.GlobalIsSet(\"non-interactive\") {\n\t\t\tfmt.Printf(\"\\n\")\n\t\t}\n\t}\n\n\tif deployment.ClusterConfigurations != nil && len(deployment.ClusterConfigurations) != 0 {\n\t\tif c.GlobalIsSet(\"non-interactive\") {\n\t\t\tclusterConfigurations := []string{}\n\t\t\tfor _, c := range deployment.ClusterConfigurations {\n\t\t\t\tclusterConfigurations = append(clusterConfigurations, fmt.Sprintf(\"%s\\t%s\", c.Type, c.ImageID))\n\t\t\t}\n\t\t\tscriptClusterConfigurations := strings.Join(clusterConfigurations, \",\")\n\t\t\tfmt.Printf(\"%s\\n\", scriptClusterConfigurations)\n\t\t} else if !utils.NeedsFormatting(c) {\n\t\t\tfmt.Println(\"\\n Cluster Configurations:\")\n\t\t\tfor i, c := range deployment.ClusterConfigurations {\n\t\t\t\tfmt.Printf(\" ClusterConfiguration %d:\\n\", i+1)\n\t\t\t\tfmt.Println(\" Kind: \", c.Kind)\n\t\t\t\tfmt.Println(\" Type: \", c.Type)\n\t\t\t\tfmt.Println(\" ImageID: \", c.ImageID)\n\t\t\t}\n\t\t}\n\t} else {\n\t\tif c.GlobalIsSet(\"non-interactive\") {\n\t\t\tfmt.Printf(\"\\n\")\n\t\t} else if !utils.NeedsFormatting(c) {\n\t\t\tfmt.Println(\"\\n Cluster Configurations:\")\n\t\t\tfmt.Printf(\" No cluster is supported\")\n\t\t}\n\t}\n\n\tif !utils.NeedsFormatting(c) {\n\t\terr = displayDeploymentSummary(data, c.GlobalIsSet(\"non-interactive\"))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func CreateDeployment(kubeconfig, serverImage, gatewayImage, name string) error {\n\n\tclientset, _, err := GetKubeClient(kubeconfig)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"cannot get clientset: %v\", err)\n\t}\n\n\tdeploymentsClient := clientset.AppsV1beta1().Deployments(v1.NamespaceDefault)\n\n\tdeployment := &appsv1beta1.Deployment{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: name,\n\t\t},\n\t\tSpec: appsv1beta1.DeploymentSpec{\n\t\t\tReplicas: int32Ptr(1),\n\t\t\tTemplate: v1.PodTemplateSpec{\n\t\t\t\tObjectMeta: 
metav1.ObjectMeta{\n\t\t\t\t\tLabels: map[string]string{\n\t\t\t\t\t\t\"app\": name,\n\t\t\t\t\t\t\"name\": name,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tSpec: v1.PodSpec{\n\t\t\t\t\tContainers: []v1.Container{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: name,\n\t\t\t\t\t\t\tImage: serverImage,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: fmt.Sprintf(\"%s-dashboard\", name),\n\t\t\t\t\t\t\tImage: gatewayImage,\n\t\t\t\t\t\t\tPorts: []v1.ContainerPort{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tName: \"http\",\n\t\t\t\t\t\t\t\t\tProtocol: v1.ProtocolTCP,\n\t\t\t\t\t\t\t\t\tContainerPort: 8080,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\t_, err = deploymentsClient.Create(deployment)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"cannot create deployment: %v\", err)\n\t}\n\n\treturn nil\n}", "func (srv *Service) CreateDeployment(id string, role string, deployName string, replicas *int32, image string, containerPort int32) (interface{}, error) {\n\t//check the number os deployments by the user\n\tdeployments, err := srv.mongoRepository.GetDeploymentsFromUser(id)\n\n\tif err != nil {\n\t\t//return the error sent by the repository\n\t\treturn nil, err\n\t}\n\n\t//check if user has already the max number os deployments\n\tif role != \"admin\" {\n\t\tif len(deployments) > 3 {\n\t\t\t//return a custom error\n\t\t\treturn nil, &pkg.Error{Err: err, Code: http.StatusBadRequest, Message: \"Max number 3 deployments permited per user\"}\n\t\t}\n\t}\n\n\tnamespaceUUID := \"namespace-\" + deployName //generate name for namespace\n\n\t//call driven adapter responsible for creating namespaces inside the kubernetes cluster\n\t_, err = srv.kubernetesRepository.CreateNamespace(namespaceUUID)\n\tif err != nil {\n\t\t//return error from the kubernetes repository method\n\t\treturn nil, err\n\t}\n\n\tdeploymentUUID := \"deployment-\" + deployName //generate name for the deployment\n\n\t//call driven adapter responsible for creating deployments inside the kubernetes cluster\n\t_, err = srv.kubernetesRepository.CreateDeployment(namespaceUUID, deploymentUUID, replicas, image, containerPort)\n\tif err != nil {\n\t\t//creation of the deployment went wrong, delete everything inside it's namespace\n\t\t//call driven adapter responsible for deleting namespaces inside the kubernetes cluster\n\t\t_, _ = srv.kubernetesRepository.DeleteNamespace(namespaceUUID)\n\n\t\t//return error from the kubernetes repository method\n\t\treturn nil, err\n\t}\n\n\tserviceUUID := \"service-\" + deployName //generate name for the service\n\t//create service to expose the deployment\n\t_, err = srv.kubernetesRepository.CreateClusterIPService(namespaceUUID, serviceUUID, containerPort)\n\tif err != nil {\n\t\t//creation of the service went wrong, delete everything inside it's namespace\n\t\t//call driven adapter responsible for deleting namespaces inside the kubernetes cluster\n\t\t_, deperr := srv.kubernetesRepository.DeleteNamespace(namespaceUUID)\n\t\tif deperr != nil {\n\t\t\treturn nil, deperr\n\t\t}\n\t\t//return error from the kubernetes repository method\n\t\treturn nil, err\n\t}\n\n\tingressUUID := \"ingress-\" + deployName //generate name for the service\n\t//create ingress to expose the service\n\t_, err = srv.kubernetesRepository.CreateIngress(namespaceUUID, ingressUUID, deployName)\n\tif err != nil {\n\t\t//creation of the ingress went wrong, delete everything inside it's namespace\n\t\t//call driven adapter responsible for deleting namespaces inside the kubernetes cluster\n\t\t_, 
deperr := srv.kubernetesRepository.DeleteNamespace(namespaceUUID)\n\t\tif deperr != nil {\n\t\t\treturn nil, deperr\n\t\t}\n\t\t//return error from the kubernetes repository method\n\t\treturn nil, err\n\t}\n\n\terr = srv.mongoRepository.InsertDeployment(deployName, id, image)\n\tif err != nil {\n\t\t//delete namespace\n\t\t_, deperr := srv.kubernetesRepository.DeleteNamespace(namespaceUUID)\n\t\tif deperr != nil {\n\t\t\treturn nil, deperr\n\t\t}\n\t\t//return error from the mongo repository method\n\t\treturn nil, err\n\t}\n\n\t//return app uuid\n\treturn deployName, nil\n}", "func createDeployment(cwg ClientWg, name string) {\n\tgo func() {\n\t\t// signal the wait group exactly once, on every exit path\n\t\tdefer cwg.wg.Done()\n\t\tdeploymentsClient := cwg.clientset.AppsV1().Deployments(metav1.NamespaceDefault)\n\t\tfound := checkNodeAlreadyPresent(deploymentsClient, name)\n\t\tif found {\n\t\t\treturn\n\t\t}\n\t\tdeployment := &appv1.Deployment{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: name,\n\t\t\t},\n\t\t\tSpec: appv1.DeploymentSpec{\n\t\t\t\tReplicas: int32Ptr(1),\n\t\t\t\tSelector: &metav1.LabelSelector{\n\t\t\t\t\tMatchLabels: map[string]string{\n\t\t\t\t\t\t\"app\": \"demo\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tTemplate: corev1.PodTemplateSpec{\n\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\tLabels: map[string]string{\n\t\t\t\t\t\t\t\"app\": \"demo\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tSpec: corev1.PodSpec{\n\t\t\t\t\t\tContainers: []corev1.Container{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tName: \"web\",\n\t\t\t\t\t\t\t\tImage: \"nginx:1.12\",\n\t\t\t\t\t\t\t\tPorts: []corev1.ContainerPort{\n\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\tName: \"http\",\n\t\t\t\t\t\t\t\t\t\tProtocol: corev1.ProtocolTCP,\n\t\t\t\t\t\t\t\t\t\tContainerPort: 80,\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\n\t\t// Create Deployment\n\t\tfmt.Println(\"Creating deployment...\")\n\t\tresult, err := deploymentsClient.Create(deployment)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tfmt.Printf(\"Created deployment %q.\\n\", result.GetObjectMeta().GetName())\n\t}()\n}", "func (bc *ReconcileJenkinsInstance) newDeployment(instanceName types.NamespacedName) (*appsv1.Deployment, error) {\n\texists := false\n\n\tjenkinsInstance, err := bc.getJenkinsInstance(instanceName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Get the deployment with the name specified in JenkinsInstance.spec\n\tdeployment, err := bc.getDeployment(instanceName)\n\n\t// If the resource doesn't exist, we'll create it\n\tif err != nil {\n\t\tif !errors.IsNotFound(err) {\n\t\t\treturn nil, err\n\t\t}\n\t} else {\n\t\t// If the Deployment is not controlled by this JenkinsInstance resource, we should log\n\t\t// a warning to the event recorder and return\n\t\tif !metav1.IsControlledBy(deployment, jenkinsInstance) {\n\t\t\tmsg := fmt.Sprintf(MessageResourceExists, deployment.GetName())\n\t\t\tbc.Event(jenkinsInstance, corev1.EventTypeWarning, ErrResourceExists, msg)\n\t\t\treturn deployment, fmt.Errorf(msg)\n\t\t}\n\n\t\texists = true\n\t}\n\n\tlabels := map[string]string{\n\t\t\"app\": \"jenkinsci\",\n\t\t\"controller\": jenkinsInstance.GetName(),\n\t\t\"component\": string(jenkinsInstance.UID),\n\t}\n\n\t// get binary data for variables and groovy config\n\tjenkinsJvmEnv, err := configdata.Asset(\"environment/jenkins-jvm-environment\")\n\tif err != nil {\n\t\tglog.Errorf(\"Error locating binary asset: %s\", err)\n\t\treturn nil, err\n\t}\n\n\t// Create environment variables\n\t// 
variables out of jenkins-jvm-environment\n\tvar env []corev1.EnvVar\n\tscanner := bufio.NewScanner(strings.NewReader(string(jenkinsJvmEnv[:])))\n\tfor scanner.Scan() {\n\n\t\tenvComponents := strings.Split(scanner.Text(), \":\")\n\t\tenv = append(env, corev1.EnvVar{\n\t\t\tName: envComponents[0],\n\t\t\tValue: envComponents[1],\n\t\t})\n\t}\n\n\t// user-specified environment variables\n\tfor envVar, envVarVal := range jenkinsInstance.Spec.Env {\n\t\tenv = append(env, corev1.EnvVar{\n\t\t\tName: envVar,\n\t\t\tValue: envVarVal,\n\t\t})\n\t}\n\n\t// build a command string to install plugins and launch jenkins\n\tcommandString := \"\"\n\tcommandString += \"/usr/local/bin/install-plugins.sh $(cat /var/jenkins_home/init.groovy.d/plugins.txt | tr '\\\\n' ' ') && \"\n\tcommandString += \"/sbin/tini -- /usr/local/bin/jenkins.sh\"\n\tcommandString += \"\"\n\n\t// if service account name is specified, check that it exists\n\tif jenkinsInstance.Spec.ServiceAccount != \"\" {\n\t\tserviceAccount := &corev1.ServiceAccount{}\n\t\terr := bc.Client.Get(\n\t\t\tcontext.TODO(),\n\t\t\ttypes.NamespacedName{\n\t\t\t\tNamespace: jenkinsInstance.GetNamespace(),\n\t\t\t\tName: jenkinsInstance.Spec.ServiceAccount},\n\t\t\tserviceAccount)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t// Get the correct volume source to use\n\t// if pvc name is specified, try to either locate it or create it\n\tpvcName := jenkinsInstance.GetName()\n\tif jenkinsInstance.Spec.Storage != nil {\n\t\tif jenkinsInstance.Spec.Storage.JobsPvc != \"\" {\n\t\t\tpvcName = jenkinsInstance.Spec.Storage.JobsPvc\n\t\t}\n\t\tpvc := &corev1.PersistentVolumeClaim{}\n\t\terr = bc.Client.Get(\n\t\t\tcontext.TODO(),\n\t\t\ttypes.NamespacedName{\n\t\t\t\tNamespace: jenkinsInstance.GetNamespace(),\n\t\t\t\tName: pvcName},\n\t\t\tpvc)\n\n\t\t// if PVC is not found\n\t\tif errors.IsNotFound(err) {\n\t\t\t// error out if pvc spec is not specified\n\t\t\tif jenkinsInstance.Spec.Storage.JobsPvcSpec == nil {\n\t\t\t\treturn nil, fmt.Errorf(\n\t\t\t\t\t\"PVC %s does not exist and JobsPvcSpec is not specified\",\n\t\t\t\t\tpvcName)\n\t\t\t}\n\n\t\t\t// otherwise create the pvc\n\t\t\tpvc = &corev1.PersistentVolumeClaim{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tName: pvcName,\n\t\t\t\t\tNamespace: jenkinsInstance.GetNamespace(),\n\t\t\t\t},\n\t\t\t\tSpec: *jenkinsInstance.Spec.Storage.JobsPvcSpec,\n\t\t\t}\n\t\t\terr = controllerutil.SetControllerReference(jenkinsInstance, pvc, bc.scheme)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\terr = bc.Client.Create(context.TODO(), pvc)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t}\n\n\t// if PVC name is not specified, use an EmptyDir\n\tvar volumeSource corev1.VolumeSource\n\tif jenkinsInstance.Spec.Storage == nil {\n\t\tvolumeSource = corev1.VolumeSource{\n\t\t\tEmptyDir: &corev1.EmptyDirVolumeSource{},\n\t\t}\n\t} else {\n\t\tvolumeSource = corev1.VolumeSource{\n\t\t\tPersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{\n\t\t\t\tClaimName: pvcName,\n\t\t\t\tReadOnly: false,\n\t\t\t},\n\t\t}\n\t}\n\n\tvar replicas int32 = JenkinsReplicas\n\tvar runAsUser int64 = 0\n\n\tif exists {\n\t\tdeploymentCopy := deployment.DeepCopy()\n\t\tdeploymentCopy.Annotations = jenkinsInstance.Spec.Annotations\n\t\tdeploymentCopy.Spec.Replicas = &replicas\n\t\tdeploymentCopy.Spec.Selector = &metav1.LabelSelector{\n\t\t\tMatchLabels: labels,\n\t\t}\n\t\tdeploymentCopy.Spec.Template.Spec.Containers = []corev1.Container{\n\t\t\t{\n\t\t\t\tName: 
\"jenkinsci\",\n\t\t\t\tImage: jenkinsInstance.Spec.Image,\n\t\t\t\tPorts: []corev1.ContainerPort{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"master\",\n\t\t\t\t\t\tContainerPort: JenkinsMasterPort,\n\t\t\t\t\t\tHostPort: JenkinsMasterPort,\n\t\t\t\t\t\tProtocol: \"TCP\",\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"agent\",\n\t\t\t\t\t\tContainerPort: JenkinsAgentPort,\n\t\t\t\t\t\tHostPort: JenkinsAgentPort,\n\t\t\t\t\t\tProtocol: \"TCP\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tEnv: env,\n\t\t\t\tCommand: []string{\n\t\t\t\t\t\"bash\",\n\t\t\t\t\t\"-c\",\n\t\t\t\t\tcommandString,\n\t\t\t\t},\n\t\t\t\tImagePullPolicy: JenkinsPullPolicy,\n\t\t\t\tVolumeMounts: []corev1.VolumeMount{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"init-groovy-d\",\n\t\t\t\t\t\tReadOnly: false,\n\t\t\t\t\t\tMountPath: \"/var/jenkins_home/init.groovy.d\",\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"job-storage\",\n\t\t\t\t\t\tReadOnly: false,\n\t\t\t\t\t\tMountPath: \"/var/jenkins_home/jobs\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t\tdeploymentCopy.Spec.Template.Spec.Volumes = []corev1.Volume{\n\t\t\t{\n\t\t\t\tName: \"init-groovy-d\",\n\t\t\t\tVolumeSource: corev1.VolumeSource{\n\t\t\t\t\tSecret: &corev1.SecretVolumeSource{\n\t\t\t\t\t\tSecretName: jenkinsInstance.GetName(),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"job-storage\",\n\t\t\t\tVolumeSource: volumeSource,\n\t\t\t},\n\t\t}\n\t\tdeploymentCopy.Spec.Template.Spec.ServiceAccountName = jenkinsInstance.Spec.ServiceAccount\n\n\t\tchanged := reflect.DeepEqual(deploymentCopy.Annotations, deployment.Annotations) &&\n\t\t\treflect.DeepEqual(deploymentCopy.Spec.Selector, deployment.Spec.Selector) &&\n\t\t\treflect.DeepEqual(deploymentCopy.Spec.Template.Spec.Containers, deployment.Spec.Template.Spec.Containers) &&\n\t\t\treflect.DeepEqual(deploymentCopy.Spec.Template.Spec.Volumes, deployment.Spec.Template.Spec.Volumes) &&\n\t\t\t(deploymentCopy.Spec.Replicas == deployment.Spec.Replicas) &&\n\t\t\t(deploymentCopy.Spec.Template.Spec.ServiceAccountName == deployment.Spec.Template.Spec.ServiceAccountName)\n\n\t\tif !changed {\n\t\t\treturn deployment, nil\n\t\t}\n\n\t\tglog.Info(\"updating deployment\")\n\t\terr = bc.Client.Update(context.TODO(), deploymentCopy)\n\t\treturn deploymentCopy, err\n\n\t} else {\n\t\tdeployment = &appsv1.Deployment{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: jenkinsInstance.GetName(),\n\t\t\t\tNamespace: jenkinsInstance.GetNamespace(),\n\t\t\t\tAnnotations: jenkinsInstance.Spec.Annotations,\n\t\t\t},\n\t\t\tSpec: appsv1.DeploymentSpec{\n\t\t\t\tReplicas: &replicas,\n\t\t\t\tSelector: &metav1.LabelSelector{\n\t\t\t\t\tMatchLabels: labels,\n\t\t\t\t},\n\t\t\t\tTemplate: corev1.PodTemplateSpec{\n\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\tLabels: labels,\n\t\t\t\t\t},\n\t\t\t\t\tSpec: corev1.PodSpec{\n\t\t\t\t\t\tSecurityContext: &corev1.PodSecurityContext{\n\t\t\t\t\t\t\tRunAsUser: &runAsUser,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tContainers: []corev1.Container{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tName: \"jenkinsci\",\n\t\t\t\t\t\t\t\tImage: jenkinsInstance.Spec.Image,\n\t\t\t\t\t\t\t\tPorts: []corev1.ContainerPort{\n\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\tName: \"master\",\n\t\t\t\t\t\t\t\t\t\tContainerPort: JenkinsMasterPort,\n\t\t\t\t\t\t\t\t\t\tHostPort: JenkinsMasterPort,\n\t\t\t\t\t\t\t\t\t\tProtocol: \"TCP\",\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\tName: \"agent\",\n\t\t\t\t\t\t\t\t\t\tContainerPort: JenkinsAgentPort,\n\t\t\t\t\t\t\t\t\t\tHostPort: 
JenkinsAgentPort,\n\t\t\t\t\t\t\t\t\t\tProtocol: \"TCP\",\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\tEnv: env,\n\t\t\t\t\t\t\t\tCommand: []string{\n\t\t\t\t\t\t\t\t\t\"bash\",\n\t\t\t\t\t\t\t\t\t\"-c\",\n\t\t\t\t\t\t\t\t\tcommandString,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\tImagePullPolicy: JenkinsPullPolicy,\n\t\t\t\t\t\t\t\tVolumeMounts: []corev1.VolumeMount{\n\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\tName: \"init-groovy-d\",\n\t\t\t\t\t\t\t\t\t\tReadOnly: false,\n\t\t\t\t\t\t\t\t\t\tMountPath: \"/var/jenkins_home/init.groovy.d\",\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\tName: \"job-storage\",\n\t\t\t\t\t\t\t\t\t\tReadOnly: false,\n\t\t\t\t\t\t\t\t\t\tMountPath: \"/var/jenkins_home/jobs\",\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\n\t\t\t\t\t\tVolumes: []corev1.Volume{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tName: \"init-groovy-d\",\n\t\t\t\t\t\t\t\tVolumeSource: corev1.VolumeSource{\n\t\t\t\t\t\t\t\t\tSecret: &corev1.SecretVolumeSource{\n\t\t\t\t\t\t\t\t\t\tSecretName: jenkinsInstance.GetName(),\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tName: \"job-storage\",\n\t\t\t\t\t\t\t\tVolumeSource: volumeSource,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\n\t\t// assign service account\n\t\tdeployment.Spec.Template.Spec.ServiceAccountName = jenkinsInstance.Spec.ServiceAccount\n\n\t\terr = controllerutil.SetControllerReference(jenkinsInstance, deployment, bc.scheme)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\terr = bc.Client.Create(context.TODO(), deployment)\n\t\treturn deployment, err\n\t}\n}", "func NewDeployment(meta interfaces.MetaDeployment) *Deployment {\n\treturn &Deployment{\n\t\tmeta: meta,\n\t\tConfigs: make(map[string]*Config),\n\t\tAppliedChecksums: meta.AppliedChecksums(),\n\t}\n}", "func createDeployment(client k8sclient.Interface, module *protos.Module) error {\n\tdConfig := createDeploymentObject(module)\n\tlog.Infof(\"Creating deployment with config [%+v]\", dConfig)\n\td, err := client.ExtensionsV1beta1().Deployments(defaultNS).Create(dConfig)\n\tif err == nil {\n\t\tlog.Infof(\"Created Deployment %+v\", d)\n\t} else if errors.IsAlreadyExists(err) {\n\t\tlog.Infof(\"Deployment %+v already exists\", dConfig)\n\t} else {\n\t\tlog.Errorf(\"Failed to create Deployment %+v with error: %v\", dConfig, err)\n\t}\n\n\treturn err\n}", "func (w *Worker) initDeployment(d types.Deployment) {\n\tw.log.Info(\"Initializing new deployment\")\n\tartifact, err := w.ciClient.GetBuildArtifactByID(d.ArtifactID)\n\tif err != nil {\n\t\tw.log.Error(\"Failed to get build artifact\", err)\n\t\treturn\n\t}\n\tmanifestVals := types.ManifestValues{\n\t\tName: d.K8SName,\n\t\tImage: artifact.Name,\n\t\tReplicas: d.Replicas,\n\t}\n\tmanifest, err := renderManifestTemplate(d.Manifest, manifestVals)\n\tif err != nil {\n\t\tw.log.Error(\"Failed to render manifest template\", err)\n\t\treturn\n\t}\n\tfmt.Println(\"Manifest:\\n\" + manifest)\n\tok, stdout := w.kubectl.CreateDeployment(manifest)\n\tif !ok {\n\t\tfmt.Println(\"failed to create deployment\")\n\t}\n\tfmt.Println(\"stdout: \" + stdout)\n\terr = w.recordRevision(d, stdout)\n\tif err != nil {\n\t\tw.log.Error(\"Failed to write revision to db\", err)\n\t}\n\tif ok {\n\t\td.IsInitialized = true\n\t\terr = w.databaseClient.SaveDeployment(&d)\n\t\tif err != nil {\n\t\t\tw.log.Error(\"Failed to update deployment db record\", err)\n\t\t}\n\t}\n}", "func (client *Client) CreateDeployment(req 
*Request) (*Response, error) {\n\treturn client.Execute(&Request{\n\t\tMethod: \"POST\",\n\t\tPath: DeploymentsPath,\n\t\tQueryParams: req.QueryParams,\n\t\tBody: req.Body,\n\t\tResult: &CreateDeploymentResult{},\n\t})\n}", "func NewDeployment(app common.Application, environment string, strategy common.Strategy, ref string, flags map[string]interface{}) *Deployment {\n\treturn &Deployment{\n\t\tuuid: uuid.NewV1(),\n\t\tapplication: app,\n\t\tenvironment: environment,\n\t\tstrategy: strategy,\n\t\tref: ref,\n\t\tflags: flags,\n\t\tproducts: make(map[string]interface{}),\n\t\tcurrentState: common.DEPLOYMENT_PENDING,\n\t}\n}", "func newCanaryDeployment(sd *sentinalv1alpha1.SentinalDeployment, dep *appsv1beta2.Deployment) *appsv1beta2.Deployment {\n\tcanary := dep.DeepCopy()\n\tcanary.ObjectMeta.ResourceVersion = \"\"\n\tcanary.ObjectMeta.OwnerReferences = []metav1.OwnerReference{\n\t\t*metav1.NewControllerRef(sd, schema.GroupVersionKind{\n\t\t\tGroup: sentinalv1alpha1.SchemeGroupVersion.Group,\n\t\t\tVersion: sentinalv1alpha1.SchemeGroupVersion.Version,\n\t\t\tKind: \"SentinalDeployment\",\n\t\t}),\n\t}\n\n\t//TODO label this\n\tcanary.ObjectMeta.Name = sd.Spec.CanaryDeploymentName\n\t//TODO calculate this based on how many dep's been decremented by\n\tcanary.Spec.Replicas = &sd.Spec.CanaryDesiredReplicas\n\t// index into the slice so the image update mutates the deployment, not a copy\n\tfor i := range canary.Spec.Template.Spec.Containers {\n\t\tfor _, updatedContainer := range sd.Spec.CanaryContainers {\n\t\t\tif canary.Spec.Template.Spec.Containers[i].Name == updatedContainer.Name {\n\t\t\t\tcanary.Spec.Template.Spec.Containers[i].Image = updatedContainer.Image\n\t\t\t}\n\t\t}\n\t}\n\treturn canary\n}", "func Deployment() appsv1.Deployment {\n\treturn appsv1.Deployment{\n\t\tTypeMeta: TypeMeta(),\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tNamespace: DefaultNamespace,\n\t\t\tName: DefaultName,\n\t\t\tLabels: map[string]string{\n\t\t\t\tDefaultSelectorKey: DefaultSelectorValue,\n\t\t\t},\n\t\t\tAnnotations: map[string]string{\n\t\t\t\tcontroller.KconfigEnabledDeploymentAnnotation: \"true\",\n\t\t\t},\n\t\t},\n\t\tSpec: appsv1.DeploymentSpec{\n\t\t\tSelector: &metav1.LabelSelector{\n\t\t\t\tMatchLabels: map[string]string{\n\t\t\t\t\tDefaultSelectorKey: DefaultSelectorValue,\n\t\t\t\t},\n\t\t\t},\n\t\t\tTemplate: corev1.PodTemplateSpec{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tNamespace: DefaultNamespace,\n\t\t\t\t\tName: DefaultName,\n\t\t\t\t\tLabels: map[string]string{\n\t\t\t\t\t\tDefaultSelectorKey: DefaultSelectorValue,\n\t\t\t\t\t},\n\t\t\t\t\tAnnotations: map[string]string{\n\t\t\t\t\t\tcontroller.KconfigEnvRefVersionAnnotation: \"0\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tSpec: corev1.PodSpec{\n\t\t\t\t\tContainers: []corev1.Container{\n\t\t\t\t\t\tcorev1.Container{\n\t\t\t\t\t\t\tEnv: []corev1.EnvVar{},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}", "func CreateDeploy(w http.ResponseWriter, r *http.Request) {\n\tdeploy := models.Deploy{}\n\terr := json.NewDecoder(r.Body).Decode(&deploy)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t// Todo validate requirement id\n\n\terr = models.InsertDeploy(deploy)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tw.WriteHeader(http.StatusOK)\n\terr = json.NewEncoder(w).Encode(deploy)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}", "func UpgradeDeployment(c *gin.Context) {\n\tlog := logger.WithFields(logrus.Fields{\"tag\": \"UpgradeDeployment\"})\n\tname := c.Param(\"name\")\n\tlog.Infof(\"Upgrading deployment: %s\", name)\n\tparsedRequest, err := parseCreateUpdateDeploymentRequest(c)\n\tif err != nil 
{\n\t\tlog.Error(err.Error())\n\t\tc.JSON(http.StatusBadRequest, htype.ErrorResponse{\n\t\t\tCode: http.StatusBadRequest,\n\t\t\tMessage: \"Error during parsing request!\",\n\t\t\tError: errors.Cause(err).Error(),\n\t\t})\n\t\treturn\n\t}\n\n\trelease, err := helm.UpgradeDeployment(name,\n\t\tparsedRequest.deploymentName, parsedRequest.values,\n\t\tparsedRequest.reuseValues, parsedRequest.kubeConfig, parsedRequest.clusterName)\n\tif err != nil {\n\t\tlog.Errorf(\"Error during upgrading deployment. %s\", err.Error())\n\t\tc.JSON(http.StatusInternalServerError, htype.ErrorResponse{\n\t\t\tCode: http.StatusInternalServerError,\n\t\t\tMessage: \"Error upgrading deployment\",\n\t\t\tError: err.Error(),\n\t\t})\n\t\treturn\n\t}\n\tlog.Info(\"Upgrade deployment succeeded\")\n\n\treleaseNotes := release.Release.Info.Status.Notes\n\n\tlog.Debug(\"Release notes: \", releaseNotes)\n\tresponse := htype.CreateUpdateDeploymentResponse{\n\t\tReleaseName: name,\n\t\tNotes: releaseNotes,\n\t}\n\tc.JSON(http.StatusCreated, response)\n\treturn\n}", "func NewDeployment() (*Deployment, error) {\n\tnow := time.Now()\n\n\tuid := uuid.NewV4()\n\tid := uid.String()\n\n\treturn &Deployment{\n\t\tCreated: &now,\n\t\tId: &id,\n\t\tDeploymentConstructor: &DeploymentConstructor{},\n\t\tStats: NewDeviceDeploymentStats(),\n\t}, nil\n}", "func apicuritoDeployment(c *configuration.Config, a *api.Apicurito) (dep client.Object) {\n\t// Define a new deployment\n\tvar dm int32 = 420\n\tname := DefineUIName(a)\n\tdeployLabels := map[string]string{\n\t\t\"app\": \"apicurito\",\n\t\t\"component\": name,\n\t\t\"com.company\": \"Red_Hat\",\n\t\t\"rht.prod_name\": \"Red_Hat_Integration\",\n\t\t\"rht.prod_ver\": version.ShortVersion(),\n\t\t\"rht.comp\": \"Fuse\",\n\t\t\"rht.comp_ver\": version.ShortVersion(),\n\t\t\"rht.subcomp\": name,\n\t\t\"rht.subcomp_t\": \"infrastructure\",\n\t}\n\tdep = &appsv1.Deployment{\n\t\tTypeMeta: metav1.TypeMeta{\n\t\t\tAPIVersion: \"apps/v1\",\n\t\t\tKind: \"Deployment\",\n\t\t},\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: name,\n\t\t\tNamespace: a.Namespace,\n\t\t\tOwnerReferences: []metav1.OwnerReference{\n\t\t\t\t*metav1.NewControllerRef(a, schema.GroupVersionKind{\n\t\t\t\t\tGroup: api.SchemeGroupVersion.Group,\n\t\t\t\t\tVersion: api.SchemeGroupVersion.Version,\n\t\t\t\t\tKind: a.Kind,\n\t\t\t\t}),\n\t\t\t},\n\t\t},\n\t\tSpec: appsv1.DeploymentSpec{\n\t\t\tReplicas: &a.Spec.Size,\n\t\t\tSelector: &metav1.LabelSelector{\n\t\t\t\tMatchLabels: labelComponent(name),\n\t\t\t},\n\t\t\tStrategy: appsv1.DeploymentStrategy{\n\t\t\t\tType: appsv1.RollingUpdateDeploymentStrategyType,\n\t\t\t},\n\t\t\tTemplate: corev1.PodTemplateSpec{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tLabels: deployLabels,\n\t\t\t\t},\n\t\t\t\tSpec: corev1.PodSpec{\n\t\t\t\t\tContainers: []corev1.Container{{\n\t\t\t\t\t\tImage: c.UiImage,\n\t\t\t\t\t\tImagePullPolicy: corev1.PullIfNotPresent,\n\t\t\t\t\t\tName: name,\n\t\t\t\t\t\tPorts: []corev1.ContainerPort{{\n\t\t\t\t\t\t\tContainerPort: 8080,\n\t\t\t\t\t\t\tName: \"api-port\",\n\t\t\t\t\t\t\tProtocol: corev1.ProtocolTCP,\n\t\t\t\t\t\t}},\n\t\t\t\t\t\tLivenessProbe: &corev1.Probe{\n\t\t\t\t\t\t\tHandler: corev1.Handler{\n\t\t\t\t\t\t\t\tHTTPGet: &corev1.HTTPGetAction{\n\t\t\t\t\t\t\t\t\tScheme: corev1.URISchemeHTTP,\n\t\t\t\t\t\t\t\t\tPort: intstr.FromString(\"api-port\"),\n\t\t\t\t\t\t\t\t\tPath: \"/\",\n\t\t\t\t\t\t\t\t}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tReadinessProbe: &corev1.Probe{\n\t\t\t\t\t\t\tHandler: corev1.Handler{\n\t\t\t\t\t\t\t\tHTTPGet: 
&corev1.HTTPGetAction{\n\t\t\t\t\t\t\t\t\tScheme: corev1.URISchemeHTTP,\n\t\t\t\t\t\t\t\t\tPort: intstr.FromString(\"api-port\"),\n\t\t\t\t\t\t\t\t\tPath: \"/\",\n\t\t\t\t\t\t\t\t}},\n\t\t\t\t\t\t\tPeriodSeconds: 5,\n\t\t\t\t\t\t\tFailureThreshold: 2,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tVolumeMounts: []corev1.VolumeMount{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tName: name,\n\t\t\t\t\t\t\t\tMountPath: \"/html/config\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t}},\n\t\t\t\t\tVolumes: []corev1.Volume{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: name,\n\t\t\t\t\t\t\tVolumeSource: corev1.VolumeSource{\n\t\t\t\t\t\t\t\tConfigMap: &corev1.ConfigMapVolumeSource{\n\t\t\t\t\t\t\t\t\tLocalObjectReference: corev1.LocalObjectReference{\n\t\t\t\t\t\t\t\t\t\tName: name,\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\tDefaultMode: &dm,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\treturn\n}", "func (a *Client) CreateDeployment(params *CreateDeploymentParams, authInfo runtime.ClientAuthInfoWriter) (*CreateDeploymentCreated, *CreateDeploymentAccepted, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewCreateDeploymentParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"create-deployment\",\n\t\tMethod: \"POST\",\n\t\tPathPattern: \"/deployments\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &CreateDeploymentReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tswitch value := result.(type) {\n\tcase *CreateDeploymentCreated:\n\t\treturn value, nil, nil\n\tcase *CreateDeploymentAccepted:\n\t\treturn nil, value, nil\n\t}\n\t// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue\n\tmsg := fmt.Sprintf(\"unexpected success response for deployments: API contract not enforced by server. 
Client expected to get an error, but got: %T\", result)\n\tpanic(msg)\n}", "func DeployApp(w http.ResponseWriter, r *http.Request) {\n\tparams := mux.Vars(r)\n\tstatus := params[\"status\"]\n\tlog.Printf(\"Params: %s\\n\", params)\n\n\tclientset, err := getConfig()\n\tif err != nil {\n\t\tlog.Fatalln(\"failed to get the config:\", err)\n\t}\n\n\tdeploymentsClient := clientset.AppsV1().Deployments(namespace)\n\n\tdeploymentName := params[\"app\"] + \"-deployment\"\n\n\tlist, err := deploymentsClient.List(metav1.ListOptions{})\n\tif err != nil {\n\t\tlog.Fatalln(\"failed to get deployments:\", err)\n\t}\n\n\tcontainers := []apiv1.Container{createContainer(params[\"app\"], repository+\"/\"+params[\"app\"]+appversion)}\n\n\tif status == \"true\" {\n\t\tfor _, d := range list.Items {\n\t\t\tif d.Name == deploymentName && *d.Spec.Replicas > 0 {\n\t\t\t\tlog.Printf(\"Deployment already running: %s\\n\", deploymentName)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\tnodeLabel(params[\"node\"], \"app\", params[\"app\"], \"add\")\n\n\t\tdeployment := &appsv1.Deployment{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: deploymentName,\n\t\t\t},\n\t\t\tSpec: appsv1.DeploymentSpec{\n\t\t\t\tReplicas: int32Ptr(1),\n\t\t\t\tSelector: &metav1.LabelSelector{\n\t\t\t\t\tMatchLabels: map[string]string{\n\t\t\t\t\t\t\"app\": params[\"app\"],\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tTemplate: apiv1.PodTemplateSpec{\n\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\tLabels: map[string]string{\n\t\t\t\t\t\t\t\"app\": params[\"app\"],\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tSpec: apiv1.PodSpec{\n\t\t\t\t\t\tContainers: containers,\n\t\t\t\t\t\tNodeSelector: map[string]string{\n\t\t\t\t\t\t\t\"app\": params[\"app\"],\n\t\t\t\t\t\t},\n\t\t\t\t\t\tVolumes: []apiv1.Volume{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tName: \"mem\",\n\t\t\t\t\t\t\t\tVolumeSource: apiv1.VolumeSource{\n\t\t\t\t\t\t\t\t\tHostPath: &apiv1.HostPathVolumeSource{\n\t\t\t\t\t\t\t\t\t\tPath: \"/dev/mem\",\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tName: \"gpiomem\",\n\t\t\t\t\t\t\t\tVolumeSource: apiv1.VolumeSource{\n\t\t\t\t\t\t\t\t\tHostPath: &apiv1.HostPathVolumeSource{\n\t\t\t\t\t\t\t\t\t\tPath: \"/dev/gpiomem\",\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t\t// Create Deployment\n\t\tfmt.Println(\"Creating deployment...\")\n\t\tresult, err := deploymentsClient.Create(deployment)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tfmt.Printf(\"Created deployment %q.\\n\", result.GetObjectMeta().GetName())\n\n\t} else {\n\n\t\tnodeLabel(params[\"node\"], \"app\", params[\"app\"], \"del\")\n\n\t\tfmt.Println(\"Deleting deployment...\")\n\t\tdeletePolicy := metav1.DeletePropagationForeground\n\t\tif err := deploymentsClient.Delete(deploymentName, &metav1.DeleteOptions{\n\t\t\tPropagationPolicy: &deletePolicy,\n\t\t}); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tfmt.Println(\"Deleted deployment.\")\n\t}\n\n}", "func (s *deploymentServer) createDeployment(ctx context.Context, manifest []byte, env []EnvVar, initVariables []EnvVar) error {\n\tdecoder := k8sYaml.NewYAMLOrJSONDecoder(bytes.NewReader(manifest), 1000)\n\n\td := &appsv1.Deployment{}\n\n\tif err := decoder.Decode(&d); err != nil {\n\t\treturn err\n\t}\n\n\tif len(env) > 0 {\n\t\tcontainers := d.Spec.Template.Spec.Containers\n\t\tapplyEnvironment(containers, env)\n\t}\n\n\tinitContainers := d.Spec.Template.Spec.InitContainers\n\tif len(initContainers) > 0 
{\n\t\tfmt.Println(\"deployment \" + d.Namespace + \".\" + d.Name + \" has initContainers\")\n\t\tapplyEnvironment(initContainers, initVariables)\n\t} else {\n\t\tfmt.Println(\"deployment \" + d.Namespace + \".\" + d.Name + \" has not initContainers; bug in config\")\n\t}\n\n\tappsAPI := s.clientset.AppsV1()\n\tapiDeployments := appsAPI.Deployments(d.Namespace)\n\n\tif _, err := apiDeployments.Create(ctx, d, metav1.CreateOptions{}); err != nil {\n\t\treturn fmt.Errorf(\"deployment create error '%s'\", err.Error())\n\t}\n\n\treturn nil\n}", "func (r *Reconciler) newChannelDeployment(secret *corev1.Secret) (*appsv1.Deployment, error) {\n\n\t// Get The Channel Deployment Name (One Channel Deployment Per Kafka Auth Secret)\n\tdeploymentName := util.ChannelDnsSafeName(secret.Name)\n\n\t// Replicas Int Value For De-Referencing\n\treplicas := int32(r.config.Channel.Replicas)\n\n\t// Create The Channel Container Environment Variables\n\tchannelEnvVars, err := r.channelDeploymentEnvVars(secret)\n\tif err != nil {\n\t\tr.logger.Error(\"Failed To Create Channel Deployment Environment Variables\", zap.Error(err))\n\t\treturn nil, err\n\t}\n\n\t// Create & Return The Channel's Deployment\n\tdeployment := &appsv1.Deployment{\n\t\tTypeMeta: metav1.TypeMeta{\n\t\t\tAPIVersion: appsv1.SchemeGroupVersion.String(),\n\t\t\tKind: constants.DeploymentKind,\n\t\t},\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: deploymentName,\n\t\t\tNamespace: commonconstants.KnativeEventingNamespace,\n\t\t\tLabels: map[string]string{\n\t\t\t\tconstants.AppLabel: deploymentName, // Matches Service Selector Key/Value Below\n\t\t\t\tconstants.KafkaChannelChannelLabel: \"true\", // Allows for identification of KafkaChannels\n\t\t\t},\n\t\t\tOwnerReferences: []metav1.OwnerReference{\n\t\t\t\tutil.NewSecretOwnerReference(secret),\n\t\t\t},\n\t\t},\n\t\tSpec: appsv1.DeploymentSpec{\n\t\t\tReplicas: &replicas,\n\t\t\tSelector: &metav1.LabelSelector{\n\t\t\t\tMatchLabels: map[string]string{\n\t\t\t\t\tconstants.AppLabel: deploymentName, // Matches Template ObjectMeta Pods\n\t\t\t\t},\n\t\t\t},\n\t\t\tTemplate: corev1.PodTemplateSpec{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tLabels: map[string]string{\n\t\t\t\t\t\tconstants.AppLabel: deploymentName, // Matched By Deployment Selector Above\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tSpec: corev1.PodSpec{\n\t\t\t\t\tServiceAccountName: r.environment.ServiceAccount,\n\t\t\t\t\tContainers: []corev1.Container{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: deploymentName,\n\t\t\t\t\t\t\tLivenessProbe: &corev1.Probe{\n\t\t\t\t\t\t\t\tHandler: corev1.Handler{\n\t\t\t\t\t\t\t\t\tHTTPGet: &corev1.HTTPGetAction{\n\t\t\t\t\t\t\t\t\t\tPort: intstr.FromInt(constants.HealthPort),\n\t\t\t\t\t\t\t\t\t\tPath: health.LivenessPath,\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\tInitialDelaySeconds: constants.ChannelLivenessDelay,\n\t\t\t\t\t\t\t\tPeriodSeconds: constants.ChannelLivenessPeriod,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tReadinessProbe: &corev1.Probe{\n\t\t\t\t\t\t\t\tHandler: corev1.Handler{\n\t\t\t\t\t\t\t\t\tHTTPGet: &corev1.HTTPGetAction{\n\t\t\t\t\t\t\t\t\t\tPort: intstr.FromInt(constants.HealthPort),\n\t\t\t\t\t\t\t\t\t\tPath: health.ReadinessPath,\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\tInitialDelaySeconds: constants.ChannelReadinessDelay,\n\t\t\t\t\t\t\t\tPeriodSeconds: constants.ChannelReadinessPeriod,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tImage: r.environment.ChannelImage,\n\t\t\t\t\t\t\tPorts: []corev1.ContainerPort{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tName: 
\"server\",\n\t\t\t\t\t\t\t\t\tContainerPort: int32(constants.HttpContainerPortNumber),\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tEnv: channelEnvVars,\n\t\t\t\t\t\t\tImagePullPolicy: corev1.PullIfNotPresent,\n\t\t\t\t\t\t\tResources: corev1.ResourceRequirements{\n\t\t\t\t\t\t\t\tRequests: corev1.ResourceList{\n\t\t\t\t\t\t\t\t\tcorev1.ResourceCPU: r.config.Channel.CpuRequest,\n\t\t\t\t\t\t\t\t\tcorev1.ResourceMemory: r.config.Channel.MemoryRequest,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\tLimits: corev1.ResourceList{\n\t\t\t\t\t\t\t\t\tcorev1.ResourceCPU: r.config.Channel.CpuLimit,\n\t\t\t\t\t\t\t\t\tcorev1.ResourceMemory: r.config.Channel.MemoryLimit,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\t// Return Channel Deployment\n\treturn deployment, nil\n}", "func (r *ContainerizedWorkloadReconciler) renderDeployment(ctx context.Context,\n\tworkload *oamv1alpha2.ContainerizedWorkload) (*appsv1.Deployment, error) {\n\n\tresources, err := cwh.Translator(ctx, workload)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdeploy, ok := resources[0].(*appsv1.Deployment)\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"internal error, deployment is not rendered correctly\")\n\t}\n\t// the translator lib doesn't set the namespace\n\tdeploy.Namespace = workload.Namespace\n\t// k8s server-side patch complains if the protocol is not set\n\tfor i := 0; i < len(deploy.Spec.Template.Spec.Containers); i++ {\n\t\tfor j := 0; j < len(deploy.Spec.Template.Spec.Containers[i].Ports); j++ {\n\t\t\tif len(deploy.Spec.Template.Spec.Containers[i].Ports[j].Protocol) == 0 {\n\t\t\t\tdeploy.Spec.Template.Spec.Containers[i].Ports[j].Protocol = corev1.ProtocolTCP\n\t\t\t}\n\t\t}\n\t}\n\tr.Log.Info(\" rendered a deployment\", \"deploy\", deploy.Spec.Template.Spec)\n\n\t// set the controller reference so that we can watch this deployment and it will be deleted automatically\n\tif err := ctrl.SetControllerReference(workload, deploy, r.Scheme); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn deploy, nil\n}", "func (c *Client) CreateDeployment(label, wsId string) (*Deployment, error) {\n\trequest := fmt.Sprintf(deploymentCreateRequest, label, wsId)\n\n\tresponse, err := c.QueryHouston(request)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"CreateDeployment Failed\")\n\t}\n\n\treturn response.Data.CreateDeployment, nil\n}", "func CreateOrUpdateDeployment(client clientset.Interface, deploy *extensions.Deployment) error {\n if _, err := client.ExtensionsV1beta1().Deployments(deploy.ObjectMeta.Namespace).Create(deploy); err != nil {\n if !apierrors.IsAlreadyExists(err) {\n return fmt.Errorf(\"unable to create deployment: %v\", err)\n }\n\n if _, err := client.ExtensionsV1beta1().Deployments(deploy.ObjectMeta.Namespace).Update(deploy); err != nil {\n return fmt.Errorf(\"unable to update deployment: %v\", err)\n }\n }\n return nil\n}", "func CreateDeployment(ctx context.Context, client clientset.Interface, replicas int32, podLabels map[string]string, nodeSelector map[string]string, namespace string, pvclaims []*v1.PersistentVolumeClaim, securityLevel admissionapi.Level, command string) (*appsv1.Deployment, error) {\n\tdeploymentSpec := testDeployment(replicas, podLabels, nodeSelector, namespace, pvclaims, securityLevel, command)\n\tdeployment, err := client.AppsV1().Deployments(namespace).Create(ctx, deploymentSpec, metav1.CreateOptions{})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"deployment %q Create API error: %w\", deploymentSpec.Name, 
err)\n\t}\n\tframework.Logf(\"Waiting deployment %q to complete\", deploymentSpec.Name)\n\terr = WaitForDeploymentComplete(client, deployment)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"deployment %q failed to complete: %w\", deploymentSpec.Name, err)\n\t}\n\treturn deployment, nil\n}", "func (td *OsmTestData) CreateDeployment(ns string, deployment appsv1.Deployment) (*appsv1.Deployment, error) {\n\tdeploymentRet, err := td.Client.AppsV1().Deployments(ns).Create(context.Background(), &deployment, metav1.CreateOptions{})\n\tif err != nil {\n\t\terr := fmt.Errorf(\"Could not create Deployment: %v\", err)\n\t\treturn nil, err\n\t}\n\treturn deploymentRet, nil\n}", "func (m *Machine) CreateBOSHDeployment(namespace string, deployment bdv1.BOSHDeployment) (*bdv1.BOSHDeployment, machine.TearDownFunc, error) {\n\tclient := m.VersionedClientset.BoshdeploymentV1alpha1().BOSHDeployments(namespace)\n\td, err := client.Create(context.Background(), &deployment, metav1.CreateOptions{})\n\treturn d, func() error {\n\t\terr := client.Delete(context.Background(), deployment.GetName(), metav1.DeleteOptions{})\n\t\tif err != nil && !apierrors.IsNotFound(err) {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t}, err\n}", "func (client *Client) CreateDeployment(namespace string, deployment model.Deployment) error {\n\treturn client.RestAPI.Post(rest.Rq{\n\t\tBody: deployment,\n\t\tURL: rest.URL{\n\t\t\tPath: deploymentsPath,\n\t\t\tParams: rest.P{\n\t\t\t\t\"namespace\": namespace,\n\t\t\t},\n\t\t},\n\t})\n}", "func (r *Reconciler) newChannelDeployment(channel *knativekafkav1alpha1.KafkaChannel) (*appsv1.Deployment, error) {\n\n\t// Get The Channel Deployment Name (One Channel Deployment Per Kafka Auth Secret)\n\tdeploymentName := util.ChannelDeploymentDnsSafeName(r.kafkaSecretName(channel))\n\n\t// Replicas Int Value For De-Referencing\n\treplicas := int32(r.environment.ChannelReplicas)\n\n\t// Create The Channel Container Environment Variables\n\tchannelEnvVars, err := r.channelDeploymentEnvVars(channel)\n\tif err != nil {\n\t\tr.Logger.Error(\"Failed To Create Channel Deployment Environment Variables\", zap.Error(err))\n\t\treturn nil, err\n\t}\n\n\t// Create & Return The Channel's Deployment\n\tdeployment := &appsv1.Deployment{\n\t\tTypeMeta: metav1.TypeMeta{\n\t\t\tAPIVersion: appsv1.SchemeGroupVersion.String(),\n\t\t\tKind: constants.DeploymentKind,\n\t\t},\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: deploymentName,\n\t\t\tNamespace: constants.KnativeEventingNamespace,\n\t\t\tLabels: map[string]string{\n\t\t\t\tAppLabel: deploymentName, // Matches Service Selector Key/Value Below\n\t\t\t\tKafkaChannelChannelLabel: \"true\", // Allows for identification of KafkaChannels\n\t\t\t},\n\t\t},\n\t\tSpec: appsv1.DeploymentSpec{\n\t\t\tReplicas: &replicas,\n\t\t\tSelector: &metav1.LabelSelector{\n\t\t\t\tMatchLabels: map[string]string{\n\t\t\t\t\tAppLabel: deploymentName, // Matches Template ObjectMeta Pods\n\t\t\t\t},\n\t\t\t},\n\t\t\tTemplate: corev1.PodTemplateSpec{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tLabels: map[string]string{\n\t\t\t\t\t\tAppLabel: deploymentName, // Matched By Deployment Selector Above\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tSpec: corev1.PodSpec{\n\t\t\t\t\tServiceAccountName: r.environment.ServiceAccount,\n\t\t\t\t\tContainers: []corev1.Container{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: deploymentName,\n\t\t\t\t\t\t\tLivenessProbe: &corev1.Probe{\n\t\t\t\t\t\t\t\tHandler: corev1.Handler{\n\t\t\t\t\t\t\t\t\tHTTPGet: &corev1.HTTPGetAction{\n\t\t\t\t\t\t\t\t\t\tPort: 
intstr.FromInt(constants.HealthPort),\n\t\t\t\t\t\t\t\t\t\tPath: health.LivenessPath,\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\tInitialDelaySeconds: constants.ChannelLivenessDelay,\n\t\t\t\t\t\t\t\tPeriodSeconds: constants.ChannelLivenessPeriod,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tReadinessProbe: &corev1.Probe{\n\t\t\t\t\t\t\t\tHandler: corev1.Handler{\n\t\t\t\t\t\t\t\t\tHTTPGet: &corev1.HTTPGetAction{\n\t\t\t\t\t\t\t\t\t\tPort: intstr.FromInt(constants.HealthPort),\n\t\t\t\t\t\t\t\t\t\tPath: health.ReadinessPath,\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\tInitialDelaySeconds: constants.ChannelReadinessDelay,\n\t\t\t\t\t\t\t\tPeriodSeconds: constants.ChannelReadinessPeriod,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tImage: r.environment.ChannelImage,\n\t\t\t\t\t\t\tPorts: []corev1.ContainerPort{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tName: \"server\",\n\t\t\t\t\t\t\t\t\tContainerPort: int32(constants.HttpContainerPortNumber),\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tEnv: channelEnvVars,\n\t\t\t\t\t\t\tImagePullPolicy: corev1.PullAlways,\n\t\t\t\t\t\t\tVolumeMounts: []corev1.VolumeMount{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tName: constants.LoggingConfigVolumeName,\n\t\t\t\t\t\t\t\t\tMountPath: constants.LoggingConfigMountPath,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tResources: corev1.ResourceRequirements{\n\t\t\t\t\t\t\t\tRequests: corev1.ResourceList{\n\t\t\t\t\t\t\t\t\tcorev1.ResourceCPU: r.environment.ChannelCpuRequest,\n\t\t\t\t\t\t\t\t\tcorev1.ResourceMemory: r.environment.ChannelMemoryRequest,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\tLimits: corev1.ResourceList{\n\t\t\t\t\t\t\t\t\tcorev1.ResourceCPU: r.environment.ChannelCpuLimit,\n\t\t\t\t\t\t\t\t\tcorev1.ResourceMemory: r.environment.ChannelMemoryLimit,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tVolumes: []corev1.Volume{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: constants.LoggingConfigVolumeName,\n\t\t\t\t\t\t\tVolumeSource: corev1.VolumeSource{\n\t\t\t\t\t\t\t\tConfigMap: &corev1.ConfigMapVolumeSource{\n\t\t\t\t\t\t\t\t\tLocalObjectReference: corev1.LocalObjectReference{\n\t\t\t\t\t\t\t\t\t\tName: constants.LoggingConfigMapName,\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\t// Return Channel Deployment\n\treturn deployment, nil\n}", "func newDeploymentForApplication(application *appV1Alpha1.Application) *appsV1.Deployment {\n\tlabels := labelsForApplication(application.Name)\n\tcontainers := buildContainersForApplication(application)\n\n\treturn &appsV1.Deployment{\n\t\tObjectMeta: metaV1.ObjectMeta{\n\t\t\tName: application.Name,\n\t\t\tNamespace: application.Namespace,\n\t\t},\n\t\tSpec: appsV1.DeploymentSpec{\n\t\t\tReplicas: application.Spec.Replicas,\n\t\t\tSelector: &metaV1.LabelSelector{\n\t\t\t\tMatchLabels: labels,\n\t\t\t},\n\t\t\tTemplate: coreV1.PodTemplateSpec{\n\t\t\t\tObjectMeta: metaV1.ObjectMeta{\n\t\t\t\t\tLabels: labels,\n\t\t\t\t},\n\t\t\t\tSpec: coreV1.PodSpec{\n\t\t\t\t\tContainers: containers,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}", "func newDeploymentForCR(cr *v1.Restaurant, cmVersion string) *appsv1.Deployment {\n\treplicas := cr.Spec.Deployment.Replicas\n\tif replicas == 0 {\n\t\treplicas = 1\n\t}\n\tprobe := &corev1.Probe{\n\t\tHandler: corev1.Handler{\n\t\t\tHTTPGet: &corev1.HTTPGetAction{\n\t\t\t\tPath: \"/health\",\n\t\t\t\tPort: intstr.FromInt(8080),\n\t\t\t},\n\t\t},\n\t}\n\tmaxSurge := intstr.FromInt(1)\n\tmaxUnavailable := 
intstr.FromInt(0)\n\treturn &appsv1.Deployment{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: cr.Name,\n\t\t\tNamespace: cr.Namespace,\n\t\t\tLabels: map[string]string{\n\t\t\t\t\"app\": cr.Name,\n\t\t\t},\n\t\t},\n\t\tSpec: appsv1.DeploymentSpec{\n\t\t\tReplicas: &replicas,\n\t\t\tStrategy: appsv1.DeploymentStrategy{\n\t\t\t\tType: appsv1.RollingUpdateDeploymentStrategyType,\n\t\t\t\tRollingUpdate: &appsv1.RollingUpdateDeployment{\n\t\t\t\t\tMaxSurge: &maxSurge,\n\t\t\t\t\tMaxUnavailable: &maxUnavailable,\n\t\t\t\t},\n\t\t\t},\n\t\t\tSelector: &metav1.LabelSelector{\n\t\t\t\tMatchLabels: getLabels(cr),\n\t\t\t},\n\t\t\tTemplate: corev1.PodTemplateSpec{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tLabels: getLabels(cr),\n\t\t\t\t\tAnnotations: map[string]string{\n\t\t\t\t\t\t\"configMapResourceVersion\": cmVersion,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tSpec: corev1.PodSpec{\n\t\t\t\t\tContainers: []corev1.Container{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: \"restaurant\",\n\t\t\t\t\t\t\tImage: \"quay.io/ruben/restaurant-api:latest\",\n\t\t\t\t\t\t\tCommand: []string{\"./application\", \"-Dquarkus.http.host=0.0.0.0\"},\n\t\t\t\t\t\t\tPorts: []corev1.ContainerPort{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tName: \"http\",\n\t\t\t\t\t\t\t\t\tContainerPort: 8080,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tEnv: []corev1.EnvVar{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tName: \"DATA_PATH\",\n\t\t\t\t\t\t\t\t\tValue: \"/data\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tLivenessProbe: probe,\n\t\t\t\t\t\t\tReadinessProbe: probe,\n\t\t\t\t\t\t\tVolumeMounts: []corev1.VolumeMount{{\n\t\t\t\t\t\t\t\tName: \"data\",\n\t\t\t\t\t\t\t\tMountPath: \"/data\",\n\t\t\t\t\t\t\t}},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tVolumes: []corev1.Volume{{\n\t\t\t\t\t\tName: \"data\",\n\t\t\t\t\t\tVolumeSource: corev1.VolumeSource{\n\t\t\t\t\t\t\tConfigMap: &corev1.ConfigMapVolumeSource{\n\t\t\t\t\t\t\t\tLocalObjectReference: corev1.LocalObjectReference{\n\t\t\t\t\t\t\t\t\tName: cr.Name,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t}},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}", "func CreateDeployment(chartName string, releaseName string, valueOverrides []byte, kubeConfig []byte, clusterName string) (*rls.InstallReleaseResponse, error) {\n\tdefer tearDown()\n\n\tlogTag := \"CreateDeployment\"\n\n\tutils.LogInfof(logTag, \"Deploying chart='%s', release name='%s'.\", chartName, releaseName)\n\tdownloadedChartPath, err := downloadChartFromRepo(chartName, clusterName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tutils.LogInfof(logTag, \"Loading chart '%s'\", downloadedChartPath)\n\tchartRequested, err := chartutil.Load(downloadedChartPath)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Error loading chart: %v\", err)\n\t}\n\tif req, err := chartutil.LoadRequirements(chartRequested); err == nil {\n\t\tif err := checkDependencies(chartRequested, req); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t} else if err != chartutil.ErrRequirementsNotFound {\n\t\treturn nil, fmt.Errorf(\"cannot load requirements: %v\", err)\n\t}\n\tvar namespace = \"default\"\n\tif len(strings.TrimSpace(releaseName)) == 0 {\n\t\treleaseName, _ = generateName(\"\")\n\t}\n\thClient, err := GetHelmClient(kubeConfig)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tinstallRes, err := 
hClient.InstallReleaseFromChart(\n\t\tchartRequested,\n\t\tnamespace,\n\t\thelm.ValueOverrides(valueOverrides),\n\t\thelm.ReleaseName(releaseName),\n\t\thelm.InstallDryRun(false),\n\t\thelm.InstallReuseName(true),\n\t\thelm.InstallDisableHooks(false),\n\t\thelm.InstallTimeout(30),\n\t\thelm.InstallWait(false))\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Error deploying chart: %v\", err)\n\t}\n\treturn installRes, nil\n}", "func NewDeployment(deploymentName string, replicas int32, podLabels map[string]string, containerName, image string, strategyType appsv1.DeploymentStrategyType) *appsv1.Deployment {\n\tzero := int64(0)\n\treturn &appsv1.Deployment{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: deploymentName,\n\t\t\tLabels: podLabels,\n\t\t},\n\t\tSpec: appsv1.DeploymentSpec{\n\t\t\tReplicas: &replicas,\n\t\t\tSelector: &metav1.LabelSelector{MatchLabels: podLabels},\n\t\t\tStrategy: appsv1.DeploymentStrategy{\n\t\t\t\tType: strategyType,\n\t\t\t},\n\t\t\tTemplate: v1.PodTemplateSpec{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tLabels: podLabels,\n\t\t\t\t},\n\t\t\t\tSpec: v1.PodSpec{\n\t\t\t\t\tTerminationGracePeriodSeconds: &zero,\n\t\t\t\t\tContainers: []v1.Container{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: containerName,\n\t\t\t\t\t\t\tImage: image,\n\t\t\t\t\t\t\tSecurityContext: &v1.SecurityContext{},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}", "func GetSetDeploymentInformation(res http.ResponseWriter, req *http.Request) {\n\tres.Header().Set(\"Content-Type\", \"application/json\")\n\tdItem := DeploymentItems{1, 5161, \"TSUBR05\"}\n\tpackages := Packages{1, \"DEVL\", \"MP3000\", dItem}\n\tc := SetDeploymentInformation{\"2017-08-18\", \"MyGen\", \"FOOENV\", packages, 3193, \"S0000009884\", \"Completed\"}\n\toutgoingJSON, err := json.Marshal(c)\n\tif err != nil {\n\t\tlog.Println(err.Error())\n\t\thttp.Error(res, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tres.WriteHeader(http.StatusCreated)\n\tfmt.Fprint(res, string(outgoingJSON))\n}", "func (c *Catalog) InterpolateBOSHDeployment(name, manifestRef, opsRef string, secretRef string) bdv1.BOSHDeployment {\n\treturn bdv1.BOSHDeployment{\n\t\tObjectMeta: metav1.ObjectMeta{Name: name},\n\t\tSpec: bdv1.BOSHDeploymentSpec{\n\t\t\tManifest: bdv1.Manifest{Ref: manifestRef, Type: bdv1.ConfigMapType},\n\t\t\tOps: []bdv1.Ops{\n\t\t\t\t{Ref: opsRef, Type: bdv1.ConfigMapType},\n\t\t\t\t{Ref: secretRef, Type: bdv1.SecretType},\n\t\t\t},\n\t\t},\n\t}\n}", "func CreateDeployment(name string, options ...DeploymentOption) *appsv1.Deployment {\n\td := &appsv1.Deployment{\n\t\tTypeMeta: genTypeMeta(gvk.Deployment),\n\t\tObjectMeta: genObjectMeta(name, true),\n\t}\n\n\tfor _, option := range options {\n\t\toption(d)\n\t}\n\n\treturn d\n}", "func NewDeploymentCmd(cli *client.Cli) *cobra.Command {\n\n\tvar DeploymentCmd = &cobra.Command{\n\t\tUse: \"deployment COMMAND\",\n\t\tShort: \"Manage deployments\",\n\t}\n\n\tDeploymentCmd.AddCommand(newGetCommand(cli))\n\tDeploymentCmd.AddCommand(newCreateCommand(cli))\n\tDeploymentCmd.AddCommand(newPromoteCommand(cli))\n\n\treturn DeploymentCmd\n}", "func versionDeploymentDefinition(version *Version) *v1beta1.Deployment {\n\treplicas := int32(version.Replicas)\n\tname := fmt.Sprintf(\n\t\t\"%v-%v\",\n\t\tversion.Environment.Application.Slug,\n\t\tversion.Environment.Slug,\n\t)\n\n\tvar k8sVolumes []v1.Volume\n\tfor index := 0; index < len(version.Volumes); index++ {\n\t\tk8sVolumes = append(k8sVolumes, convertVolume(version.Volumes[index]))\n\t}\n\n\tvar 
k8sContainers []v1.Container\n\tfor index := 0; index < len(version.Containers); index++ {\n\t\t// @todo refactor\n\t\tk8sContainers = append(k8sContainers, createContainerSpec(version.Containers[index], version.Environment.UUID, version))\n\t}\n\n\tdeployment := &v1beta1.Deployment{\n\t\tObjectMeta: v1.ObjectMeta{\n\t\t\tName: name,\n\t\t\tNamespace: \"brizo\",\n\t\t\tLabels: map[string]string{\n\t\t\t\t\"brizoManaged\": \"true\",\n\t\t\t\t\"appUUID\": version.Environment.Application.UUID,\n\t\t\t\t\"envUUID\": version.Environment.UUID,\n\t\t\t\t\"versionUUID\": version.UUID,\n\t\t\t},\n\t\t},\n\t\tSpec: v1beta1.DeploymentSpec{\n\t\t\tSelector: &metav1.LabelSelector{\n\t\t\t\tMatchLabels: map[string]string{\n\t\t\t\t\t\"appUUID\": version.Environment.Application.UUID,\n\t\t\t\t\t\"envUUID\": version.Environment.UUID,\n\t\t\t\t\t\"versionUUID\": version.UUID,\n\t\t\t\t},\n\t\t\t},\n\t\t\tReplicas: &replicas,\n\t\t\tTemplate: v1.PodTemplateSpec{\n\t\t\t\tObjectMeta: v1.ObjectMeta{\n\t\t\t\t\tLabels: map[string]string{\n\t\t\t\t\t\t\"brizoManaged\": \"true\",\n\t\t\t\t\t\t\"appUUID\": version.Environment.Application.UUID,\n\t\t\t\t\t\t\"envUUID\": version.Environment.UUID,\n\t\t\t\t\t\t\"versionUUID\": version.UUID,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tSpec: v1.PodSpec{\n\t\t\t\t\tVolumes: k8sVolumes,\n\t\t\t\t\tContainers: k8sContainers,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tv1beta1.SetObjectDefaults_Deployment(deployment)\n\treturn deployment\n}", "func KustomizeDeployment(kustomization *Kustomization, tmpl *template.Template) ([]byte, error) {\n\trepo := &kustomization.Repository\n\n\tdata := deploymentData{\n\t\tNs: kustomization.Ns,\n\t\tTier: kustomization.Tier,\n\t\tName: kustomization.Name,\n\t\tGroup: repo.Group,\n\t\tProject: repo.Project,\n\t\tPath: repo.Path,\n\t}\n\n\tmanifestBuffer := new(bytes.Buffer)\n\terr := tmpl.Execute(manifestBuffer, data)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"can not apply variables to deployment template: %v\", err)\n\t}\n\treturn manifestBuffer.Bytes(), nil\n}", "func (mongo *MongoStorage) CreateDeployment(deployment deployment.ResourceDeploy) (deployment.ResourceDeploy, error) {\n\tmongo.logger.Debugf(\"creating deployment\")\n\tvar collection = mongo.db.C(CollectionDeployment)\n\tif deployment.ID == \"\" {\n\t\tdeployment.ID = uuid.New().String()\n\t}\n\tdeployment.CreatedAt = time.Now().UTC().Format(time.RFC3339)\n\tif err := collection.Insert(deployment); err != nil {\n\t\tmongo.logger.WithError(err).Errorf(\"unable to create deployment\")\n\t\tif mgo.IsDup(err) {\n\t\t\treturn deployment, rserrors.ErrResourceAlreadyExists()\n\t\t}\n\t\treturn deployment, err\n\t}\n\treturn deployment, nil\n}", "func newDeploymentWrite(length int) (string, *DeploymentWrite) {\n\tw := &DeploymentWrite{}\n\tw.handlerName = generateHandlerName() + `_w`\n\tw.Input = make(chan msg.Request, length)\n\tw.Shutdown = make(chan struct{})\n\treturn w.handlerName, w\n}", "func (t *Tracker) trackDeployment(oldObj interface{}, newObj interface{}) {\n\tnewDeploy, ok := newObj.(*appsv1.Deployment)\n\tif !ok {\n\t\tlog.Errorf(\"Not a deploy object\")\n\t\treturn\n\t}\n\toldDeploy, ok := oldObj.(*appsv1.Deployment)\n\tif !ok {\n\t\tlog.Errorf(\"Not a deploy object\")\n\t\treturn\n\t}\n\tname, ok := oldDeploy.Labels[\"kcdapp\"]\n\tif !ok {\n\t\treturn\n\t}\n\tcurImage := newDeploy.Spec.Template.Spec.Containers[0].Image\n\tif name == t.kcdapp {\n\t\tlog.Infof(\"Deployment updated: %s\", name)\n\t\tdeployMessage := DeployMessage{\n\t\t\tType: 
\"deployStatus\",\n\t\t\tVersion: \"v1alpha1\",\n\t\t\tBody: StatusData{\n\t\t\t\tt.clusterName,\n\t\t\t\ttime.Now().UTC(),\n\t\t\t\t*newDeploy,\n\t\t\t\t[]PodInfo{},\n\t\t\t\tt.version,\n\t\t\t},\n\t\t}\n\t\tt.enqueue(t.informerQueues[\"deployment\"], deployMessage)\n\t}\n}", "func templateDeploy(cmd *cobra.Command, args []string) {\n\t//Check deploy template file.\n\tif len(args) <= 0 || utils.IsFileExist(args[0]) == false {\n\t\tfmt.Fprintf(os.Stderr, \"the deploy template file is required, %s\\n\", \"see https://github.com/Huawei/containerops/singular for more detail.\")\n\t\tos.Exit(1)\n\t}\n\n\ttemplate := args[0]\n\td := new(objects.Deployment)\n\n\t//Read template file and parse.\n\tif err := d.ParseFromFile(template, output); err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"parse deploy template error: %s\\n\", err.Error())\n\t\tos.Exit(1)\n\t}\n\n\t//Set private key file path.\n\tif privateKey != \"\" {\n\t\td.Tools.SSH.Private, d.Tools.SSH.Public = privateKey, publicKey\n\t}\n\n\t//The integrity checking of deploy template.\n\tif err := d.Check(); err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"parse deploy template error: %s\\n\", err.Error())\n\t\tos.Exit(1)\n\t}\n\n\t//Set log and error io.Writer\n\tvar logWriters io.Writer\n\n\t//Generate stdout/stderr io.Writer\n\tstdoutFile, _ := os.Create(path.Join(d.Config, \"deploy.log\"))\n\tdefer stdoutFile.Close()\n\n\t//Using MultiWriter log and error.\n\tif verbose == true {\n\t\tlogWriters = io.MultiWriter(stdoutFile, os.Stdout)\n\t} else {\n\t\tlogWriters = io.MultiWriter(stdoutFile)\n\t}\n\n\t//Deploy cloud native stack\n\tif err := module.DeployInfraStacks(d, db, logWriters, timestamp); err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"%s\\n\", err.Error())\n\t\tos.Exit(1)\n\t}\n\n\t//Delete droplets\n\tif del == true {\n\t\tif err := module.DeleteInfraStacks(d, logWriters, timestamp); err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"%s\\n\", err.Error())\n\t\t\tos.Exit(1)\n\t\t}\n\t}\n}", "func PrintDeployment(cmd *cobra.Command, deployment *client.DeploymentResponse) {\n\n\t//Print result\n\tcmd.Println(\"------\")\n\n\tif deployment.AppServerName != \"\" {\n\t\tcmd.Printf(\"%s \", deployment.AppServerName)\n\t}\n\tif deployment.EnvironmentName != \"\" {\n\t\tcmd.Printf(\"%s \", deployment.EnvironmentName)\n\t}\n\tif deployment.ReleaseName != \"\" {\n\t\tcmd.Printf(\"%s \", deployment.ReleaseName)\n\t}\n\tif deployment.DeploymentDate != 0 {\n\t\tcmd.Printf(\"%s \", time.Unix(0, deployment.DeploymentDate*int64(time.Millisecond)).Format(\"2006-01-02T15:04\"))\n\t}\n\tif deployment.State != \"\" {\n\t\tcmd.Println(deployment.State)\n\t}\n\tfor _, appsWithVersion := range deployment.AppsWithVersion {\n\t\tcmd.Printf(\"%s \", appsWithVersion.ApplicationName)\n\t\tcmd.Println(appsWithVersion.Version)\n\t}\n\n}", "func (w *DeploymentWrite) show(q *msg.Request, mr *msg.Result) {\n\tvar (\n\t\tinstanceConfigID, status, nextStatus string\n\t\tnewCurrentStatus, details, newNextStatus, deprovisionTask string\n\t\tstatusUpdateRequired, hasUpdate bool\n\t\terr error\n\t\tres sql.Result\n\t)\n\n\tif err = w.stmtGet.QueryRow(\n\t\tq.Deployment.ID,\n\t).Scan(\n\t\t&instanceConfigID,\n\t\t&status,\n\t\t&nextStatus,\n\t\t&details,\n\t); err == sql.ErrNoRows {\n\t\tmr.NotFound(err, q.Section)\n\t\treturn\n\t} else if err != nil {\n\t\tmr.ServerError(err, q.Section)\n\t\treturn\n\t}\n\n\tdepl := proto.Deployment{}\n\tif err = json.Unmarshal([]byte(details), &depl); err != nil {\n\t\tmr.ServerError(err, q.Section)\n\t\treturn\n\t}\n\n\t// returns true if there is a 
blocked update pending, i.e.\n\t// after this deprovisioning a new version will be rolled out\n\t// -- statement always returns true or false, never null\n\tif err = w.stmtDeprovisionForUpdate.QueryRow(\n\t\tq.Deployment.ID,\n\t).Scan(\n\t\t&hasUpdate,\n\t); err != nil {\n\t\tmr.ServerError(err, q.Section)\n\t\treturn\n\t}\n\n\tswitch hasUpdate {\n\tcase false:\n\t\tdeprovisionTask = proto.TaskDelete\n\tdefault:\n\t\tdeprovisionTask = proto.TaskDeprovision\n\t}\n\n\tswitch status {\n\tcase proto.DeploymentAwaitingRollout:\n\t\tnewCurrentStatus = proto.DeploymentRolloutInProgress\n\t\tnewNextStatus = proto.DeploymentActive\n\t\tdepl.Task = proto.TaskRollout\n\t\tstatusUpdateRequired = true\n\tcase proto.DeploymentRolloutInProgress:\n\t\tdepl.Task = proto.TaskRollout\n\t\tstatusUpdateRequired = false\n\tcase proto.DeploymentActive:\n\t\tdepl.Task = proto.TaskRollout\n\t\tstatusUpdateRequired = false\n\tcase proto.DeploymentRolloutFailed:\n\t\tnewCurrentStatus = proto.DeploymentRolloutInProgress\n\t\tnewNextStatus = proto.DeploymentActive\n\t\tdepl.Task = proto.TaskRollout\n\t\tstatusUpdateRequired = true\n\tcase proto.DeploymentAwaitingDeprovision:\n\t\tnewCurrentStatus = proto.DeploymentDeprovisionInProgress\n\t\tnewNextStatus = proto.DeploymentDeprovisioned\n\t\tdepl.Task = deprovisionTask\n\t\tstatusUpdateRequired = true\n\tcase proto.DeploymentDeprovisionInProgress:\n\t\tdepl.Task = deprovisionTask\n\t\tstatusUpdateRequired = false\n\tcase proto.DeploymentDeprovisionFailed:\n\t\tnewCurrentStatus = proto.DeploymentDeprovisionInProgress\n\t\tnewNextStatus = proto.DeploymentDeprovisioned\n\t\tdepl.Task = deprovisionTask\n\t\tstatusUpdateRequired = true\n\tdefault:\n\t\t// the SQL query filters for the above statuses, a different\n\t\t// status should never appear\n\t\tmr.ServerError(\n\t\t\tfmt.Errorf(\n\t\t\t\t\"Impossible deployment state %s encountered\",\n\t\t\t\tstatus,\n\t\t\t),\n\t\t\tq.Section,\n\t\t)\n\t\treturn\n\t}\n\n\tif statusUpdateRequired {\n\t\tif res, err = w.stmtSetStatusUpdate.Exec(\n\t\t\tnewCurrentStatus,\n\t\t\tnewNextStatus,\n\t\t\tinstanceConfigID,\n\t\t); err != nil {\n\t\t\tmr.ServerError(err, q.Section)\n\t\t\treturn\n\t\t}\n\t\tif mr.RowCnt(res.RowsAffected()) {\n\t\t\tmr.Deployment = append(mr.Deployment, depl)\n\t\t}\n\t} else {\n\t\tmr.Deployment = append(mr.Deployment, depl)\n\t\tmr.OK()\n\t}\n}", "func createDeploymentHelper(deployment *appsv1beta2.Deployment, args InstallFlags) {\n\tdepClient := clientset.AppsV1beta2().Deployments(args.Namespace)\n\tvar result *appsv1beta2.Deployment\n\tvar err error\n\tresult, err = depClient.Create(deployment)\n\tif err != nil {\n\t\tif !apierr.IsAlreadyExists(err) {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\t// deployment already exists\n\t\texisting, err := depClient.Get(deployment.ObjectMeta.Name, metav1.GetOptions{})\n\t\tif err != nil {\n\t\t\tlog.Fatalf("Failed to get existing deployment: %v", err)\n\t\t}\n\t\tif upgradeNeeded(deployment, existing) {\n\t\t\tif !args.Upgrade {\n\t\t\t\tlog.Fatalf("Deployment '%s' requires upgrade. 
Rerun with --upgrade to upgrade the deployment\", deployment.ObjectMeta.Name)\n\t\t\t}\n\t\t\texisting, err = depClient.Update(deployment)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatalf(\"Failed to update deployment: %v\", err)\n\t\t\t}\n\t\t\tfmt.Printf(\"Existing deployment '%s' updated\\n\", existing.GetObjectMeta().GetName())\n\t\t} else {\n\t\t\tfmt.Printf(\"Existing deployment '%s' up-to-date\\n\", existing.GetObjectMeta().GetName())\n\t\t}\n\t} else {\n\t\tfmt.Printf(\"Deployment '%s' created\\n\", result.GetObjectMeta().GetName())\n\t}\n}", "func ApplicationDeploy(w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\t// application := vars[\"application\"]\n\t// appEnv := vars[\"environment\"]\n\tcluster, ok := vars[\"cluster\"]\n\tif !ok {\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\tstr := `{\"status\": \"error\", \"description\": \"Please specify a target cluster\"}`\n\t\tw.Write([]byte(str))\n\t\tcommon.LogWarning.Println(\"Cluster was not specified in request\")\n\t} else {\n\t\tcommon.LogInfo.Println(\"Setting cluster \", cluster)\n\n\t\t// convert vars to something compatible with render_template\n\t\tm := make(map[string]interface{})\n\t\tfor k, v := range vars {\n\t\t\tm[k] = v\n\t\t}\n\n\t\tactionsOutput, err := services.RunActions(\"/v1/deploy\", m)\n\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\toutputJSON, _ := json.MarshalIndent(actionsOutput, \"\", \" \")\n\t\tw.Write(outputJSON)\n\n\t\tif err != nil {\n\t\t\tw.Header().Set(\"Content-Type\", \"application/text\")\n\t\t\tw.Write([]byte(err.Error()))\n\t\t}\n\t}\n}", "func (s *deployerService) Deploy(ctx context.Context, db *gorm.DB, opts DeploymentsCreateOpts) (*Release, error) {\n\tvar msg jsonmessage.JSONMessage\n\n\tr, err := s.deploy(ctx, db, opts)\n\tif err != nil {\n\t\tmsg = newJSONMessageError(err)\n\t} else {\n\t\tmsg = jsonmessage.JSONMessage{Status: fmt.Sprintf(\"Status: Created new release v%d for %s\", r.Version, r.App.Name)}\n\t}\n\n\tif err := json.NewEncoder(opts.Output).Encode(&msg); err != nil {\n\t\treturn r, err\n\t}\n\n\treturn r, err\n}", "func Deployment(namespace, name string, containerImages ...string) kapisext.Deployment {\n\treturn kapisext.Deployment{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tNamespace: namespace,\n\t\t\tName: name,\n\t\t\tSelfLink: \"/deployment/\" + name,\n\t\t},\n\t\tSpec: kapisext.DeploymentSpec{\n\t\t\tTemplate: kapi.PodTemplateSpec{\n\t\t\t\tSpec: PodSpec(containerImages...),\n\t\t\t},\n\t\t},\n\t}\n}", "func MakeDeployment(serviceInstance *v1alpha1.ServiceInstance, cfg *config.Config) (*appsv1.Deployment, error) {\n\tif cfg == nil {\n\t\treturn nil, errors.New(\"the Kf defaults configmap couldn't be found\")\n\t}\n\tconfigDefaults, err := cfg.Defaults()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif configDefaults.RouteServiceProxyImage == \"\" {\n\t\treturn nil, errors.New(\"config value for RouteServiceProxyImage couldn't be found\")\n\t}\n\n\treturn &appsv1.Deployment{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: DeploymentName(serviceInstance),\n\t\t\tNamespace: serviceInstance.Namespace,\n\t\t\tOwnerReferences: []metav1.OwnerReference{\n\t\t\t\t*kmeta.NewControllerRef(serviceInstance),\n\t\t\t},\n\t\t\tLabels: v1alpha1.UnionMaps(serviceInstance.GetLabels()),\n\t\t},\n\t\tSpec: appsv1.DeploymentSpec{\n\t\t\tSelector: metav1.SetAsLabelSelector(labels.Set(PodLabels(serviceInstance))),\n\t\t\tTemplate: corev1.PodTemplateSpec{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tLabels: 
v1alpha1.UnionMaps(\n\t\t\t\t\t\tPodLabels(serviceInstance),\n\n\t\t\t\t\t\t// Insert a label for isolating apps with their own NetworkPolicies.\n\t\t\t\t\t\tmap[string]string{\n\t\t\t\t\t\t\tv1alpha1.NetworkPolicyLabel: v1alpha1.NetworkPolicyApp,\n\t\t\t\t\t\t}),\n\t\t\t\t\tAnnotations: map[string]string{\n\t\t\t\t\t\t\"sidecar.istio.io/inject\": \"true\",\n\t\t\t\t\t\t\"traffic.sidecar.istio.io/includeOutboundIPRanges\": \"*\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tSpec: makePodSpec(*serviceInstance, configDefaults),\n\t\t\t},\n\t\t\tRevisionHistoryLimit: ptr.Int32(revisionHistoryLimit),\n\t\t\tReplicas: ptr.Int32(replicas),\n\t\t\tStrategy: appsv1.DeploymentStrategy{\n\t\t\t\tType: appsv1.RollingUpdateDeploymentStrategyType,\n\t\t\t\tRollingUpdate: &appsv1.RollingUpdateDeployment{\n\t\t\t\t\tMaxUnavailable: &defaultMaxUnavailable,\n\t\t\t\t\tMaxSurge: &defaultMaxSurge,\n\t\t\t\t},\n\t\t\t},\n\t\t\tProgressDeadlineSeconds: ptr.Int32(600),\n\t\t},\n\t}, nil\n}", "func (c *Catalog) EmptyBOSHDeployment(name, manifestRef string) bdv1.BOSHDeployment {\n\treturn bdv1.BOSHDeployment{\n\t\tObjectMeta: metav1.ObjectMeta{Name: name},\n\t\tSpec: bdv1.BOSHDeploymentSpec{},\n\t}\n}", "func (c *teamClient) WaitForDeployment(logger *log.Entry, resource unstructured.Unstructured, deadline time.Time) error {\n\tvar cur *apps.Deployment\n\tvar nova *apps.Deployment\n\tvar err error\n\tvar resourceVersion int\n\tvar updated bool\n\n\tlogger = logger.WithFields(log.Fields{\n\t\t\"application\": resource.GetName(),\n\t\t\"namespace\": resource.GetNamespace(),\n\t})\n\n\tcli := c.structuredClient.AppsV1().Deployments(resource.GetNamespace())\n\n\t// For Naiserator applications, rely on Naiserator set a terminal rollout status.\n\tgvk := resource.GroupVersionKind()\n\tif gvk.Kind == \"Application\" && gvk.Group == \"nais.io\" {\n\t\treturn c.waitForApplication(logger, resource, deadline)\n\t}\n\n\t// For native Kubernetes deployment objects, get the current deployment object.\n\tfor deadline.After(time.Now()) {\n\t\tcur, err = cli.Get(resource.GetName(), metav1.GetOptions{})\n\t\tif err == nil {\n\t\t\tresourceVersion, _ = strconv.Atoi(cur.GetResourceVersion())\n\t\t\tlogger.Tracef(\"Found current deployment at version %d: %s\", resourceVersion, cur.GetSelfLink())\n\t\t} else if errors.IsNotFound(err) {\n\t\t\tlogger.Tracef(\"Deployment '%s' in namespace '%s' is not currently present in the cluster.\", resource.GetName(), resource.GetNamespace())\n\t\t} else {\n\t\t\tlogger.Tracef(\"Recoverable error while polling for deployment object: %s\", err)\n\t\t\ttime.Sleep(requestInterval)\n\t\t\tcontinue\n\t\t}\n\t\tbreak\n\t}\n\n\t// Wait until the new deployment object is present in the cluster.\n\tfor deadline.After(time.Now()) {\n\t\tnova, err = cli.Get(resource.GetName(), metav1.GetOptions{})\n\t\tif err != nil {\n\t\t\ttime.Sleep(requestInterval)\n\t\t\tcontinue\n\t\t}\n\n\t\trv, _ := strconv.Atoi(nova.GetResourceVersion())\n\t\tif rv > resourceVersion {\n\t\t\tlogger.Tracef(\"New deployment appeared at version %d: %s\", rv, cur.GetSelfLink())\n\t\t\tresourceVersion = rv\n\t\t\tupdated = true\n\t\t}\n\n\t\tif updated && deploymentComplete(nova, &nova.Status) {\n\t\t\treturn nil\n\t\t}\n\n\t\tlogger.WithFields(log.Fields{\n\t\t\t\"deployment_replicas\": nova.Status.Replicas,\n\t\t\t\"deployment_updated_replicas\": nova.Status.UpdatedReplicas,\n\t\t\t\"deployment_available_replicas\": nova.Status.AvailableReplicas,\n\t\t\t\"deployment_observed_generation\": nova.Status.ObservedGeneration,\n\t\t}).Tracef(\"Still 
waiting for deployment to finish rollout...\")\n\n\t\ttime.Sleep(requestInterval)\n\t}\n\n\tif err != nil {\n\t\treturn fmt.Errorf(\"%s; last error was: %s\", ErrDeploymentTimeout, err)\n\t}\n\n\treturn ErrDeploymentTimeout\n}", "func createDeployment(client GitHubClient, event PullRequestEvent, envName string) (*github.Deployment, error) {\n\trepoName := strings.Split(event.Repository.FullName, \"/\")\n\towner, repo := repoName[0], repoName[1]\n\tref := fmt.Sprintf(\"pull/%v/head\", event.PullRequest.Number)\n\n\treq := &github.DeploymentRequest{\n\t\tRef: github.String(ref),\n\t\tTransientEnvironment: github.Bool(true),\n\t\tEnvironment: github.String(envName),\n\t\tRequiredContexts: &[]string{},\n\t}\n\tctx := context.Background()\n\tdeployment, _, err := client.Repositories.CreateDeployment(ctx, owner, repo, req)\n\tif err != nil {\n\t\treturn deployment, err\n\t}\n\treturn deployment, nil\n}", "func TestNewDeployment(t *testing.T) {\n\ttype args struct {\n\t\tworkersTemplate *jettypes.NodeTemplate\n\t\tcontrollerTemplate *jettypes.NodeTemplate\n\t\tingressTemplate *jettypes.NodeTemplate\n\t\tname string\n\t}\n\ttests := []struct {\n\t\tname string\n\t\targs args\n\t\twant *Deployment\n\t\twantErr bool\n\t}{\n\t\t{\n\t\t\tname: \"nil constructor\",\n\t\t\targs: args{nil, nil, nil, \"test\"},\n\t\t\twant: nil,\n\t\t\twantErr: true,\n\t\t},\n\n\t\t//{\n\t\t//\tname:\"nil constructor\",\n\t\t//\targs: args{\n\t\t//\t\tappConfig.GetWorkersTemplate(),\n\t\t//\t\tappConfig.GetControllersTemplate(),\n\t\t//\t\tappConfig.GetIngresTemplate()},\n\t\t//\twant:nil,\n\t\t//\twantErr:false,\n\t\t//},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tgot, err := NewDeployment(tt.args.workersTemplate, tt.args.controllerTemplate, tt.args.ingressTemplate, tt.args.name)\n\t\t\tif (err != nil) != tt.wantErr {\n\t\t\t\tt.Errorf(\"NewDeployment() error = %v, wantErr %v\", err, tt.wantErr)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif !reflect.DeepEqual(got, tt.want) {\n\t\t\t\tt.Errorf(\"NewDeployment() got = %v, want %v\", got, tt.want)\n\t\t\t}\n\t\t})\n\t}\n}", "func CreateDeploymentObject(ctx context.Context, name string, selectorValue, image string) (*Object, error) {\n\tobj, err := DecodeFromYAML(ctx, []byte(fmt.Sprintf(deploymentTemplate, name, \"app\", selectorValue, \"app\", selectorValue, name, image)))\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to decode Deployment object from template: %v\", err)\n\t}\n\treturn obj, nil\n}", "func prepareDeployment(deploymentObj metav1.Object, sha string) error {\n\t// Add sha to image, configMapRef, and secretRef\n\t// Find container in deployment that matches the deployment name\n\tapplicationContainer := findContainer(deploymentObj)\n\tif applicationContainer == nil {\n\t\treturn fmt.Errorf(\"unable to find application image in deployment spec\")\n\t}\n\n\t// Remove docker tag, if it exists.\n\t// The image in the deployment.yaml should not have a tag.\n\tshaReplacement := fmt.Sprintf(\"$1:%s\", sha)\n\t// Replace everything after the colon, if it exists. 
See tests for examples.\n\tregex := regexp.MustCompile(`(.*?)(:|\\z).*`)\n\tapplicationContainer.Image = regex.ReplaceAllString(applicationContainer.Image, shaReplacement)\n\n\trefName := fmt.Sprintf(\"%s-%s\", deploymentObj.GetName(), sha)\n\t// Stub out references to configmap and secret ref to be filled out for each region later.\n\tenvSourceConfigMap := v1.EnvFromSource{\n\t\tConfigMapRef: &v1.ConfigMapEnvSource{\n\t\t\tv1.LocalObjectReference{\n\t\t\t\tName: refName,\n\t\t\t},\n\t\t\tboolRef(false),\n\t\t},\n\t}\n\tenvSourceSecret := v1.EnvFromSource{\n\t\tSecretRef: &v1.SecretEnvSource{\n\t\t\tv1.LocalObjectReference{\n\t\t\t\tName: refName,\n\t\t\t},\n\t\t\t// Secrets may not be defined.\n\t\t\tboolRef(true),\n\t\t},\n\t}\n\tapplicationContainer.EnvFrom = append(applicationContainer.EnvFrom, envSourceConfigMap, envSourceSecret)\n\n\t// Add sha label to deployment and pod.\n\tappendLabel(deploymentObj, \"sha\", sha)\n\tappendLabel(findPodTemplateSpec(deploymentObj), \"sha\", sha)\n\n\treturn nil\n}", "func (c *Catalog) DefaultBOSHDeployment(name, manifestRef string) bdv1.BOSHDeployment {\n\treturn bdv1.BOSHDeployment{\n\t\tObjectMeta: metav1.ObjectMeta{Name: name},\n\t\tSpec: bdv1.BOSHDeploymentSpec{\n\t\t\tManifest: bdv1.Manifest{Ref: manifestRef, Type: bdv1.ConfigMapType},\n\t\t},\n\t}\n}", "func Transform(deployment *appsv1.Deployment) *appsv1.Deployment {\n\treturn &appsv1.Deployment{\n\t\tObjectMeta: metadata.TransformObjectMeta(deployment.ObjectMeta),\n\t\tSpec: appsv1.DeploymentSpec{\n\t\t\tReplicas: deployment.Spec.Replicas,\n\t\t},\n\t\tStatus: appsv1.DeploymentStatus{\n\t\t\tAvailableReplicas: deployment.Status.AvailableReplicas,\n\t\t},\n\t}\n}", "func newDeploymentForCR(cr *tpokkiv1alpha1.GatlingTask, cm *corev1.ConfigMap) *appsv1.Deployment {\n\tlabels := map[string]string{\n\t\t\"app\": \"gatling\",\n\t\t\"gatling_cr\": cr.Name,\n\t}\n\n\tvolumeName := \"configmap-simulations\"\n\t// location must be /input, see https://github.com/tpokki/gatling-image\n\tvolumePath := \"/input\"\n\n\treturn &appsv1.Deployment{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: cr.Name,\n\t\t\tNamespace: cr.Namespace,\n\t\t},\n\t\tSpec: appsv1.DeploymentSpec{\n\t\t\tReplicas: &cr.Spec.Replicas,\n\t\t\tSelector: &metav1.LabelSelector{\n\t\t\t\tMatchLabels: labels,\n\t\t\t},\n\t\t\tTemplate: corev1.PodTemplateSpec{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tLabels: labels,\n\t\t\t\t\tAnnotations: map[string]string{\n\t\t\t\t\t\t\"prometheus.io/scrape\": \"true\",\n\t\t\t\t\t\t\"prometheus.io/port\": \"9102\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tSpec: corev1.PodSpec{\n\t\t\t\t\tVolumes: []corev1.Volume{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: volumeName,\n\t\t\t\t\t\t\tVolumeSource: corev1.VolumeSource{\n\t\t\t\t\t\t\t\tConfigMap: &corev1.ConfigMapVolumeSource{\n\t\t\t\t\t\t\t\t\tLocalObjectReference: corev1.LocalObjectReference{\n\t\t\t\t\t\t\t\t\t\tName: cm.ObjectMeta.Name,\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tRestartPolicy: cr.Spec.RestartPolicy,\n\t\t\t\t\tContainers: []corev1.Container{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: \"gatling\",\n\t\t\t\t\t\t\tImage: \"quay.io/tpokki/gatling:0.0.1-3.3.1-prometheus\",\n\t\t\t\t\t\t\tArgs: []string{\"-nr\", \"-s\", cr.Spec.ScenarioSpec.Name},\n\t\t\t\t\t\t\tResources: cr.Spec.ResourceRequirements,\n\t\t\t\t\t\t\tVolumeMounts: []corev1.VolumeMount{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tName: volumeName,\n\t\t\t\t\t\t\t\t\tMountPath: 
volumePath,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}", "func (n *NodeClient) Deploy(dl *gridtypes.Deployment, update bool) error {\n\tdl.TwinID = n.client.id\n\tvar buf bytes.Buffer\n\n\tif err := json.NewEncoder(&buf).Encode(dl); err != nil {\n\t\treturn errors.Wrap(err, \"failed to serialize workload\")\n\t}\n\n\turl := n.url(\"deployment\")\n\tm := http.MethodPost\n\tif update {\n\t\tm = http.MethodPut\n\t}\n\n\trequest, err := http.NewRequest(m, url, &buf)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to build request\")\n\t}\n\n\tif err := n.client.authorize(request); err != nil {\n\t\treturn errors.Wrap(err, \"failed to sign request\")\n\t}\n\n\tresponse, err := http.DefaultClient.Do(request)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err := n.response(response, nil, http.StatusAccepted); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (app *App) HandleDeployTemplate(w http.ResponseWriter, r *http.Request) {\n\tprojID, err := strconv.ParseUint(chi.URLParam(r, \"project_id\"), 0, 64)\n\n\tif err != nil || projID == 0 {\n\t\tapp.handleErrorFormDecoding(err, ErrProjectDecode, w)\n\t\treturn\n\t}\n\n\tname := chi.URLParam(r, \"name\")\n\tversion := chi.URLParam(r, \"version\")\n\n\t// if version passed as latest, pass empty string to loader to get latest\n\tif version == \"latest\" {\n\t\tversion = \"\"\n\t}\n\n\tgetChartForm := &forms.ChartForm{\n\t\tName: name,\n\t\tVersion: version,\n\t\tRepoURL: app.ServerConf.DefaultApplicationHelmRepoURL,\n\t}\n\n\t// if a repo_url is passed as query param, it will be populated\n\tvals, err := url.ParseQuery(r.URL.RawQuery)\n\n\tif err != nil {\n\t\tapp.handleErrorFormDecoding(err, ErrReleaseDecode, w)\n\t\treturn\n\t}\n\n\tclusterID, err := strconv.ParseUint(vals[\"cluster_id\"][0], 10, 64)\n\n\tif err != nil {\n\t\tapp.handleErrorFormDecoding(err, ErrReleaseDecode, w)\n\t\treturn\n\t}\n\n\tgetChartForm.PopulateRepoURLFromQueryParams(vals)\n\n\tchart, err := loader.LoadChartPublic(getChartForm.RepoURL, getChartForm.Name, getChartForm.Version)\n\n\tif err != nil {\n\t\tapp.handleErrorFormDecoding(err, ErrReleaseDecode, w)\n\t\treturn\n\t}\n\n\tform := &forms.InstallChartTemplateForm{\n\t\tReleaseForm: &forms.ReleaseForm{\n\t\t\tForm: &helm.Form{\n\t\t\t\tRepo: app.Repo,\n\t\t\t\tDigitalOceanOAuth: app.DOConf,\n\t\t\t},\n\t\t},\n\t\tChartTemplateForm: &forms.ChartTemplateForm{},\n\t}\n\n\tform.ReleaseForm.PopulateHelmOptionsFromQueryParams(\n\t\tvals,\n\t\tapp.Repo.Cluster,\n\t)\n\n\tif err := json.NewDecoder(r.Body).Decode(form); err != nil {\n\t\tapp.handleErrorFormDecoding(err, ErrUserDecode, w)\n\t\treturn\n\t}\n\n\tagent, err := app.getAgentFromReleaseForm(\n\t\tw,\n\t\tr,\n\t\tform.ReleaseForm,\n\t)\n\n\tif err != nil {\n\t\tapp.handleErrorFormDecoding(err, ErrUserDecode, w)\n\t\treturn\n\t}\n\n\tregistries, err := app.Repo.Registry.ListRegistriesByProjectID(uint(projID))\n\n\tif err != nil {\n\t\tapp.handleErrorDataRead(err, w)\n\t\treturn\n\t}\n\n\tconf := &helm.InstallChartConfig{\n\t\tChart: chart,\n\t\tName: form.ChartTemplateForm.Name,\n\t\tNamespace: form.ReleaseForm.Form.Namespace,\n\t\tValues: form.ChartTemplateForm.FormValues,\n\t\tCluster: form.ReleaseForm.Cluster,\n\t\tRepo: *app.Repo,\n\t\tRegistries: registries,\n\t}\n\n\trel, err := agent.InstallChart(conf, app.DOConf)\n\n\tif err != nil {\n\t\tapp.sendExternalError(err, http.StatusInternalServerError, HTTPError{\n\t\t\tCode: ErrReleaseDeploy,\n\t\t\tErrors: []string{\"error installing 
a new chart: \" + err.Error()},\n\t\t}, w)\n\n\t\treturn\n\t}\n\n\ttoken, err := repository.GenerateRandomBytes(16)\n\n\tif err != nil {\n\t\tapp.handleErrorInternal(err, w)\n\t\treturn\n\t}\n\n\t// create release with webhook token in db\n\timage, ok := rel.Config[\"image\"].(map[string]interface{})\n\tif !ok {\n\t\tapp.handleErrorInternal(fmt.Errorf(\"Could not find field image in config\"), w)\n\t\treturn\n\t}\n\n\trepository := image[\"repository\"]\n\trepoStr, ok := repository.(string)\n\n\tif !ok {\n\t\tapp.handleErrorInternal(fmt.Errorf(\"Could not find field repository in config\"), w)\n\t\treturn\n\t}\n\n\trelease := &models.Release{\n\t\tClusterID: form.ReleaseForm.Form.Cluster.ID,\n\t\tProjectID: form.ReleaseForm.Form.Cluster.ProjectID,\n\t\tNamespace: form.ReleaseForm.Form.Namespace,\n\t\tName: form.ChartTemplateForm.Name,\n\t\tWebhookToken: token,\n\t\tImageRepoURI: repoStr,\n\t}\n\n\t_, err = app.Repo.Release.CreateRelease(release)\n\n\tif err != nil {\n\t\tapp.sendExternalError(err, http.StatusInternalServerError, HTTPError{\n\t\t\tCode: ErrReleaseDeploy,\n\t\t\tErrors: []string{\"error creating a webhook: \" + err.Error()},\n\t\t}, w)\n\t}\n\n\t// if github action config is linked, call the github action config handler\n\tif form.GithubActionConfig != nil {\n\t\tgaForm := &forms.CreateGitAction{\n\t\t\tRelease: release,\n\n\t\t\tGitRepo: form.GithubActionConfig.GitRepo,\n\t\t\tGitBranch: form.GithubActionConfig.GitBranch,\n\t\t\tImageRepoURI: form.GithubActionConfig.ImageRepoURI,\n\t\t\tDockerfilePath: form.GithubActionConfig.DockerfilePath,\n\t\t\tGitRepoID: form.GithubActionConfig.GitRepoID,\n\t\t\tRegistryID: form.GithubActionConfig.RegistryID,\n\n\t\t\tShouldGenerateOnly: false,\n\t\t\tShouldCreateWorkflow: form.GithubActionConfig.ShouldCreateWorkflow,\n\t\t}\n\n\t\t// validate the form\n\t\tif err := app.validator.Struct(form); err != nil {\n\t\t\tapp.handleErrorFormValidation(err, ErrProjectValidateFields, w)\n\t\t\treturn\n\t\t}\n\n\t\tapp.createGitActionFromForm(projID, clusterID, form.ChartTemplateForm.Name, gaForm, w, r)\n\t}\n\n\tw.WriteHeader(http.StatusOK)\n}", "func deployLandingPage(k *kabanerov1alpha1.Kabanero, c client.Client) error {\n\t// if enable is false do not deploy the landing page\n\tif k.Spec.Landing.Enable != nil && *(k.Spec.Landing.Enable) == false {\n\t\terr := cleanupLandingPage(k, c)\n\t\treturn err\n\t}\n\trev, err := resolveSoftwareRevision(k, \"landing\", k.Spec.Landing.Version)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t//The context which will be used to render any templates\n\ttemplateContext := rev.Identifiers\n\n\timage, err := imageUriWithOverrides(\"\", \"\", \"\", rev)\n\tif err != nil {\n\t\treturn err\n\t}\n\ttemplateContext[\"image\"] = image\n\n\tf, err := rev.OpenOrchestration(\"kabanero-landing.yaml\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ts, err := renderOrchestration(f, templateContext)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tm, err := mf.FromReader(strings.NewReader(s), c)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttransforms := []mf.Transformer{mf.InjectOwner(k), mf.InjectNamespace(k.GetNamespace())}\n\terr = m.Transform(transforms...)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = m.ApplyAll()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Create a clientset to drive API operations on resources.\n\tconfig, err := clientcmd.BuildConfigFromFlags(\"\", \"\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tclientset, err := kubernetes.NewForConfig(config)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// 
Retrieve the kabanero landing URL.\n\tlandingURL, err := getLandingURL(k, config)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Create a Deployment. The landing application requires knowledge of the landingURL\n\t// post route creation.\n\tenv := []corev1.EnvVar{{Name: \"LANDING_URL\", Value: landingURL}}\n\terr = createDeployment(k, clientset, c, \"kabanero-landing\", image, env, nil, kllog)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Update the web console's ConfigMap with custom data.\n\terr = customizeWebConsole(k, clientset, config, landingURL)\n\n\treturn err\n}", "func generateDeployment(image string) clusterv1.MachineDeployment {\n\tmachineLabels := map[string]string{\"name\": image}\n\treturn clusterv1.MachineDeployment{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: image,\n\t\t\tAnnotations: make(map[string]string),\n\t\t},\n\t\tSpec: clusterv1.MachineDeploymentSpec{\n\t\t\tReplicas: pointer.Int32(3),\n\t\t\tSelector: metav1.LabelSelector{MatchLabels: machineLabels},\n\t\t\tTemplate: clusterv1.MachineTemplateSpec{\n\t\t\t\tObjectMeta: clusterv1.ObjectMeta{\n\t\t\t\t\tLabels: machineLabels,\n\t\t\t\t},\n\t\t\t\tSpec: clusterv1.MachineSpec{\n\t\t\t\t\tNodeDrainTimeout: &metav1.Duration{Duration: 10 * time.Second},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}", "func (d *Deployer) Deploy(namespace, rcName string) error {\n\t// Look up the new deployment.\n\tto, err := d.getDeployment(namespace, rcName)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"couldn't get deployment %s: %v\", rcName, err)\n\t}\n\n\t// Decode the config from the deployment.\n\t// TODO: Remove this once we are sure there are no internal versions of configs serialized in DC\n\tconfig, err := appsserialization.DecodeDeploymentConfig(to)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"couldn't decode deployment config from deployment %s: %v\", to.Name, err)\n\t}\n\n\t// Get a strategy for the deployment.\n\ts, err := d.strategyFor(config)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// New deployments must have a desired replica count.\n\tdesiredReplicas, hasDesired := deploymentDesiredReplicas(to)\n\tif !hasDesired {\n\t\treturn fmt.Errorf(\"deployment %s has already run to completion\", to.Name)\n\t}\n\n\t// Find all deployments for the config.\n\tunsortedDeployments, err := d.getDeployments(namespace, config.Name)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"couldn't get controllers in namespace %s: %v\", namespace, err)\n\t}\n\tdeployments := make([]*corev1.ReplicationController, 0, len(unsortedDeployments.Items))\n\tfor i := range unsortedDeployments.Items {\n\t\tdeployments = append(deployments, &unsortedDeployments.Items[i])\n\t}\n\n\t// Sort all the deployments by version.\n\tsort.Sort(appsutil.ByLatestVersionDesc(deployments))\n\n\t// Find any last completed deployment.\n\tvar from *corev1.ReplicationController\n\tfor _, candidate := range deployments {\n\t\tif candidate.Name == to.Name {\n\t\t\tcontinue\n\t\t}\n\t\tif appsutil.IsCompleteDeployment(candidate) {\n\t\t\tfrom = candidate\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif appsutil.DeploymentVersionFor(to) < appsutil.DeploymentVersionFor(from) {\n\t\treturn fmt.Errorf(\"deployment %s is older than %s\", to.Name, from.Name)\n\t}\n\n\t// Scale down any deployments which aren't the new or last deployment.\n\tfor _, candidate := range deployments {\n\t\t// Skip the from/to deployments.\n\t\tif candidate.Name == to.Name {\n\t\t\tcontinue\n\t\t}\n\t\tif from != nil && candidate.Name == from.Name {\n\t\t\tcontinue\n\t\t}\n\t\t// Skip the deployment if it's already scaled 
down.\n\t\tif candidate.Spec.Replicas == nil || *candidate.Spec.Replicas == 0 {\n\t\t\tcontinue\n\t\t}\n\t\t// Scale the deployment down to zero.\n\t\tretryWaitParams := kscale.NewRetryParams(1*time.Second, 120*time.Second)\n\t\tif err := d.scaler.Scale(candidate.Namespace, candidate.Name, uint(0), &kscale.ScalePrecondition{Size: -1, ResourceVersion: \"\"}, retryWaitParams, retryWaitParams, corev1.SchemeGroupVersion.WithResource(\"replicationcontrollers\"), false); err != nil {\n\t\t\tfmt.Fprintf(d.errOut, \"error: Couldn't scale down prior deployment %s: %v\\n\", appsutil.LabelForDeployment(candidate), err)\n\t\t} else {\n\t\t\tfmt.Fprintf(d.out, \"--> Scaled older deployment %s down\\n\", candidate.Name)\n\t\t}\n\t}\n\n\tif d.until == \"start\" {\n\t\treturn strategy.NewConditionReachedErr(\"Ready to start deployment\")\n\t}\n\n\t// Perform the deployment.\n\tif err := s.Deploy(from, to, int(desiredReplicas)); err != nil {\n\t\treturn err\n\t}\n\tfmt.Fprintln(d.out, \"--> Success\")\n\treturn nil\n}", "func buildDeployment(obj interface{}, groupVersionKind GroupVersionKind, conf CostimatorConfig) (Deployment, error) {\n\tswitch obj.(type) {\n\tdefault:\n\t\treturn Deployment{}, fmt.Errorf(\"APIVersion and Kind not Implemented: %+v\", groupVersionKind)\n\tcase *appsV1.Deployment:\n\t\treturn buildDeploymentV1(obj.(*appsV1.Deployment), conf), nil\n\t}\n}", "func newDeploymentNode(nodeName string, node v1alpha1.ElasticsearchNode, cluster *v1alpha1.Elasticsearch, roleMap map[v1alpha1.ElasticsearchNodeRole]bool) NodeTypeInterface {\n\tdeploymentNode := deploymentNode{}\n\n\tdeploymentNode.populateReference(nodeName, node, cluster, roleMap, int32(1))\n\n\treturn &deploymentNode\n}", "func (c *Controller) RunDeployment(deployment *I.Deployment, response *bytes.Buffer) I.DeployResponse {\n\tuuid := randomizer.StringRunes(10)\n\tlog := I.DeploymentLogger{Log: c.Log, UUID: uuid}\n\treturn c.PushControllerFactory(log).RunDeployment(deployment, response)\n}", "func (c *Context) CreateDeploy(job *work.Job) error {\n // Extract args from job.\n deployUid := job.ArgString(\"deployUid\")\n deployName := job.ArgString(\"name\")\n apiClusterID := uint(job.ArgInt64(\"apiClusterID\"))\n modelVersionID := uint(job.ArgInt64(\"modelVersionID\"))\n sha := job.ArgString(\"sha\")\n envs := job.ArgString(\"envs\")\n logKey := job.ArgString(\"logKey\")\n\n if err := job.ArgError(); err != nil {\n if logKey != \"\" {\n return logBuildableErr(err, logKey, \"Arg error occurred inside create deploy job.\")\n }\n\n app.Log.Errorln(err.Error())\n return err\n }\n\n // Find ApiCluster by ID.\n apiCluster, err := apiclustersvc.FromID(apiClusterID)\n if err != nil {\n return logBuildableErr(err, logKey, \"API cluster not found.\")\n }\n\n // Find ModelVersion by ID.\n modelVersion, err := modelversionsvc.FromID(modelVersionID)\n if err != nil {\n return logBuildableErr(err, logKey, \"Model version not found.\")\n }\n\n // Store ref to project.\n project := &modelVersion.Model.Project\n\n // If sha provided, find Commit by that value. 
Otherwise, fetch latest commit from repo.\n commit, err := commitsvc.FromShaOrLatest(sha, project)\n if err != nil {\n return logBuildableErr(err, logKey, \"Error finding commit sha to deploy.\")\n }\n\n // Upsert Deploy.\n deploy, isNew, err := deploysvc.Upsert(\n commit.ID,\n modelVersion.ID,\n apiCluster.ID,\n deployUid,\n deployName,\n )\n\n if err != nil {\n return logBuildableErr(err, logKey, \"Failed to upsert deploy.\")\n }\n\n // If Deploy already exists, return an \"Everything up-to-date.\" message.\n if !isNew {\n // TODO: stream back a success message with \"Everything up-to-date.\"\n return nil\n }\n\n // Convert stringified envs into map[string]string representation.\n envsMap, err := envvarsvc.MapFromBytes([]byte(envs))\n if err != nil {\n return failDeploy(deploy.ID, err, logKey, \"Failed to parse deploy environment variables.\")\n }\n\n // Create EnvVars for this Deploy.\n if err := envvarsvc.CreateFromMap(deploy.ID, envsMap); err != nil {\n return failDeploy(deploy.ID, err, logKey, \"Failed to create deploy environment variables.\")\n }\n\n // Define args for the BuildDeploy job.\n jobArgs := work.Q{\n \"resourceID\": deploy.ID,\n \"buildTargetSha\": commit.Sha,\n \"projectID\": project.ID,\n \"targetCluster\": cluster.Api,\n \"logKey\": logKey,\n \"followOnJob\": Names.ApiDeploy,\n \"followOnArgs\": enc.JSON{\n \"deployID\": deploy.ID,\n \"logKey\": logKey,\n },\n }\n\n // Enqueue new job to build this Project for the ApiCluster.\n if _, err := app.JobQueue.Enqueue(Names.BuildDeploy, jobArgs); err != nil {\n return failDeploy(deploy.ID, err, logKey, \"Failed to schedule build deploy job.\")\n }\n\n // Update deploy stage to BuildScheduled.\n if err := deploysvc.UpdateStage(deploy, model.BuildStages.BuildScheduled); err != nil {\n return failDeploy(deploy.ID, err, logKey, \"Failed to update stage of deploy.\")\n }\n\n return nil\n}", "func MakeDeployment(config *deployapi.DeploymentConfig, codec runtime.Codec) (*api.ReplicationController, error) {\n\tvar err error\n\tvar encodedConfig string\n\n\tif encodedConfig, err = EncodeDeploymentConfig(config, codec); err != nil {\n\t\treturn nil, err\n\t}\n\n\tdeployment := &api.ReplicationController{\n\t\tObjectMeta: api.ObjectMeta{\n\t\t\tName: LatestDeploymentIDForConfig(config),\n\t\t\tAnnotations: map[string]string{\n\t\t\t\tdeployapi.DeploymentConfigAnnotation: config.Name,\n\t\t\t\tdeployapi.DeploymentStatusAnnotation: string(deployapi.DeploymentStatusNew),\n\t\t\t\tdeployapi.DeploymentEncodedConfigAnnotation: encodedConfig,\n\t\t\t\tdeployapi.DeploymentVersionAnnotation: strconv.Itoa(config.LatestVersion),\n\t\t\t},\n\t\t\tLabels: config.Labels,\n\t\t},\n\t\tSpec: config.Template.ControllerTemplate,\n\t}\n\n\t// The deployment should be inactive initially\n\tdeployment.Spec.Replicas = 0\n\n\t// Ensure that pods created by this deployment controller can be safely associated back\n\t// to the controller, and that multiple deployment controllers for the same config don't\n\t// manipulate each others' pods.\n\tdeployment.Spec.Template.Labels[deployapi.DeploymentConfigLabel] = config.Name\n\tdeployment.Spec.Template.Labels[deployapi.DeploymentLabel] = deployment.Name\n\tdeployment.Spec.Selector[deployapi.DeploymentConfigLabel] = config.Name\n\tdeployment.Spec.Selector[deployapi.DeploymentLabel] = deployment.Name\n\n\treturn deployment, nil\n}", "func (cxn *Connection) CreateDeployment(d Deployment) (*compose.Deployment, error) {\n\tnewDeployment, errs := cxn.client.CreateDeployment(deploymentParams(d, cxn.accountID))\n\tif 
len(errs) != 0 {\n\t\treturn nil, fmt.Errorf(\"Unable to create '%s': %v\\n\",\n\t\t\td.GetName(), errs)\n\t}\n\n\treturn newDeployment, cxn.wait(newDeployment.ProvisionRecipeID, d.GetTimeout())\n}", "func GetDeploymentsCommand() cli.Command {\n\tcommand := cli.Command{\n\t\tName: \"deployment\",\n\t\tUsage: \"options for deployment\",\n\t\tSubcommands: []cli.Command{\n\t\t\t{\n\t\t\t\tName: \"list\",\n\t\t\t\tUsage: \"Lists all the deployments\",\n\t\t\t\tArgsUsage: \" \",\n\t\t\t\tDescription: \"[Deprecated] List the current deployment.\",\n\t\t\t\tAction: func(c *cli.Context) {\n\t\t\t\t\terr := listDeployments(c, os.Stdout)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tlog.Fatal(\"Error: \", err)\n\t\t\t\t\t}\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"show\",\n\t\t\t\tUsage: \"Show deployment info\",\n\t\t\t\tArgsUsage: \" \",\n\t\t\t\tDescription: \"Show detailed information about the current deployment.\\n\" +\n\t\t\t\t\t\" Requires system administrator access,\",\n\t\t\t\tAction: func(c *cli.Context) {\n\t\t\t\t\terr := showDeployment(c, os.Stdout)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tlog.Fatal(\"Error: \", err)\n\t\t\t\t\t}\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"list-hosts\",\n\t\t\t\tUsage: \"Lists all ESXi hosts\",\n\t\t\t\tArgsUsage: \" \",\n\t\t\t\tDescription: \"List information about all ESXi hosts used in the deployment.\\n\" +\n\t\t\t\t\t\" For each host, the ID, the current state, the IP, and the type (MGMT and/or CLOUD)\\n\" +\n\t\t\t\t\t\" Requires system administrator access.\",\n\t\t\t\tAction: func(c *cli.Context) {\n\t\t\t\t\terr := listDeploymentHosts(c, os.Stdout)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tlog.Fatal(\"Error: \", err)\n\t\t\t\t\t}\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"list-vms\",\n\t\t\t\tUsage: \"Lists all VMs\",\n\t\t\t\tArgsUsage: \" \",\n\t\t\t\tDescription: \"List all VMs associated with all tenants and projects.\\n\" +\n\t\t\t\t\t\" Requires system administrator access.\",\n\t\t\t\tAction: func(c *cli.Context) {\n\t\t\t\t\terr := listDeploymentVms(c, os.Stdout)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tlog.Fatal(\"Error: \", err)\n\t\t\t\t\t}\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"enable-cluster-type\",\n\t\t\t\tUsage: \"Enable cluster type for deployment\",\n\t\t\t\tArgsUsage: \" \",\n\t\t\t\tDescription: \"Enable a cluster type (e.g. Kubernetes) and specify the image to be used\\n\" +\n\t\t\t\t\t\" when creating the cluster.\\n\" +\n\t\t\t\t\t\" Requires system administrator access.\",\n\t\t\t\tFlags: []cli.Flag{\n\t\t\t\t\tcli.StringFlag{\n\t\t\t\t\t\tName: \"type, k\",\n\t\t\t\t\t\tUsage: \"Cluster type (accepted values are KUBERNETES, MESOS, or SWARM)\",\n\t\t\t\t\t},\n\t\t\t\t\tcli.StringFlag{\n\t\t\t\t\t\tName: \"image-id, i\",\n\t\t\t\t\t\tUsage: \"ID of the cluster image\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tAction: func(c *cli.Context) {\n\t\t\t\t\terr := enableClusterType(c)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tlog.Fatal(\"Error: \", err)\n\t\t\t\t\t}\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"disable-cluster-type\",\n\t\t\t\tUsage: \"Disable cluster type for deployment\",\n\t\t\t\tArgsUsage: \" \",\n\t\t\t\tDescription: \"Disable a cluster type (e.g. Kubernetes). 
Users will no longer be able\n" +\n\t\t\t\t\t" to deploy clusters of that type, but existing clusters will be unaffected.\n" +\n\t\t\t\t\t" Requires system administrator access.",\n\t\t\t\tFlags: []cli.Flag{\n\t\t\t\t\tcli.StringFlag{\n\t\t\t\t\t\tName: "type, k",\n\t\t\t\t\t\tUsage: "Cluster type (accepted values are KUBERNETES, MESOS, or SWARM)",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tAction: func(c *cli.Context) {\n\t\t\t\t\terr := disableClusterType(c)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tlog.Fatal("Error: ", err)\n\t\t\t\t\t}\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: "update-image-datastores",\n\t\t\t\tUsage: "Updates the list of image datastores",\n\t\t\t\tArgsUsage: " ",\n\t\t\t\tDescription: "Update the list of allowed image datastores.\n" +\n\t\t\t\t\t" Requires system administrator access.",\n\t\t\t\tFlags: []cli.Flag{\n\t\t\t\t\tcli.StringFlag{\n\t\t\t\t\t\tName: "datastores, d",\n\t\t\t\t\t\tUsage: "Comma-separated list of datastore names",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tAction: func(c *cli.Context) {\n\t\t\t\t\terr := updateImageDatastores(c)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tlog.Fatal("Error: ", err)\n\t\t\t\t\t}\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: "sync-hosts-config",\n\t\t\t\tUsage: "Synchronizes host configurations",\n\t\t\t\tArgsUsage: " ",\n\t\t\t\tAction: func(c *cli.Context) {\n\t\t\t\t\terr := syncHostsConfig(c)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tlog.Fatal("Error: ", err)\n\t\t\t\t\t}\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: "pause",\n\t\t\t\tUsage: "Pause system under the deployment",\n\t\t\t\tArgsUsage: " ",\n\t\t\t\tDescription: "Pause Photon Controller. All incoming requests that modify the system\n" +\n\t\t\t\t\t" state (other than resume) will be refused. This implies pause-background-tasks." +\n\t\t\t\t\t" Requires system administrator access.",\n\t\t\t\tAction: func(c *cli.Context) {\n\t\t\t\t\terr := pauseSystem(c)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tlog.Fatal("Error: ", err)\n\t\t\t\t\t}\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: "pause-background-tasks",\n\t\t\t\tUsage: "Pause background tasks",\n\t\t\t\tArgsUsage: " ",\n\t\t\t\tDescription: "Pause all background tasks in Photon Controller, such as image replication." +\n\t\t\t\t\t" Incoming requests from users will continue to work.\n" +\n\t\t\t\t\t" Requires system administrator access.",\n\t\t\t\tAction: func(c *cli.Context) {\n\t\t\t\t\terr := pauseBackgroundTasks(c)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tlog.Fatal("Error: ", err)\n\t\t\t\t\t}\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: "resume",\n\t\t\t\tUsage: "Resume system under the deployment",\n\t\t\t\tArgsUsage: " ",\n\t\t\t\tDescription: "Resume Photon Controller after it has been paused.\n" +\n\t\t\t\t\t" Requires system administrator access.",\n\t\t\t\tAction: func(c *cli.Context) {\n\t\t\t\t\terr := resumeSystem(c)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tlog.Fatal("Error: ", err)\n\t\t\t\t\t}\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: "set-security-groups",\n\t\t\t\tUsage: "Set security groups for a deployment",\n\t\t\t\tArgsUsage: "<security-groups>",\n\t\t\t\tDescription: "Provide the list of Lightwave groups that contain the people who are\n" +\n\t\t\t\t\t" allowed to be system administrators. 
Be careful: providing the wrong group could remove\\n\" +\n\t\t\t\t\t\" your access.\" +\n\t\t\t\t\t\" Requires system administrator access.\",\n\t\t\t\tAction: func(c *cli.Context) {\n\t\t\t\t\terr := setDeploymentSecurityGroups(c)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tlog.Fatal(\"Error: \", err)\n\t\t\t\t\t}\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"migration\",\n\t\t\t\tUsage: \"migrates state and hosts between photon controller deployments\",\n\t\t\t\tSubcommands: []cli.Command{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"prepare\",\n\t\t\t\t\t\tUsage: \"initializes the migration\",\n\t\t\t\t\t\tFlags: []cli.Flag{\n\t\t\t\t\t\t\tcli.StringFlag{\n\t\t\t\t\t\t\t\tName: \"endpoint, e\",\n\t\t\t\t\t\t\t\tUsage: \"API endpoint of the old management plane\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tAction: func(c *cli.Context) {\n\t\t\t\t\t\t\terr := deploymentMigrationPrepare(c)\n\t\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\t\tlog.Fatal(\"Error: \", err)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"finalize\",\n\t\t\t\t\t\tUsage: \"finalizes the migration\",\n\t\t\t\t\t\tFlags: []cli.Flag{\n\t\t\t\t\t\t\tcli.StringFlag{\n\t\t\t\t\t\t\t\tName: \"endpoint, e\",\n\t\t\t\t\t\t\t\tUsage: \"API endpoint of the old management plane\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tAction: func(c *cli.Context) {\n\t\t\t\t\t\t\terr := deploymentMigrationFinalize(c)\n\t\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\t\tlog.Fatal(\"Error: \", err)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"status\",\n\t\t\t\t\t\tUsage: \"shows the status of the current migration\",\n\t\t\t\t\t\tAction: func(c *cli.Context) {\n\t\t\t\t\t\t\terr := showMigrationStatus(c)\n\t\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\t\tlog.Fatal(\"Error: \", err)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\treturn command\n}", "func CreateDeploymentLog(ctx context.Context, db sqlx.Queryer, dl *DeploymentLog) error {\n\tdl.CreatedAt = time.Now()\n\n\terr := sqlx.Get(db, &dl.ID, `\n\t\tinsert into deployment_log (\n\t\t\tcreated_at,\n\t\t\tdeployment_id,\n\t\t\tdev_eui,\n\t\t\tf_port,\n\t\t\tcommand,\n\t\t\tfields\n\t\t) values (\n\t\t\t$1, $2, $3, $4, $5, $6)\n\t\treturning\n\t\t\tid`,\n\t\tdl.CreatedAt,\n\t\tdl.DeploymentID,\n\t\tdl.DevEUI,\n\t\tdl.FPort,\n\t\tdl.Command,\n\t\tdl.Fields,\n\t)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"sql create error: %w\", err)\n\t}\n\n\tlog.WithFields(log.Fields{\n\t\t\"deployment_id\": dl.DeploymentID,\n\t\t\"dev_eui\": dl.DevEUI,\n\t\t\"command\": dl.Command,\n\t}).Info(\"storage: deployment log created\")\n\n\treturn nil\n}", "func SimpleDeployment(image, tag string) *apiv1b2.Deployment {\n\tname := deploymentName\n\timgtag := fmt.Sprintf(\"%s:%s\", image, tag)\n\n\treturn &apiv1b2.Deployment{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: name,\n\t\t\tLabels: map[string]string{\"app\": name, \"tier\": \"api\"},\n\t\t},\n\t\tSpec: apiv1b2.DeploymentSpec{\n\t\t\tSelector: &metav1.LabelSelector{\n\t\t\t\tMatchLabels: map[string]string{\"app\": name, \"tier\": \"api\"},\n\t\t\t},\n\t\t\tTemplate: v1.PodTemplateSpec{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{Name: name,\n\t\t\t\t\tLabels: map[string]string{\"app\": name, \"tier\": \"api\"},\n\t\t\t\t},\n\t\t\t\tSpec: v1.PodSpec{\n\t\t\t\t\tContainers: []v1.Container{\n\t\t\t\t\t\tv1.Container{\n\t\t\t\t\t\t\tName: name,\n\t\t\t\t\t\t\tImage: imgtag,\n\t\t\t\t\t\t\tImagePullPolicy: 
\"Always\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}", "func New(appName string, pdcfg pdconfig.PDConfig, cfg *pdconfig.AppConfig) (*Deployment, error) {\n\n\td := new(Deployment)\n\td.cfg = *cfg\n\n\t// Capture the supplied arguments.\n\td.appName = appName\n\n\t// All string arguments are mandatory.\n\tif appName == \"\" {\n\t\treturn nil, errors.New(\"Deployment initialization error: Appname is mandatory\")\n\t}\n\tif d.cfg.BaseDir == \"\" {\n\t\treturn nil, errors.New(\"Deployment initialization error: BaseDir is mandatory\")\n\t}\n\n\t// Validate the artifact type.\n\tif ac, err := pdcfg.GetArtifactConfig(d.cfg.ArtifactType); err == nil {\n\t\td.acfg = *ac\n\t} else {\n\t\treturn nil, fmt.Errorf(\"Deployment initialization error: invalid ArtifactType %q\", d.cfg.ArtifactType)\n\t}\n\n\t// Derive the UID/GID from the username/groupname.\n\t// NOTE: Go doesn't yet support looking up a GID from a name, so\n\t// we use the gid from the user.\n\tif d.cfg.User != \"\" {\n\t\tif user, err := user.Lookup(d.cfg.User); err == nil {\n\t\t\tif i, err := strconv.ParseInt(user.Uid, 10, 64); err == nil {\n\t\t\t\td.uid = int(i)\n\t\t\t}\n\t\t\tif i, err := strconv.ParseInt(user.Gid, 10, 64); err == nil {\n\t\t\t\td.gid = int(i)\n\t\t\t}\n\t\t}\n\t}\n\n\t// The parent directory must not be \"/\".\n\tparentDir := absPath(d.cfg.BaseDir)\n\tif parentDir == \"/\" {\n\t\treturn nil, errors.New(\"Deployment initialization error: \\\"/\\\" not permitted as BaseDir\")\n\t}\n\n\t// The parent directory must exist.\n\tif _, err := os.Stat(parentDir); err != nil {\n\t\treturn nil, fmt.Errorf(\"Deployment initialization error: unable to stat BaseDir: %s\", err.Error())\n\t}\n\n\t// If the base dir doesn't exist, create it.\n\td.baseDir = path.Join(parentDir, appName)\n\tif _, err := os.Stat(d.baseDir); err != nil {\n\t\tif err := makeDir(d.baseDir, d.uid, d.gid, 0755); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Deployment initialization error: %s\", err.Error())\n\t\t}\n\t}\n\n\t// If the artifact dir doesn't exist, create it.\n\td.artifactDir = path.Join(d.baseDir, kARTIFACTDIR)\n\tif _, err := os.Stat(d.artifactDir); err != nil {\n\t\tif err := makeDir(d.artifactDir, d.uid, d.gid, 0755); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Deployment initialization error: %s\", err.Error())\n\t\t}\n\t}\n\n\t// If the release dir doesn't exist, create it.\n\td.releaseDir = path.Join(d.baseDir, kRELEASEDIR)\n\tif _, err := os.Stat(d.releaseDir); err != nil {\n\t\tif err := makeDir(d.releaseDir, d.uid, d.gid, 0755); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Deployment initialization error: %s\", err.Error())\n\t\t}\n\t}\n\n\treturn d, nil\n}", "func CreateDeployment(client kubernetes.Interface, deploymentName string, namespace string) (*v1beta1.Deployment, error) {\n\tlogrus.Infof(\"Creating Deployment\")\n\tdeploymentClient := client.ExtensionsV1beta1().Deployments(namespace)\n\tdeployment, err := deploymentClient.Create(GetDeployment(namespace, deploymentName))\n\ttime.Sleep(10 * time.Second)\n\treturn deployment, err\n}", "func (t *Transformer) CreateDeployments(s *object.Service) ([]runtime.Object, error) {\n\tresult := []runtime.Object{}\n\tserviceLabels := map[string]string(s.Labels)\n\n\td := &ext_v1beta1.Deployment{\n\t\tObjectMeta: api_v1.ObjectMeta{\n\t\t\tName: s.Name,\n\t\t\tLabels: *util.MergeMaps(\n\t\t\t\t// The map containing `\"service\": s.Name` should always be\n\t\t\t\t// passed later to avoid being overridden by 
util.MergeMaps()\n\t\t\t\t&serviceLabels,\n\t\t\t\t&map[string]string{\n\t\t\t\t\t\"service\": s.Name,\n\t\t\t\t},\n\t\t\t),\n\t\t},\n\t\tSpec: ext_v1beta1.DeploymentSpec{\n\t\t\tStrategy: ext_v1beta1.DeploymentStrategy{\n\t\t\t\t// TODO: make it configurable\n\t\t\t\tType: ext_v1beta1.RollingUpdateDeploymentStrategyType,\n\t\t\t\t// TODO: make it configurable\n\t\t\t\tRollingUpdate: nil,\n\t\t\t},\n\t\t\tTemplate: api_v1.PodTemplateSpec{\n\t\t\t\tObjectMeta: api_v1.ObjectMeta{\n\t\t\t\t\tLabels: *util.MergeMaps(\n\t\t\t\t\t\t// The map containing `\"service\": s.Name` should always be\n\t\t\t\t\t\t// passed later to avoid being overridden by util.MergeMaps()\n\t\t\t\t\t\t&serviceLabels,\n\t\t\t\t\t\t&map[string]string{\n\t\t\t\t\t\t\t\"service\": s.Name,\n\t\t\t\t\t\t},\n\t\t\t\t\t),\n\t\t\t\t},\n\t\t\t\tSpec: api_v1.PodSpec{},\n\t\t\t},\n\t\t},\n\t}\n\n\td.Spec.Replicas = s.Replicas\n\n\tfor _, c := range s.Containers {\n\t\tkc := api_v1.Container{\n\t\t\tName: c.Name,\n\t\t\tImage: c.Image,\n\t\t}\n\n\t\tfor _, e := range c.Environment {\n\t\t\tkc.Env = append(kc.Env, api_v1.EnvVar{\n\t\t\t\tName: e.Key,\n\t\t\t\tValue: e.Value,\n\t\t\t})\n\t\t}\n\n\t\tfor _, p := range c.Ports {\n\t\t\tkc.Ports = append(kc.Ports, api_v1.ContainerPort{\n\t\t\t\tName: c.Name,\n\t\t\t\tContainerPort: int32(p.Port.ContainerPort),\n\t\t\t})\n\t\t}\n\n\t\t// TODO: It is assumed that the check is done about the existence of volume in root level volume section\n\t\tfor _, mount := range c.Mounts {\n\t\t\tvolumeMount := api_v1.VolumeMount{\n\t\t\t\tName: mount.VolumeRef,\n\t\t\t\tReadOnly: mount.ReadOnly,\n\t\t\t\tMountPath: mount.MountPath,\n\t\t\t\tSubPath: mount.VolumeSubPath,\n\t\t\t}\n\n\t\t\tkc.VolumeMounts = append(kc.VolumeMounts, volumeMount)\n\n\t\t\t// if this mount does not exist in emptydir then this is coming from root level volumes directive\n\t\t\t// if tomorrow we add support for ConfigMaps or Secrets mounted as volumes the check should be done\n\t\t\t// here to see if it is not coming from configMaps or Secrets\n\t\t\tif !s.EmptyDirVolumeExists(mount.VolumeRef) {\n\t\t\t\tvolume := api_v1.Volume{\n\t\t\t\t\tName: mount.VolumeRef,\n\t\t\t\t\tVolumeSource: api_v1.VolumeSource{\n\t\t\t\t\t\tPersistentVolumeClaim: &api_v1.PersistentVolumeClaimVolumeSource{\n\t\t\t\t\t\t\tClaimName: mount.VolumeRef,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t}\n\t\t\t\td.Spec.Template.Spec.Volumes = append(d.Spec.Template.Spec.Volumes, volume)\n\t\t\t}\n\t\t}\n\n\t\td.Spec.Template.Spec.Containers = append(d.Spec.Template.Spec.Containers, kc)\n\t}\n\n\t// make entry of emptydir in deployment volume directive\n\tfor _, emptyDir := range s.EmptyDirVolumes {\n\t\tvolume := api_v1.Volume{\n\t\t\tName: emptyDir.Name,\n\t\t\tVolumeSource: api_v1.VolumeSource{\n\t\t\t\tEmptyDir: &api_v1.EmptyDirVolumeSource{},\n\t\t\t},\n\t\t}\n\t\td.Spec.Template.Spec.Volumes = append(d.Spec.Template.Spec.Volumes, volume)\n\t}\n\n\tresult = append(result, d)\n\n\treturn result, nil\n}", "func templateForBlackBoxExporterDeployment(blackBoxImage string) appsv1.Deployment {\n\tlabels := blackbox.GenerateBlackBoxLables()\n\tlabelSelectors := metav1.LabelSelector{\n\t\tMatchLabels: labels}\n\t// hardcode the replicasize for no\n\t//replicas := m.Spec.Size\n\tvar replicas int32 = 1\n\n\tdep := appsv1.Deployment{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: blackbox.BlackBoxName,\n\t\t\tNamespace: blackbox.BlackBoxNamespace,\n\t\t\tLabels: labels,\n\t\t},\n\t\tSpec: appsv1.DeploymentSpec{\n\t\t\tReplicas: &replicas,\n\t\t\tSelector: 
&labelSelectors,\n\t\t\tTemplate: corev1.PodTemplateSpec{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tLabels: labels,\n\t\t\t\t},\n\t\t\t\tSpec: corev1.PodSpec{\n\t\t\t\t\tContainers: []corev1.Container{{\n\t\t\t\t\t\tImage: blackBoxImage,\n\t\t\t\t\t\tName: \"blackbox-exporter\",\n\t\t\t\t\t\tPorts: []corev1.ContainerPort{{\n\t\t\t\t\t\t\tContainerPort: blackbox.BlackBoxPortNumber,\n\t\t\t\t\t\t\tName: blackbox.BlackBoxPortName,\n\t\t\t\t\t\t}},\n\t\t\t\t\t}},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\treturn dep\n}", "func (a *HyperflexApiService) CreateHyperflexClusterBackupPolicyDeploymentExecute(r ApiCreateHyperflexClusterBackupPolicyDeploymentRequest) (*HyperflexClusterBackupPolicyDeployment, *http.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = http.MethodPost\n\t\tlocalVarPostBody interface{}\n\t\tformFiles []formFile\n\t\tlocalVarReturnValue *HyperflexClusterBackupPolicyDeployment\n\t)\n\n\tlocalBasePath, err := a.client.cfg.ServerURLWithContext(r.ctx, \"HyperflexApiService.CreateHyperflexClusterBackupPolicyDeployment\")\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, &GenericOpenAPIError{error: err.Error()}\n\t}\n\n\tlocalVarPath := localBasePath + \"/api/v1/hyperflex/ClusterBackupPolicyDeployments\"\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\tif r.hyperflexClusterBackupPolicyDeployment == nil {\n\t\treturn localVarReturnValue, nil, reportError(\"hyperflexClusterBackupPolicyDeployment is required and must be specified\")\n\t}\n\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{\"application/json\"}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\tif r.ifMatch != nil {\n\t\tlocalVarHeaderParams[\"If-Match\"] = parameterToString(*r.ifMatch, \"\")\n\t}\n\tif r.ifNoneMatch != nil {\n\t\tlocalVarHeaderParams[\"If-None-Match\"] = parameterToString(*r.ifNoneMatch, \"\")\n\t}\n\t// body params\n\tlocalVarPostBody = r.hyperflexClusterBackupPolicyDeployment\n\treq, err := a.client.prepareRequest(r.ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, formFiles)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := a.client.callAPI(req)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tlocalVarHTTPResponse.Body = ioutil.NopCloser(bytes.NewBuffer(localVarBody))\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := &GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 400 {\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil 
{\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 401 {\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 403 {\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 404 {\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tvar v Error\n\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\tif err != nil {\n\t\t\tnewErr.error = err.Error()\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tnewErr.model = v\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := &GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHTTPResponse, nil\n}", "func (d *Deployer) Deploy(obj *unstructured.Unstructured) error {\n\tfound := &unstructured.Unstructured{}\n\tfound.SetGroupVersionKind(obj.GroupVersionKind())\n\terr := d.client.Get(context.TODO(), types.NamespacedName{Name: obj.GetName(), Namespace: obj.GetNamespace()}, found)\n\tif err != nil {\n\t\tif errors.IsNotFound(err) {\n\t\t\tlog.Info(\"Create\", \"Kind:\", obj.GroupVersionKind(), \"Name:\", obj.GetName())\n\t\t\treturn d.client.Create(context.TODO(), obj)\n\t\t}\n\t\treturn err\n\t}\n\n\t// if resource has annotation skip-creation-if-exist: true, don't update it to keep customized changes from users\n\tmetadata, ok := obj.Object[\"metadata\"].(map[string]interface{})\n\tif ok {\n\t\tannotations, ok := metadata[\"annotations\"].(map[string]interface{})\n\t\tif ok && annotations != nil && annotations[config.AnnotationSkipCreation] != nil {\n\t\t\tif strings.ToLower(annotations[config.AnnotationSkipCreation].(string)) == \"true\" {\n\t\t\t\tlog.Info(\"Skip creation\", \"Kind:\", obj.GroupVersionKind(), \"Name:\", obj.GetName())\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}\n\n\tdeployerFn, ok := d.deployerFns[found.GetKind()]\n\tif ok {\n\t\treturn deployerFn(obj, found)\n\t}\n\treturn nil\n}", "func NewDeploymentsCommand() *cobra.Command {\n\n\tcmd := createListCommand(cmdListDeployments, \"deployments\", \"\")\n\tcmd.Flags().StringVar(&deploymentParams.Name, \"name\", \"\", \"Filter deployments by 
name\")\n\tcmd.Flags().StringVar(&deploymentParams.StackVersionID, \"stack-version-id\", \"\", \"Filter deployments by stack_version_id\")\n\tcmd.Flags().StringVar(&deploymentParams.ProjectID, \"project-id\", \"\", \"Filter deployments by project_id\")\n\t//cmd.Flags().StringVar(&deploymentParams.ID, \"workload-id\", \"\", \"Filter deployments by workload_id\")\n\tcmd.Flags().StringVar(&deploymentParams.Workload.WorkloadType, \"workload-type\", \"\", \"Filter deployments by workload_type\")\n\n\t// Get\n\tgetCmd := createGetCommand(cmdGetDeployments, \"deployment\", \"\")\n\tcmd.AddCommand(getCmd)\n\n\t// Create\n\tcreateCmd := NewDeploymentsCreateCommand()\n\tcmd.AddCommand(createCmd)\n\n\t// Update\n\tupdateCmd := createUpdateCommand(cmdUpdateDeployments, \"deployment\", \"\")\n\tcmd.AddCommand(updateCmd)\n\n\t// Delete\n\tdeleteCmd := createDeleteCommand(cmdDeleteDeployments, \"deployment\", \"\")\n\tcmd.AddCommand(deleteCmd)\n\n\t// Pods\n\t//podsCmd := NewDeploymentsPodsCommand()\n\t//cmd.AddCommand(podsCmd)\n\n\t// Override\n\toverrideCmd := NewDeploymentsOverridesCommand()\n\tcmd.AddCommand(overrideCmd)\n\n\t// Scale Component\n\tscaleComponentCmd := NewDeploymentsScaleCommand()\n\tcmd.AddCommand(scaleComponentCmd)\n\n\treturn cmd\n}", "func (r *ReconcileGrafana) createDeployment(cr *i8ly.Grafana, resourceName string) error {\n\tresourceHelper := newResourceHelper(cr)\n\tresource, err := resourceHelper.createResource(resourceName)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\trawResource := newUnstructuredResourceMap(resource.(*unstructured.Unstructured))\n\tvar extraVolumeMounts []interface{}\n\n\t// Extra secrets to be added as volumes?\n\tif len(cr.Spec.Secrets) > 0 {\n\t\tvolumes := rawResource.access(\"spec\").access(\"template\").access(\"spec\").get(\"volumes\").([]interface{})\n\n\t\tfor _, secret := range cr.Spec.Secrets {\n\t\t\tvolumeName := fmt.Sprintf(\"secret-%s\", secret)\n\t\t\tlog.Info(fmt.Sprintf(\"adding volume for secret '%s' as '%s'\", secret, volumeName))\n\t\t\tvolumes = append(volumes, core.Volume{\n\t\t\t\tName: volumeName,\n\t\t\t\tVolumeSource: core.VolumeSource{\n\t\t\t\t\tSecret: &core.SecretVolumeSource{\n\t\t\t\t\t\tSecretName: secret,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t})\n\t\t\textraVolumeMounts = append(extraVolumeMounts, map[string]interface{}{\n\t\t\t\t\"name\": volumeName,\n\t\t\t\t\"readOnly\": true,\n\t\t\t\t\"mountPath\": common.SecretsMountDir + secret,\n\t\t\t})\n\t\t}\n\n\t\trawResource.access(\"spec\").access(\"template\").access(\"spec\").set(\"volumes\", volumes)\n\t}\n\n\t// Extra config maps to be added as volumes?\n\tif len(cr.Spec.ConfigMaps) > 0 {\n\t\tvolumes := rawResource.access(\"spec\").access(\"template\").access(\"spec\").get(\"volumes\").([]interface{})\n\n\t\tfor _, configmap := range cr.Spec.ConfigMaps {\n\t\t\tvolumeName := fmt.Sprintf(\"configmap-%s\", configmap)\n\t\t\tlog.Info(fmt.Sprintf(\"adding volume for configmap '%s' as '%s'\", configmap, volumeName))\n\t\t\tvolumes = append(volumes, core.Volume{\n\t\t\t\tName: volumeName,\n\t\t\t\tVolumeSource: core.VolumeSource{\n\t\t\t\t\tConfigMap: &core.ConfigMapVolumeSource{\n\t\t\t\t\t\tLocalObjectReference: core.LocalObjectReference{\n\t\t\t\t\t\t\tName: configmap,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t})\n\t\t\textraVolumeMounts = append(extraVolumeMounts, map[string]interface{}{\n\t\t\t\t\"name\": volumeName,\n\t\t\t\t\"readOnly\": true,\n\t\t\t\t\"mountPath\": common.ConfigMapsMountDir + 
configmap,\n\t\t\t})\n\t\t}\n\n\t\trawResource.access(\"spec\").access(\"template\").access(\"spec\").set(\"volumes\", volumes)\n\t}\n\n\t// Extra containers to add to the deployment?\n\tif len(cr.Spec.Containers) > 0 {\n\t\t// Otherwise append extra containers before submitting the resource\n\t\tcontainers := rawResource.access(\"spec\").access(\"template\").access(\"spec\").get(\"containers\").([]interface{})\n\n\t\tfor _, container := range cr.Spec.Containers {\n\t\t\tcontainers = append(containers, container)\n\t\t\tlog.Info(fmt.Sprintf(\"adding extra container '%v' to '%v'\", container.Name, common.GrafanaDeploymentName))\n\t\t}\n\n\t\trawResource.access(\"spec\").access(\"template\").access(\"spec\").set(\"containers\", containers)\n\t}\n\n\t// Append extra volume mounts to all containers\n\tif len(extraVolumeMounts) > 0 {\n\t\tcontainers := rawResource.access(\"spec\").access(\"template\").access(\"spec\").get(\"containers\").([]interface{})\n\n\t\tfor _, container := range containers {\n\t\t\tvolumeMounts := container.(map[string]interface{})[\"volumeMounts\"].([]interface{})\n\t\t\tvolumeMounts = append(volumeMounts, extraVolumeMounts...)\n\t\t\tcontainer.(map[string]interface{})[\"volumeMounts\"] = volumeMounts\n\t\t}\n\t}\n\n\treturn r.deployResource(cr, resource, resourceName)\n\n}", "func HandleDeploy(w http.ResponseWriter, r *http.Request) {\r\n\r\n\tif r.Method == \"POST\" {\r\n\r\n\t\tvar err error\r\n\r\n\t\turlPart := strings.Split(r.URL.Path, \"/\")\r\n\r\n\t\tname := urlPart[2]\r\n\r\n\t\tif name != \"\" {\r\n\r\n\t\t\t//basis is optionally passed as qs param\r\n\t\t\tbasis := r.URL.Query().Get(\"basis\")\r\n\r\n\t\t\tdefer r.Body.Close()\r\n\r\n\t\t\tbody, _ := ioutil.ReadAll(r.Body)\r\n\r\n\t\t\tbodyString := string(body)\r\n\r\n\t\t\tvm := otto.New()\r\n\r\n\t\t\t//check it compiles\r\n\t\t\tif script, err := vm.Compile(name, bodyString); err == nil {\r\n\r\n\t\t\t\tif hash, err := storage.Set(name, script, basis); err == nil {\r\n\t\t\t\t\tfmt.Printf(\"Deployed Script %s (%s)\\n\", name, hash)\r\n\t\t\t\t\tw.Write([]byte(hash))\r\n\t\t\t\t\treturn\r\n\t\t\t\t}\r\n\t\t\t}\r\n\r\n\t\t\tw.Write([]byte(err.Error()))\r\n\t\t\thttp.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)\r\n\r\n\t\t\treturn\r\n\t\t}\r\n\t}\r\n\r\n\thttp.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)\r\n\r\n}" ]
[ "0.7245263", "0.7115578", "0.69355154", "0.688241", "0.68323797", "0.67257977", "0.66148096", "0.64859754", "0.6411792", "0.6330559", "0.6227838", "0.6215856", "0.6212143", "0.6204911", "0.61451054", "0.6140682", "0.6136553", "0.61164033", "0.61146384", "0.61137414", "0.61049575", "0.61037695", "0.6099287", "0.6072263", "0.6044357", "0.60353625", "0.6032938", "0.60265654", "0.60167414", "0.6008494", "0.59936583", "0.5946707", "0.5930266", "0.5908729", "0.5900772", "0.58793056", "0.58537257", "0.5850564", "0.58379704", "0.57901937", "0.5771238", "0.5769143", "0.57665336", "0.57629025", "0.57547724", "0.57547367", "0.57244354", "0.57204545", "0.5718729", "0.57167274", "0.56953174", "0.5689168", "0.5688823", "0.5688178", "0.56852597", "0.5669781", "0.56690645", "0.5668316", "0.566764", "0.5644969", "0.5637591", "0.5624548", "0.5620389", "0.5587221", "0.55774355", "0.55766624", "0.5540646", "0.5539962", "0.5526645", "0.55263555", "0.5518552", "0.5517418", "0.55166805", "0.5512473", "0.5508479", "0.55066484", "0.5492462", "0.54796237", "0.54707956", "0.5455769", "0.5455065", "0.54539424", "0.545336", "0.5452047", "0.54515195", "0.54428905", "0.5438302", "0.543798", "0.5432821", "0.5425571", "0.5420892", "0.54012287", "0.53917545", "0.5390682", "0.538782", "0.5379507", "0.536944", "0.53685266", "0.5368197", "0.53662187" ]
0.7144714
1
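For anyone consuming this dump programmatically, here is a minimal sketch of one way to read rows of this shape and compare the positive document's score against its mined negatives, like the score list, score, and rank values just above. Assumptions, stated plainly: the rows are serialized as newline-delimited JSON, and the lowercase field names in the struct tags are inferred from the dataset's column listing rather than confirmed by the dump itself.

package main

import (
	"bufio"
	"encoding/json"
	"fmt"
	"os"
	"strconv"
)

// row mirrors the assumed per-row schema; unknown fields (e.g. the
// metadata dict) are ignored by encoding/json during unmarshaling.
type row struct {
	Query          string   `json:"query"`
	Document       string   `json:"document"`
	Negatives      []string `json:"negatives"`
	NegativeScores []string `json:"negative_scores"`
	DocumentScore  string   `json:"document_score"`
	DocumentRank   string   `json:"document_rank"`
}

func main() {
	sc := bufio.NewScanner(os.Stdin)
	sc.Buffer(make([]byte, 0, 1<<20), 1<<26) // rows can be very large
	for sc.Scan() {
		var r row
		if err := json.Unmarshal(sc.Bytes(), &r); err != nil {
			continue // skip rows that do not parse
		}
		pos, err := strconv.ParseFloat(r.DocumentScore, 64)
		if err != nil {
			continue
		}
		beaten := 0
		for _, s := range r.NegativeScores {
			if neg, err := strconv.ParseFloat(s, 64); err == nil && pos > neg {
				beaten++
			}
		}
		fmt.Printf("rank=%s positive outscores %d of %d negatives\n",
			r.DocumentRank, beaten, len(r.NegativeScores))
	}
}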
ContainsFilterTag determines if a Deployment has a release matching filterTag
func (deployment *Deployment) ContainsFilterTag(filterTag string) bool {
	if filterTag == "" {
		return true
	}
	for _, release := range deployment.Releases {
		if release.Name == filterTag {
			return true
		}
	}
	return false
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func isReleaseTag(eventType string, payload api.WebhookGithub) bool {\n\tif api.GithubWebhookPush == eventType {\n\t\tif nil != payload[api.GithubWebhookFlagRef] &&\n\t\t\tstrings.Contains(payload[api.GithubWebhookFlagRef].(string),\n\t\t\t\tapi.GithubWebhookFlagTags) {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}", "func (manifest Data) Contains(imageTag string) bool {\n\tcontains := false\n\tfor _, tag := range manifest.Tags {\n\t\tif imageTag == tag {\n\t\t\tcontains = true\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn contains\n}", "func TagNameContains(v string) predicate.GithubRelease {\n\treturn predicate.GithubRelease(sql.FieldContains(FieldTagName, v))\n}", "func ApplyFilters(secret v1.Secret, sub *appv1alpha1.Subscription) (v1.Secret, bool) {\n\tif klog.V(utils.QuiteLogLel) {\n\t\tfnName := utils.GetFnName()\n\t\tklog.Infof(\"Entering: %v()\", fnName)\n\n\t\tdefer klog.Infof(\"Exiting: %v()\", fnName)\n\t}\n\n\tsecret = CleanUpObject(secret)\n\n\tif sub.Spec.PackageFilter != nil {\n\t\tif sub.Spec.Package != \"\" && sub.Spec.Package != secret.GetName() {\n\t\t\tklog.Info(\"Name does not match, skiping:\", sub.Spec.Package, \"|\", secret.GetName())\n\t\t\treturn secret, false\n\t\t}\n\n\t\tsubAnno := sub.GetAnnotations()\n\t\tklog.V(10).Info(\"checking annotations filter:\", subAnno)\n\n\t\tif subAnno != nil {\n\t\t\tsecretsAnno := secret.GetAnnotations()\n\t\t\tfor k, v := range subAnno {\n\t\t\t\tif secretsAnno[k] != v {\n\t\t\t\t\tklog.Info(\"Annotation filter does not match:\", k, \"|\", v, \"|\", secretsAnno[k])\n\t\t\t\t\treturn secret, false\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn secret, true\n}", "func matchTag(ctxt *build.Context, name string, allTags map[string]bool) bool {\n\tif allTags != nil {\n\t\tallTags[name] = true\n\t}\n\n\t// special tags\n\tif ctxt.CgoEnabled && name == \"cgo\" {\n\t\treturn true\n\t}\n\tif name == ctxt.GOOS || name == ctxt.GOARCH || name == ctxt.Compiler {\n\t\treturn true\n\t}\n\tif ctxt.GOOS == \"android\" && name == \"linux\" {\n\t\treturn true\n\t}\n\tif ctxt.GOOS == \"illumos\" && name == \"solaris\" {\n\t\treturn true\n\t}\n\tif ctxt.GOOS == \"ios\" && name == \"darwin\" {\n\t\treturn true\n\t}\n\tif name == \"unix\" && unixOS[ctxt.GOOS] {\n\t\treturn true\n\t}\n\tif name == \"boringcrypto\" {\n\t\tname = \"goexperiment.boringcrypto\" // boringcrypto is an old name for goexperiment.boringcrypto\n\t}\n\n\t// other tags\n\tfor _, tag := range ctxt.BuildTags {\n\t\tif tag == name {\n\t\t\treturn true\n\t\t}\n\t}\n\ttoolTags := extractToolTags(ctxt)\n\tfor _, tag := range toolTags {\n\t\tif tag == name {\n\t\t\treturn true\n\t\t}\n\t}\n\tfor _, tag := range ctxt.ReleaseTags {\n\t\tif tag == name {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}", "func TagContains(t kappnavv1.Tag, substr string) bool {\n\treturn strings.Contains(string(t), substr)\n}", "func FilterDeployments(c *cli.Context) []Kind {\n\targs := c.Args()\n\tuid := c.String(\"uid\")\n\tlabel := c.String(\"label\")\n\tnamespace := c.String(\"namespace\")\n\n\tvar candidates []Kind\n\tvar found []Kind\n\n\t// check args which should contains pod names\n\tfor _, v := range GetDeployments() {\n\t\tif c.NArg() > 0 {\n\t\t\tfor _, a := range args {\n\t\t\t\tif utils.Match(v.Name, a, true) {\n\t\t\t\t\tcandidates = append(candidates, v)\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tcandidates = append(candidates, v)\n\t\t}\n\t}\n\n\tfor _, v := range candidates {\n\t\to := v.(*Deployment)\n\t\t// check uid\n\t\tif uid != \"\" && !utils.Match(o.UID, uid, true) 
{\n\t\t\tcontinue\n\t\t}\n\t\t// check namespace\n\t\tif namespace != \"\" && !utils.Match(o.Namespace, namespace, true) {\n\t\t\tcontinue\n\t\t}\n\n\t\t// check label\n\t\tif label != \"\" {\n\t\t\t// one or more labels may be provided\n\t\t\tlabelNotFound := false\n\t\t\tfor _, l := range strings.Split(label, \";\") {\n\t\t\t\tif !o.LabelFound(l) {\n\t\t\t\t\tlabelNotFound = true\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tif labelNotFound {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\t// found it if it reachs this point\n\t\tfound = append(found, o)\n\t}\n\n\treturn found\n}", "func IsReleasedTagVersion(version string) bool {\n\treturn regexp.MustCompile(`^v\\d+\\.\\d+\\.\\d$`).MatchString(version)\n}", "func (m Mineral) HasTag(tag string) bool {\n\tfor _, t := range m.Tags {\n\t\tif t == tag {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}", "func (s Space) HasTag(needle string) bool {\n\tisPrefix := strings.HasSuffix(needle, \"/\")\n\tfor i := range s.Tags {\n\t\tswitch isPrefix {\n\t\tcase true:\n\t\t\tif strings.HasPrefix(s.Tags[i], needle) {\n\t\t\t\treturn true\n\t\t\t}\n\t\tcase false:\n\t\t\tif s.Tags[i] == needle {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\treturn false\n}", "func (r TaggedResource) FilterThroughTags(filterTags []Tag) bool {\n\tif len(filterTags) == 0 {\n\t\treturn true\n\t}\n\n\ttagMatches := 0\n\n\tfor _, resourceTag := range r.Tags {\n\t\tfor _, filterTag := range filterTags {\n\t\t\tif resourceTag.Key == filterTag.Key {\n\t\t\t\tr, _ := regexp.Compile(filterTag.Value)\n\t\t\t\tif r.MatchString(resourceTag.Value) {\n\t\t\t\t\ttagMatches++\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn tagMatches == len(filterTags)\n}", "func (s *DockerKubeletService) HasFilter(filter containers.FilterType) bool {\n\treturn false\n}", "func (dc *DockerClient) ValidTag(desiredTag, repository string) (bool, error) {\n\thub, err := registry.New(dc.registryURL, dc.username, dc.password)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\ttags, err := hub.Tags(repository)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tfor _, tag := range tags {\n\t\tif tag == desiredTag {\n\t\t\treturn true, nil\n\t\t}\n\t}\n\n\treturn false, nil\n}", "func IsDeployment() bool {\n\tenv := os.Getenv(\"ENV\")\n\treturn env == dev || env == prod\n}", "func (o *VulnerabilitiesRequest) HasReleasever() bool {\n\tif o != nil && o.Releasever != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func doesAPIContainAnyMatchingTag(tags, apiTags []string) bool {\n\tfor _, apiTag := range apiTags {\n\t\tapiTag = strings.ToLower(apiTag)\n\t\tfor _, tag := range tags {\n\t\t\tif tag == apiTag {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\treturn false\n}", "func matchesDeploymentSpec(matchConfig MatchConfig, deployment v1.DeploymentSpec) bool {\n\treturn MatchesPodSpec(matchConfig, deployment.Template.Spec)\n}", "func lookUpJobRelease(releases []*manifest.Release, jobRelease string) bool {\n\tfor _, release := range releases {\n\t\tif release.Name == jobRelease {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}", "func isAutoCreateReleaseTagGitlab(payload api.WebhookGitlab) bool {\n\tif nil != payload[api.GitlabWebhookFlagRef] &&\n\t\tstrings.Contains(payload[api.GitlabWebhookFlagRef].(string), api.GitlabWebhookFlagTags) &&\n\t\tstrings.HasSuffix(payload[api.GitlabWebhookFlagRef].(string), api.AutoCreateTagFlag) {\n\t\treturn true\n\t}\n\treturn false\n}", "func (target *Target) FilterByTag(name string, value string) {\n\ttarget.Tags = append(target.Tags, Tag{Key: name, Value: value})\n}", "func 
TagsFilter(t map[string]string) Filter {\n\tj := tagsEncoder(t)\n\treturn Param(\"tags\", j)\n}", "func HasTag(tags []string, requiredTag string) bool {\n\tfor _, tag := range tags {\n\t\tif tag == requiredTag {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func isAutoCreateReleaseTag(payload api.WebhookGithub) bool {\n\tif nil != payload[api.GithubWebhookFlagRef] &&\n\t\tstrings.Contains(payload[api.GithubWebhookFlagRef].(string), api.GithubWebhookFlagTags) &&\n\t\tstrings.HasSuffix(payload[api.GithubWebhookFlagRef].(string), api.AutoCreateTagFlag) {\n\t\treturn true\n\t}\n\treturn false\n}", "func IsRelease() bool {\n\tout, _ := strconv.ParseBool(__RELEASE__)\n\treturn out\n}", "func (m *ccMetric) HasTag(key string) bool {\n\t_, ok := m.tags[key]\n\treturn ok\n}", "func hasEC2Tag(queryTag ec2.Tag, allResourceTags []*ec2.Tag) bool {\n\tfor _, resourceTag := range allResourceTags {\n\t\tif queryTag.Key != nil && resourceTag.Key != nil && *queryTag.Key == *resourceTag.Key {\n\t\t\tif queryTag.Value == nil {\n\t\t\t\treturn true\n\t\t\t}\n\t\t\tif queryTag.Value != nil && resourceTag.Value != nil && *queryTag.Value == *resourceTag.Value {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\treturn false\n}", "func (t TagSet) Contains(tag string) bool {\n\t_, ok := t[tag]\n\treturn ok\n}", "func hasTag(tags []*ec2.Tag, Key string, value string) bool {\n\tfor i := range tags {\n\t\tif *tags[i].Key == Key && *tags[i].Value == value {\n\t\t\tlog.Printf(\"\\t\\tTag %s already set with value %s\\n\",\n\t\t\t\t*tags[i].Key,\n\t\t\t\t*tags[i].Value)\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func TagNameEQ(v string) predicate.GithubRelease {\n\treturn predicate.GithubRelease(sql.FieldEQ(FieldTagName, v))\n}", "func (profession Profession) HasTag(tag string) bool {\n\tfor _, t := range profession.Tags {\n\t\tif t == tag {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}", "func originVersionTagExists(rackSpec *rackspec.RackSpec) error {\n\trepo, err := git.Init(memory.NewStorage(), nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdomain := gitutil.CutURLToDomain(rackSpec.Source.GIT)\n\n\tauth := rackconfig.GetGITAuth(domain)\n\n\tfilledURL := strings.Replace(rackSpec.Source.GIT, \"https://\", \"https://\"+auth.Username+\":\"+auth.Password+\"@\", -1)\n\n\tremote, err := repo.CreateRemote(&config.RemoteConfig{\n\t\tName: \"rackjobber\",\n\t\tURLs: []string{filledURL},\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treferences, err := remote.List(&git.ListOptions{})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\thasTag := false\n\n\tfor _, ref := range references {\n\t\tif strings.Contains(ref.String(), rackSpec.Version) {\n\t\t\thasTag = true\n\t\t}\n\t}\n\n\tif !hasTag {\n\t\terr := \"the version specified in the rackspec.yml does not correspond to any version-tag on the repository\"\n\t\treturn errors.New(err)\n\t}\n\n\treturn nil\n}", "func tagExists(tag reflect.StructTag, tagName string) bool {\r\n\t_, ok := tag.Lookup(tagName)\r\n\treturn ok\r\n}", "func (l Info) HasTag(tag string) bool {\n\tfor _, t := range l.Tags {\n\t\tif tag == t {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func IsRelease() bool {\n\treturn Release == \"TRUE\"\n}", "func MatchesDeployment(matchConfig MatchConfig, deployment v1.Deployment) bool {\n\tif !matchesDeploymentSpec(matchConfig, deployment.Spec) {\n\t\treturn false\n\t}\n\tmeta := deployment.GetObjectMeta()\n\treturn MatchesAnnotation(matchConfig, meta.GetAnnotations())\n}", "func IsTag(ref string) bool {\n\treturn strings.HasPrefix(ref, 
\"refs/tags/\")\n}", "func isTagInImageStream(is imagev1.ImageStream, imageTag string) bool {\n\t// Loop through the tags in the imagestream's status attribute\n\tfor _, tag := range is.Status.Tags {\n\t\t// look for a matching tag\n\t\tif tag.Tag == imageTag {\n\t\t\t// Return true if found\n\t\t\treturn true\n\t\t}\n\t}\n\t// Return false if not found.\n\treturn false\n}", "func (c *ContainerRepository) IsTag(column string) bool {\n\tfor _, tag := range EntityInfluxPlanning.ContainerTags {\n\t\tif column == string(tag) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func hasImage(filter string, containers []map[string]interface{}) (bool, error) {\n\tmatcher, err := regexp.Compile(filter)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tfor _, x := range containers {\n\t\tif im, found := x[\"image\"]; found {\n\t\t\tif matcher.MatchString(fmt.Sprintf(\"%v\", im)) {\n\t\t\t\treturn true, nil\n\t\t\t}\n\t\t}\n\t}\n\n\treturn false, nil\n}", "func (_TokenVesting *TokenVestingFilterer) FilterReleased(opts *bind.FilterOpts, token []common.Address) (*TokenVestingReleasedIterator, error) {\n\n\tvar tokenRule []interface{}\n\tfor _, tokenItem := range token {\n\t\ttokenRule = append(tokenRule, tokenItem)\n\t}\n\n\tlogs, sub, err := _TokenVesting.contract.FilterLogs(opts, \"Released\", tokenRule)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &TokenVestingReleasedIterator{contract: _TokenVesting.contract, event: \"Released\", logs: logs, sub: sub}, nil\n}", "func (v Value) HasTag(needle string) bool {\n\tisPrefix := strings.HasSuffix(needle, \"/\")\n\tfor i := range v.Tags {\n\t\tswitch isPrefix {\n\t\tcase true:\n\t\t\tif strings.HasPrefix(v.Tags[i], needle) {\n\t\t\t\treturn true\n\t\t\t}\n\t\tcase false:\n\t\t\tif v.Tags[i] == needle {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\treturn false\n}", "func isReleaseVersion(version string) bool {\n\tparts := strings.Split(version, \"-\")\n\n\tif len(parts) > 2 || strings.Contains(version, \"dirty\") || strings.Contains(version, \"-g\") {\n\t\treturn false\n\t}\n\n\treturn true\n}", "func TagNameIn(vs ...string) predicate.GithubRelease {\n\treturn predicate.GithubRelease(sql.FieldIn(FieldTagName, vs...))\n}", "func isImageDeployedToK8s(iTarget model.ImageTarget, kTarget model.K8sTarget) bool {\n\tid := iTarget.ID()\n\tfor _, depID := range kTarget.DependencyIDs() {\n\t\tif depID == id {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func TagName(v string) predicate.GithubRelease {\n\treturn predicate.GithubRelease(sql.FieldEQ(FieldTagName, v))\n}", "func (bp *BasePayload) ApplyFilter(f *Filter) bool {\n\treturn f.Regexp.Match(bp.GetPayload())\n}", "func isImageDeployedToDC(iTarget model.ImageTarget, dcTarget model.DockerComposeTarget) bool {\n\tid := iTarget.ID()\n\tfor _, depID := range dcTarget.DependencyIDs() {\n\t\tif depID == id {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (e *PipelineExpr) HasFilter() bool {\n\tfor _, p := range e.MultiStages {\n\t\tswitch p.(type) {\n\t\tcase *LineFilterExpr, *LabelFilterExpr:\n\t\t\treturn true\n\t\tdefault:\n\t\t\tcontinue\n\t\t}\n\t}\n\treturn false\n}", "func LambdaDeploymentGroup_IsResource(construct awscdk.IConstruct) *bool {\n\t_init_.Initialize()\n\n\tvar returns *bool\n\n\t_jsii_.StaticInvoke(\n\t\t\"monocdk.aws_codedeploy.LambdaDeploymentGroup\",\n\t\t\"isResource\",\n\t\t[]interface{}{construct},\n\t\t&returns,\n\t)\n\n\treturn returns\n}", "func containsTags(a map[string]string, b []*elb.Tag) bool {\n\tfor k, v := range a {\n\t\tt := elbTag(k, v)\n\t\tif 
!containsTag(t, b) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func (ef *Filter) Contains(key string) bool {\n\t_, ok := ef.filter[key]\n\treturn ok\n}", "func (r ReferenceName) IsTag() bool {\n\treturn strings.HasPrefix(string(r), refTagPrefix)\n}", "func (v *View) TagFilter() map[string]string {\n\tfilter := map[string]string{}\n\tfor _, t := range v.tags {\n\t\tp := strings.Split(t, \"=\")\n\t\tif len(p) == 2 {\n\t\t\tfilter[p[0]] = p[1]\n\t\t} else {\n\t\t\tfilter[p[0]] = \"\"\n\t\t}\n\t}\n\treturn filter\n}", "func (a Article) HasTag(aTag string) bool {\n\tfor _, tag := range a.Tags {\n\t\tif aTag == tag.Name {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}", "func (filter *Filter) Contains(other *Filter) bool {\n\tif other == nil {\n\t\treturn true\n\t}\n\taddresses := make(map[common.Address]interface{})\n\tfor _, addr := range filter.Addresses {\n\t\taddresses[addr] = struct{}{}\n\t}\n\tevents := make(map[common.Hash]interface{})\n\tfor _, ev := range filter.EventSigs {\n\t\tevents[ev] = struct{}{}\n\t}\n\n\tfor _, addr := range other.Addresses {\n\t\tif _, ok := addresses[addr]; !ok {\n\t\t\treturn false\n\t\t}\n\t}\n\tfor _, ev := range other.EventSigs {\n\t\tif _, ok := events[ev]; !ok {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func (o BucketLifecycleConfigurationV2RuleFilterOutput) Tag() BucketLifecycleConfigurationV2RuleFilterTagPtrOutput {\n\treturn o.ApplyT(func(v BucketLifecycleConfigurationV2RuleFilter) *BucketLifecycleConfigurationV2RuleFilterTag {\n\t\treturn v.Tag\n\t}).(BucketLifecycleConfigurationV2RuleFilterTagPtrOutput)\n}", "func (c *Cases) Version(v string) bool {\n\tfor _, r := range build.Default.ReleaseTags {\n\t\tif v == r {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (ts TagSet) Has(tag string) bool {\n\treturn ts.Find(tag) != -1\n}", "func TagNameHasSuffix(v string) predicate.GithubRelease {\n\treturn predicate.GithubRelease(sql.FieldHasSuffix(FieldTagName, v))\n}", "func (containerRepository *ContainerRepository) IsTag(column string) bool {\n\tfor _, tag := range recommendation_entity.ContainerTags {\n\t\tif column == string(tag) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func TagNameContainsFold(v string) predicate.GithubRelease {\n\treturn predicate.GithubRelease(sql.FieldContainsFold(FieldTagName, v))\n}", "func anyTagExists(tag reflect.StructTag) bool {\r\n\t_, isEnv := tag.Lookup(\"env\")\r\n\t_, isKv := tag.Lookup(\"kv\")\r\n\t_, isVal := tag.Lookup(\"val\")\r\n\treturn isEnv || isKv || isVal\r\n}", "func ApplyFilter(fargs FilterArgs) bool {\n\tgf := gslbutils.GetGlobalFilter()\n\tif gf == nil {\n\t\tgslbutils.Errf(\"cname: %s, msg: global filter doesn't exist, returning false\", fargs.Cluster)\n\t\treturn false\n\t}\n\tmetaobj, ok := fargs.Obj.(k8sobjects.FilterableObject)\n\tif !ok {\n\t\tgslbutils.Warnf(\"cname: %s, msg: not a meta object, returning\", fargs.Cluster)\n\t\treturn false\n\t}\n\n\t// First see, if there's a namespace filter set for this object's namespace, if not, apply\n\t// the global filter.\n\tgf.GlobalLock.RLock()\n\tdefer gf.GlobalLock.RUnlock()\n\n\tif gf.AppFilter == nil && gf.NSFilter == nil {\n\t\treturn false\n\t}\n\treturn metaobj.ApplyFilter()\n}", "func (f Filter) Contains(filter Filter) bool {\n\treturn f.filters(filter)\n}", "func matchesBuildTags(fpath string, data []byte, bc *build.Context) bool {\n\tdir, name := filepath.Split(fpath)\n\tbc.OpenFile = func(path string) (io.ReadCloser, error) {\n\t\tif path != fpath {\n\t\t\treturn nil, errors.New(\"file not 
found\")\n\t\t}\n\t\treturn ioutil.NopCloser(bytes.NewReader(data)), nil\n\t}\n\tmatch, err := bc.MatchFile(dir, name)\n\treturn err == nil && match\n}", "func IsReleasePayloadStream(stream string) bool {\n\treturn stream == ReleaseImageStream\n}", "func (signal PRBodySubstringsSignal) Matches(ctx context.Context, pullCtx pull.Context, tag string) (bool, string, error) {\n\tlogger := zerolog.Ctx(ctx)\n\n\tif !signal.Enabled() {\n\t\treturn false, \"\", nil\n\t}\n\n\tbody := pullCtx.Body()\n\n\tif body == \"\" {\n\t\tlogger.Debug().Msgf(\"No body content found to match against\")\n\t\treturn false, \"\", nil\n\t}\n\n\tfor _, signalSubstring := range signal {\n\t\tif strings.Contains(body, signalSubstring) {\n\t\t\treturn true, fmt.Sprintf(\"pull request body matches a %s substring: %q\", tag, signalSubstring), nil\n\t\t}\n\t}\n\n\treturn false, \"\", nil\n}", "func (s *Series) match(tags map[string]string) bool {\n\tfor k, v := range tags {\n\t\tif s.Tags[k] != v {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func TagExists(projectID int64, repositoryID int64, version string) (bool, error) {\n\ttags, err := GetTagByVersion(projectID, repositoryID, version)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tif tags == nil {\n\t\treturn false, nil\n\t}\n\treturn true, nil\n}", "func determineVersionFromTagBollocks(tag string, source Source) string {\n\tvar re *regexp.Regexp\n\tif source.TagFilterRegex == \"\" {\n\t\tre = regexp.MustCompile(`^v?(?:\\d+\\.)?(?:\\d+\\.)?(?:\\*|\\d+.*)$`)\n\t\ttag = strings.TrimPrefix(tag, \"v\")\n\t} else {\n\t\tre = regexp.MustCompile(source.TagFilterRegex)\n\t}\n\n\tif re.MatchString(tag) {\n\t\treturn tag\n\t}\n\treturn \"\"\n}", "func (r *Role) HasTag(tag string) bool {\n\tfor _, t := range r.Tags {\n\t\tif t == tag {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}", "func (r *Role) HasTag(tag string) bool {\n\tfor _, t := range r.Tags {\n\t\tif t == tag {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}", "func (o *FiltersSecurityGroup) HasTagKeys() bool {\n\tif o != nil && o.TagKeys != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func Filter(swagger *spec.Swagger, tags []string) *spec.Swagger {\n\tf := &filter{\n\t\tswagger: swagger,\n\t\ttags: tags,\n\t\tpaths: make(map[string]spec.PathItem),\n\t\tdefs: make(map[string]spec.Schema),\n\t}\n\n\treturn f.Run()\n}", "func (o *SpansListRequestAttributes) HasFilter() bool {\n\treturn o != nil && o.Filter != nil\n}", "func IsRelease() bool {\n\treturn false\n}", "func TargetCommitishContains(v string) predicate.GithubRelease {\n\treturn predicate.GithubRelease(sql.FieldContains(FieldTargetCommitish, v))\n}", "func (m SecurityListRequest) HasProduct() bool {\n\treturn m.Has(tag.Product)\n}", "func (t tagOptions) has(tag string) bool {\n\tfor _, opt := range t {\n\t\tif opt == tag {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func accessReviewThresholdMatchesFilter(t types.AccessReviewThreshold, parser predicate.Parser) (bool, error) {\n\tif t.Filter == \"\" {\n\t\treturn true, nil\n\t}\n\tifn, err := parser.Parse(t.Filter)\n\tif err != nil {\n\t\treturn false, trace.Wrap(err)\n\t}\n\tfn, ok := ifn.(predicate.BoolPredicate)\n\tif !ok {\n\t\treturn false, trace.BadParameter(\"unsupported type: %T\", ifn)\n\t}\n\treturn fn(), nil\n}", "func (Var) Filter(pred func(*VarFilterContext) bool) bool { return boolResult }", "func (ev Vars) IsProdLike() bool {\n\treturn ev.ServiceEnv() == ServiceEnvPreprod ||\n\t\tev.ServiceEnv() == ServiceEnvBeta ||\n\t\tev.ServiceEnv() == ServiceEnvProd\n}", 
"func (n *node) HasTag(name string) bool {\n\tfor _, v := range n.tags {\n\t\tif v == name {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (signal LabelsSignal) Matches(ctx context.Context, pullCtx pull.Context, tag string) (bool, string, error) {\n\tlogger := zerolog.Ctx(ctx)\n\n\tif !signal.Enabled() {\n\t\treturn false, \"\", nil\n\t}\n\n\tlabels, err := pullCtx.Labels(ctx)\n\tif err != nil {\n\t\treturn false, \"\", errors.Wrap(err, \"unable to list pull request labels\")\n\t}\n\n\tif len(labels) == 0 {\n\t\tlogger.Debug().Msgf(\"No labels found to match against\")\n\t\treturn false, \"\", nil\n\t}\n\n\tfor _, signalLabel := range signal {\n\t\tfor _, label := range labels {\n\t\t\tif strings.EqualFold(signalLabel, label) {\n\t\t\t\treturn true, fmt.Sprintf(\"pull request has a %s label: %q\", tag, signalLabel), nil\n\t\t\t}\n\t\t}\n\t}\n\n\treturn false, \"\", nil\n}", "func (l *Logger) HasFilter(flag, filterName string) bool {\n\tl.Lock()\n\tdefer l.Unlock()\n\n\tif l.Filters == nil {\n\t\treturn false\n\t}\n\tfilters, ok := l.Filters[flag]\n\tif !ok {\n\t\treturn false\n\t}\n\t_, ok = filters[filterName]\n\treturn ok\n}", "func (o *FiltersNet) HasTagValues() bool {\n\tif o != nil && o.TagValues != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func IngressDeploymentWithFilteringAnnotation(t *testing.T) *appsv1.Deployment {\n\treturn getDeployment(t, \"testingdata/ingress_deployment_with_filtering_annotation.yaml\")\n}", "func TestTags(t *testing.T) {\n awsRegion := \"us-east-2\"\n tagName := \"Flugel-test\"\n tagOwner := \"InfraTeam-test\"\n\n terraformOpts := terraform.WithDefaultRetryableErrors(t, &terraform.Options{\n TerraformDir: \"../\",\n\n //Now i must map the tags.\n Vars: map[string]interface{}{\n \"tag_name\": tagName,\n \"tag_owner\": tagOwner,\n },\n\n //Then set the region to make the deploy in.\n EnvVars: map[string]string{\n \"AWS_DEFAULT_REGION\": awsRegion,\n },\n },\n )\n\n //After all the testing, the infra must be destroyed.\n defer terraform.Destroy(t, terraformOpts)\n\n //Now, let's run the deploy with all the parameters set.\n terraform.InitAndApply(t, terraformOpts)\n\n //I get the instance and bucket id's, and make first verifications.\n instanceID1 := terraform.Output(t, terraformOpts, \"instance_name_web1\")\n instanceTags1 := aws.GetTagsForEc2Instance(t, awsRegion, instanceID1)\n testTag1, containsTag := instanceTags1[\"Name\"]\n assert.True(t, containsTag, \"True\")\n assert.Equal(t, tagName, testTag1)\n testTag2, containsTag := instanceTags1[\"Owner\"]\n assert.True(t, containsTag, \"True\")\n assert.Equal(t, tagOwner, testTag2)\n\n instanceID2 := terraform.Output(t, terraformOpts, \"instance_name_web2\")\n instanceTags2 := aws.GetTagsForEc2Instance(t, awsRegion, instanceID2)\n testTag3, containsTag := instanceTags2[\"Name\"]\n assert.True(t, containsTag, \"True\")\n assert.Equal(t, tagName, testTag3)\n testTag4, containsTag := instanceTags2[\"Owner\"]\n assert.True(t, containsTag, \"True\")\n assert.Equal(t, tagOwner, testTag4)\n\n //It would be easier to simply parse plain text, but as i put myself into this let's ride with it.\n\n lburl := \"http://\" + terraform.Output(t, terraformOpts, \"load_balancer_url\") + \"/index.html\"\n maxRetries := 3\n timeBetweenRetries := 5 * time.Second\n\n http_helper.HttpGetWithRetryWithCustomValidation(t, lburl, nil, maxRetries, timeBetweenRetries, validate)\n\n // There's no module with \"get X bucket tags\", so i get the bucket id from TF, and separately i seek the bucket that contains\n // tags 
\"Name\" and \"Owner\" with the desired content, and make sure the id returned matches the previously deployed bucket. \n bucketID := terraform.Output(t, terraformOpts, \"bucket_id\")\n bucketwithTagN := aws.FindS3BucketWithTag (t, awsRegion, \"Name\", tagName)\n bucketwithTagO := aws.FindS3BucketWithTag (t, awsRegion, \"Owner\", tagOwner)\n assert.Equal(t, bucketwithTagN, bucketID)\n assert.Equal(t, bucketwithTagO, bucketID)\n\n}", "func getDeploymentStatus(c client.Client, name string, namespace string) (bool, error) {\n\t// Check if the Deployment resource exists.\n\tdInstance := &appsv1.Deployment{}\n\terr := c.Get(context.Background(), types.NamespacedName{\n\t\tName: name,\n\t\tNamespace: namespace}, dInstance)\n\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\t// Retrieve the status condition.\n\tfor _, condition := range dInstance.Status.Conditions {\n\t\tif condition.Type == appsv1.DeploymentAvailable {\n\t\t\tif condition.Status == corev1.ConditionTrue {\n\t\t\t\treturn true, nil\n\t\t\t} else {\n\t\t\t\treturn false, fmt.Errorf(\"Deployment Available status condition was %v\", condition.Status)\n\t\t\t}\n\t\t}\n\t}\n\n\t// Did not find the condition\n\treturn false, fmt.Errorf(\"Deployment did not contains an Available status condition\")\n}", "func (o *FiltersSecurityGroup) HasTagValues() bool {\n\tif o != nil && o.TagValues != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *FiltersNatService) HasTagKeys() bool {\n\tif o != nil && o.TagKeys != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o BucketLifecycleConfigurationV2RuleFilterPtrOutput) Tag() BucketLifecycleConfigurationV2RuleFilterTagPtrOutput {\n\treturn o.ApplyT(func(v *BucketLifecycleConfigurationV2RuleFilter) *BucketLifecycleConfigurationV2RuleFilterTag {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Tag\n\t}).(BucketLifecycleConfigurationV2RuleFilterTagPtrOutput)\n}", "func (o *SiteAllOfNameResolutionGcpResolvers) HasInstanceFilter() bool {\n\tif o != nil && o.InstanceFilter != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func shouldDeploy(currentEnvironment, newEnvironment *bitesize.Environment, serviceName string) bool {\n\tcurrentService := currentEnvironment.Services.FindByName(serviceName)\n\tupdatedService := newEnvironment.Services.FindByName(serviceName)\n\n\tif (currentService != nil && currentService.Status.DeployedAt != \"\") || (updatedService != nil && updatedService.Version != \"\") {\n\t\tif diff.ServiceChanged(serviceName) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (o *NetworkingProjectIpCreate) HasTag() bool {\n\tif o != nil && o.Tag != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *SiteAllOfNameResolutionGcpResolvers) HasProjectFilter() bool {\n\tif o != nil && o.ProjectFilter != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func ReleaseExists(actionConfig action.Configuration, name string) (bool, error) {\n\thistClient := action.NewHistory(&actionConfig)\n\thistClient.Max = 1\n\n\t_, err := histClient.Run(name)\n\tif err != nil && err != driver.ErrReleaseNotFound {\n\t\treturn false, fmt.Errorf(\"failed checking for chart history: %w\", err)\n\t}\n\n\treturn err != driver.ErrReleaseNotFound, nil\n}", "func (o *FiltersNet) HasTagKeys() bool {\n\tif o != nil && o.TagKeys != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *FiltersNatService) HasTagValues() bool {\n\tif o != nil && o.TagValues != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func HasForbiddenTag(tags []string) error {\n\tfor _, tag := 
range tags {\n\t\tfor _, forbiddenTag := range secretTypes.ForbiddenTags {\n\t\t\tif tag == forbiddenTag {\n\t\t\t\treturn ForbiddenError{\n\t\t\t\t\tForbiddenTag: tag,\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}" ]
[ "0.63597506", "0.5253143", "0.5221754", "0.5196398", "0.51056296", "0.50931746", "0.4982437", "0.49158007", "0.48898113", "0.48771313", "0.4853815", "0.48363844", "0.48362416", "0.48145217", "0.4810747", "0.47806767", "0.47804633", "0.47619316", "0.4756543", "0.47478878", "0.4724794", "0.4721769", "0.47204432", "0.47124916", "0.47083423", "0.46916395", "0.46713102", "0.4668639", "0.46661854", "0.4662273", "0.46599007", "0.46283042", "0.46261176", "0.4620059", "0.46117654", "0.45978174", "0.45958203", "0.4587566", "0.45829892", "0.45736298", "0.45717987", "0.45595327", "0.4557381", "0.45463243", "0.4541321", "0.45383698", "0.45292267", "0.45275328", "0.45181882", "0.44998854", "0.44872952", "0.44860905", "0.44781068", "0.44374308", "0.44287756", "0.44202092", "0.4420123", "0.4417347", "0.44120297", "0.44115895", "0.4406012", "0.44031197", "0.4390947", "0.43783265", "0.43758765", "0.43746793", "0.43716425", "0.43675733", "0.43653935", "0.4364383", "0.43631062", "0.43631062", "0.43613183", "0.43606317", "0.43563232", "0.4352855", "0.4350568", "0.43491474", "0.43461993", "0.4333008", "0.43280396", "0.43264025", "0.43235663", "0.4323096", "0.43133232", "0.43061975", "0.43036103", "0.43008623", "0.42939323", "0.42922705", "0.42862928", "0.42858827", "0.42849714", "0.42836902", "0.4279431", "0.42657155", "0.42606446", "0.42536634", "0.42529947", "0.42502606" ]
0.83583623
0
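For the row just above (query: ContainsFilterTag), a self-contained usage sketch follows. The Deployment and Release type shapes are assumptions inferred from the method body; the dump does not include their definitions.

package main

import "fmt"

// Release and Deployment are hypothetical shapes consistent with the
// ContainsFilterTag body above; only the fields it touches are modeled.
type Release struct{ Name string }

type Deployment struct{ Releases []Release }

func (deployment *Deployment) ContainsFilterTag(filterTag string) bool {
	if filterTag == "" {
		return true
	}
	for _, release := range deployment.Releases {
		if release.Name == filterTag {
			return true
		}
	}
	return false
}

func main() {
	d := &Deployment{Releases: []Release{{Name: "v1.2.0"}, {Name: "stable"}}}
	fmt.Println(d.ContainsFilterTag("stable"))  // true: a release matches
	fmt.Println(d.ContainsFilterTag(""))        // true: empty filter matches everything
	fmt.Println(d.ContainsFilterTag("v2.0.0"))  // false: no matching release
}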
NewUncertaintyGroup furnishes an UncertaintyGroup for a given set of actions where their quantity is known a priori.
func NewUncertaintyGroup(count uint) UncertaintyGroup {
	return &uncertaintyGroup{
		remaining: count,
		results:   make(chan error),
	}
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func newGroupAtLeastOnce() *Instruction {\n\treturn &Instruction{\n\t\tType: GroupAtLeastOnceInst,\n\t\tName: \"AtLeastOnce\",\n\t}\n}", "func newGroup(groupId string, broadcastChannelCap int64) *group {\n\n\tg := &group{\n\t\tId: groupId,\n\t\tclients: make(map[string]*socketClient),\n\t\tbroadcastChannel: make(chan interface{}, broadcastChannelCap),\n\t\tshutdownChannel: make(chan interface{}),\n\t\tdownChannel: make(chan interface{}, broadcastChannelCap),\n\t}\n\n\tAppLogger.Infof(\"[newGroup] group: %s created\", groupId)\n\treturn g\n}", "func createGroup() []*State {\n\tgroupID := []byte(\"treehouse\")\n\tcreatorKey := NewSignaturePrivateKey()\n\tcreator, _ := NewStateForEmptyGroup(groupID, creatorKey)\n\tstates := []*State{creator}\n\n\tfor k := 1; k < aTestGroupSize; k++ {\n\t\tidentityKey := NewSignaturePrivateKey()\n\t\tleafKey := NewDHPrivateKey()\n\t\toldGPK, _ := states[k-1].SignedGroupPreKey()\n\t\tadd, _ := Join(identityKey, leafKey, oldGPK)\n\n\t\tfor _, s := range states {\n\t\t\ts.HandleUserAdd(add)\n\t\t}\n\n\t\tnewState, _ := NewStateFromGroupPreKey(identityKey, leafKey, oldGPK)\n\t\tstates = append(states, newState)\n\t}\n\n\treturn states\n}", "func (st *buildStatus) newTestSet(testStats *buildstats.TestStats, names []distTestName) (*testSet, error) {\n\tset := &testSet{\n\t\tst: st,\n\t\ttestStats: testStats,\n\t}\n\tfor _, name := range names {\n\t\tset.items = append(set.items, &testItem{\n\t\t\tset: set,\n\t\t\tname: name,\n\t\t\tduration: testStats.Duration(st.BuilderRev.Name, name.Old),\n\t\t\ttake: make(chan token, 1),\n\t\t\tdone: make(chan token),\n\t\t})\n\t}\n\treturn set, nil\n}", "func newExpectedMachineSetCreateAction(cluster *clusteroperator.Cluster, name string) expectedMachineSetCreateAction {\n\treturn expectedMachineSetCreateAction{\n\t\tnamePrefix: getNamePrefixForMachineSet(cluster, name),\n\t}\n}", "func newGroup() *Group {\n\tg := new(Group)\n\tg.handlers = make([]HandlerFunc, 0)\n\treturn g\n}", "func (l *GroupLookup) newKeyGroup(entries []groupKeyListElement) *groupKeyList {\n\tid := l.nextID\n\tl.nextID++\n\treturn &groupKeyList{\n\t\tid: id,\n\t\telements: entries,\n\t}\n}", "func GroupCreateFailure(actionID string, errors []string) *Action {\n\treturn constructFailureAction(actionID, constants.GroupCreateFailure, errors)\n}", "func newGroupMutation(c config, op Op, opts ...groupOption) *GroupMutation {\n\tm := &GroupMutation{\n\t\tconfig: c,\n\t\top: op,\n\t\ttyp: TypeGroup,\n\t\tclearedFields: make(map[string]struct{}),\n\t}\n\tfor _, opt := range opts {\n\t\topt(m)\n\t}\n\treturn m\n}", "func newGroupMutation(c config, op Op, opts ...groupOption) *GroupMutation {\n\tm := &GroupMutation{\n\t\tconfig: c,\n\t\top: op,\n\t\ttyp: TypeGroup,\n\t\tclearedFields: make(map[string]struct{}),\n\t}\n\tfor _, opt := range opts {\n\t\topt(m)\n\t}\n\treturn m\n}", "func newTiKVGroups(c *PingcapV1alpha1Client, namespace string) *tiKVGroups {\n\treturn &tiKVGroups{\n\t\tclient: c.RESTClient(),\n\t\tns: namespace,\n\t}\n}", "func newStatGroup(size uint64) *statGroup {\n\treturn &statGroup{\n\t\tvalues: make([]float64, size),\n\t\tcount: 0,\n\t}\n}", "func NewGroup(thresholds ...*Threshold) *Group {\n\treturn &Group{Thresholds: thresholds}\n}", "func newGroupAnyOrder() *Instruction {\n\treturn &Instruction{\n\t\tType: GroupAnyOrderInst,\n\t\tName: \"AnyOrder\",\n\t}\n}", "func NewGroup(ctx context.Context) *errGroup {\n\tnewCtx, cancel := context.WithCancel(ctx)\n\treturn &errGroup{\n\t\tctx: newCtx,\n\t\tcancel: cancel,\n\t}\n}", "func 
TestProposeBadGroup(t *testing.T) {\n\tdefer leaktest.AfterTest(t)\n\tstopper := stop.NewStopper()\n\tcluster := newTestCluster(nil, 3, stopper, t)\n\tdefer stopper.Stop()\n\terr := <-cluster.nodes[1].SubmitCommand(7, \"asdf\", []byte{})\n\tif err == nil {\n\t\tt.Fatal(\"did not get expected error\")\n\t}\n}", "func (_IFactorySpace *IFactorySpaceTransactor) CreateGroup(opts *bind.TransactOpts) (*types.Transaction, error) {\n\treturn _IFactorySpace.contract.Transact(opts, \"createGroup\")\n}", "func (c *UMemClient) NewCreateUMemcacheGroupRequest() *CreateUMemcacheGroupRequest {\n\treq := &CreateUMemcacheGroupRequest{}\n\n\t// setup request with client config\n\tc.Client.SetupRequest(req)\n\n\t// setup retryable with default retry policy (retry for non-create action and common error)\n\treq.SetRetryable(false)\n\treturn req\n}", "func New(ctx context.Context, concurrency int) (*Group, context.Context) {\n\tif concurrency < 1 {\n\t\tconcurrency = 1\n\t}\n\n\tparent, ctx := errgroup.WithContext(ctx)\n\treturn &Group{\n\t\tlimiter: make(chan struct{}, concurrency),\n\t\tparent: parent,\n\t\tctx: ctx,\n\t}, ctx\n}", "func newExpectedMachineSetDeleteAction(cluster *clusteroperator.Cluster, name string) expectedMachineSetDeleteAction {\n\treturn expectedMachineSetDeleteAction{\n\t\tnamePrefix: getNamePrefixForMachineSet(cluster, name),\n\t}\n}", "func NewErrGroup(ctx context.Context, concurrency int) *ErrGroup {\n\tctx, cancel := context.WithCancel(ctx)\n\treturn &ErrGroup{\n\t\tctx: ctx,\n\t\tcancel: cancel,\n\t\tlimit: make(chan struct{}, concurrency),\n\t}\n}", "func newProviderGroup(k key) *providerGroup {\n\tifaceKey := key{\n\t\tres: reflect.SliceOf(k.res),\n\t\ttyp: ptGroup,\n\t}\n\n\treturn &providerGroup{\n\t\tresult: ifaceKey,\n\t\tpl: parameterList{},\n\t}\n}", "func (ic *iamClient) CreateUserGroupPolicies(ctx context.Context, groupID uint64, request AuthorizationScope) error {\n\tif ic == nil {\n\t\treturn ErrServerNotInit\n\t}\n\n\tvar (\n\t\t_ = \"CreateUserGroupPolicies\"\n\t\tpath = fmt.Sprintf(\"/api/v1/open/management/groups/%v/policies/\", groupID)\n\t)\n\n\tvar (\n\t\turl = ic.opt.GateWayHost + path\n\t\tresp = &BaseResponse{}\n\t)\n\n\tauth, err := ic.generateGateWayAuth(\"\")\n\tif err != nil {\n\t\tklog.Errorf(\"CreateUserGroupPolicies generateGateWayAuth failed: %v\", err)\n\t\treturn err\n\t}\n\n\tresult, body, errs := gorequest.New().\n\t\tTimeout(defaultTimeOut).\n\t\tPost(url).\n\t\tSet(\"Content-Type\", \"application/json\").\n\t\tSet(\"Accept\", \"application/json\").\n\t\tSet(\"X-Bkapi-Authorization\", auth).\n\t\tSetDebug(true).\n\t\tSend(&request).\n\t\tEndStruct(resp)\n\n\tif len(errs) != 0 {\n\t\tklog.Errorf(\"CreateUserGroupPolicies gorequest errors=`%s`\", errs)\n\t\treturn errs[0]\n\t}\n\tif result.StatusCode != http.StatusOK || resp.Code != 0 {\n\t\terrMsg := fmt.Errorf(\"CreateUserGroupPolicies API error: code[%v], body[%v], err[%s]\",\n\t\t\tresult.StatusCode, string(body), resp.Message)\n\t\treturn errMsg\n\t}\n\n\tklog.Infof(\"CreateUserGroupPolicies[%s:%s] successful\", request.System, groupID)\n\treturn nil\n}", "func (_BaseContentSpace *BaseContentSpaceTransactor) CreateGroup(opts *bind.TransactOpts) (*types.Transaction, error) {\n\treturn _BaseContentSpace.contract.Transact(opts, \"createGroup\")\n}", "func newGroupZeroOrMore() *Instruction {\n\treturn &Instruction{\n\t\tType: GroupZeroOrMoreInst,\n\t\tName: \"ZeroOrMore\",\n\t}\n}", "func New() *ApplIDRequestAckGrp {\n\tvar m ApplIDRequestAckGrp\n\treturn &m\n}", "func New() 
*UndlyInstrumentPtysSubGrp {\n\tvar m UndlyInstrumentPtysSubGrp\n\treturn &m\n}", "func (c *UDBClient) NewCreateUDBParamGroupRequest() *CreateUDBParamGroupRequest {\n\treq := &CreateUDBParamGroupRequest{}\n\n\t// setup request with client config\n\tc.Client.SetupRequest(req)\n\n\t// setup retryable with default retry policy (retry for non-create action and common error)\n\treq.SetRetryable(false)\n\treturn req\n}", "func (c *UDBClient) NewCreateUDBParamGroupRequest() *CreateUDBParamGroupRequest {\n\treq := &CreateUDBParamGroupRequest{}\n\n\t// setup request with client config\n\tc.Client.SetupRequest(req)\n\n\t// setup retryable with default retry policy (retry for non-create action and common error)\n\treq.SetRetryable(false)\n\treturn req\n}", "func newBucketGroup(mBuckets int64) bucketGroup {\n\treturn make(bucketGroup, mBuckets)\n}", "func (a *Agent) startNewAction() {\n\tactionTypes := a.mind.actionTypes()\n\n\thighestValue := 0.0\n\tvar bestActionTypes []actionType\n\tfor _, t := range actionTypes {\n\t\tisActive := false\n\t\t// if we currently have an active action, we do not want to start a new action\n\t\tfor _, ac := range a.activity.activeActions {\n\t\t\tif ac.getState() == actionStateActive {\n\t\t\t\tisActive = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tif isActive {\n\t\t\treturn\n\t\t}\n\n\t\t// TODO what if an action cannot be started\n\t\t// highest value is to eat an apple, but there is no apple, we should somehow start thinking\n\t\t// about how to obtain an apple\n\n\t\tv := actionTypeValue(t)\n\t\tif v >= highestValue {\n\t\t\tcanStart := true\n\t\t\tfor startCond := range t.getConditions()[actionConditionTypeStart] {\n\t\t\t\tif !startCond.isSatisfied(a) {\n\t\t\t\t\tcanStart = false\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif canStart {\n\t\t\t\tif v > highestValue {\n\t\t\t\t\thighestValue = v\n\t\t\t\t\tbestActionTypes = []actionType{}\n\t\t\t\t}\n\t\t\t\tbestActionTypes = append(bestActionTypes, t)\n\t\t\t}\n\t\t}\n\t}\n\n\tif len(bestActionTypes) == 0 {\n\t\treturn\n\t}\n\n\tbestActionType := bestActionTypes[rand.Intn(len(bestActionTypes))]\n\tfor startCondition := range bestActionType.getConditions()[actionConditionTypeStart] {\n\t\tif !startCondition.isSatisfied(a) {\n\t\t\treturn\n\t\t}\n\t}\n\n\tnewAction := bestActionType.instantiate().(action)\n\ta.activity.activeActions = append(a.activity.activeActions, newAction)\n\ta.mind.addItem(bestActionType, 1.0)\n\n\t// add pre-action conditions for hypothesis training\n\tfor cond := range a.getConditions() {\n\t\tpreActionConditions := newAction.getType().getConditions()[actionConditionTypeObservedAtStart]\n\t\tpreActionConditions[cond] = true\n\t\tnewAction.getPreConditions()[cond] = true\n\t}\n}", "func newMachineSet(name string, cluster *clusteroperator.Cluster, properlyOwned bool) *clusteroperator.MachineSet {\n\tvar controllerReference metav1.OwnerReference\n\tif properlyOwned {\n\t\ttrueVar := true\n\t\tcontrollerReference = metav1.OwnerReference{\n\t\t\tUID: testClusterUUID,\n\t\t\tAPIVersion: clusteroperator.SchemeGroupVersion.String(),\n\t\t\tKind: \"Cluster\",\n\t\t\tName: cluster.Name,\n\t\t\tController: &trueVar,\n\t\t}\n\t}\n\treturn &clusteroperator.MachineSet{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: name,\n\t\t\tNamespace: cluster.Namespace,\n\t\t\tOwnerReferences: []metav1.OwnerReference{controllerReference},\n\t\t},\n\t}\n}", "func newTiDBGroups(c *PingcapV1alpha1Client, namespace string) *tiDBGroups {\n\treturn &tiDBGroups{\n\t\tclient: c.RESTClient(),\n\t\tns: namespace,\n\t}\n}", "func 
groupCmd(c *cli.Context) error {\n\targs := c.Args()\n\tif !args.Present() {\n\t\tslog.Fatal(\"missing identity file to create the group.toml\")\n\t}\n\tif c.NArg() < 3 {\n\t\tslog.Fatal(\"not enough identities (\", c.NArg(), \") to create a group toml. At least 3!\")\n\t}\n\tvar threshold = key.DefaultThreshold(c.NArg())\n\tif c.IsSet(\"threshold\") {\n\t\tif c.Int(\"threshold\") < threshold {\n\t\t\tslog.Print(\"WARNING: You are using a threshold which is TOO LOW.\")\n\t\t\tslog.Print(\"\t\t It should be at least \", threshold)\n\t\t}\n\t\tthreshold = c.Int(\"threshold\")\n\t}\n\n\tpublics := make([]*key.Identity, c.NArg())\n\tfor i, str := range args {\n\t\tpub := &key.Identity{}\n\t\tslog.Print(\"Reading public identity from \", str)\n\t\tif err := key.Load(str, pub); err != nil {\n\t\t\tslog.Fatal(err)\n\t\t}\n\t\tpublics[i] = pub\n\t}\n\tgroup := key.NewGroup(publics, threshold)\n\tgroupPath := path.Join(fs.Pwd(), gname)\n\tif c.String(\"out\") != \"\" {\n\t\tgroupPath = c.String(\"out\")\n\t}\n\tif err := key.Save(groupPath, group, false); err != nil {\n\t\tslog.Fatal(err)\n\t}\n\tslog.Printf(\"Group file written in %s. Distribute it to all the participants to start the DKG\", groupPath)\n\treturn nil\n}", "func NewGroup(cs []conf.Criteria) ([]*Strategy, error) {\n\tstrats := make([]*Strategy, 0, len(cs))\n\tfor i := range cs {\n\t\tstrat, err := NewStrategy(cs[i])\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tstrats = append(strats, strat)\n\t}\n\n\treturn strats, nil\n}", "func CreateGroup(params types.ContextParams, clientSet apimachinery.ClientSetInterface, groupItems []metadata.Group) []Group {\n\tresults := make([]Group, 0)\n\tfor _, grp := range groupItems {\n\n\t\tresults = append(results, &group{\n\t\t\tgrp: grp,\n\t\t\tparams: params,\n\t\t\tclientSet: clientSet,\n\t\t})\n\t}\n\n\treturn results\n}", "func (c *Client) newServicegroup(servicegroup *Servicegroup) ([]byte, error) {\n\tnagiosURL, err := c.buildURL(\"servicegroup\", \"POST\", \"\", \"\", \"\", \"\")\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdata := setURLParams(servicegroup)\n\n\tbody, err := c.post(data, nagiosURL)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn body, nil\n}", "func (_BaseGroupFactory *BaseGroupFactoryTransactor) CreateGroup(opts *bind.TransactOpts) (*types.Transaction, error) {\n\treturn _BaseGroupFactory.contract.Transact(opts, \"createGroup\")\n}", "func createEmptyTestGroup(t *testing.T, session *session.Session, name string) error {\n\tsvc := iam.New(session)\n\n\tgroupInput := &iam.CreateGroupInput{\n\t\tGroupName: awsgo.String(name),\n\t}\n\n\t_, err := svc.CreateGroup(groupInput)\n\trequire.NoError(t, err)\n\treturn nil\n}", "func (_RandomBeacon *RandomBeaconTransactor) UpdateGroupCreationParameters(opts *bind.TransactOpts, groupCreationFrequency *big.Int, groupLifetime *big.Int, dkgResultChallengePeriodLength *big.Int, dkgResultChallengeExtraGas *big.Int, dkgResultSubmissionTimeout *big.Int, dkgSubmitterPrecedencePeriodLength *big.Int) (*types.Transaction, error) {\n\treturn _RandomBeacon.contract.Transact(opts, \"updateGroupCreationParameters\", groupCreationFrequency, groupLifetime, dkgResultChallengePeriodLength, dkgResultChallengeExtraGas, dkgResultSubmissionTimeout, dkgSubmitterPrecedencePeriodLength)\n}", "func NewGroup(list []*Identity, threshold int, genesis int64, period, catchupPeriod time.Duration,\n\tsch *crypto.Scheme, beaconID string) *Group {\n\treturn &Group{\n\t\tNodes: copyAndSort(list),\n\t\tThreshold: threshold,\n\t\tGenesisTime: 
genesis,\n\t\tPeriod: period,\n\t\tCatchupPeriod: catchupPeriod,\n\t\tScheme: sch,\n\t\tID: beaconID,\n\t}\n}", "func CreateDescribeMultiContainerGroupMetricRequest() (request *DescribeMultiContainerGroupMetricRequest) {\n\trequest = &DescribeMultiContainerGroupMetricRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"Eci\", \"2018-08-08\", \"DescribeMultiContainerGroupMetric\", \"eci\", \"openAPI\")\n\treturn\n}", "func (c *myClient) createDatasetGroup(groupName string, wait bool) (results map[string]interface{}, err error) {\n\tnamespace := \"group\"\n\tgroupObj, err := c.findObjectByName(namespace, groupName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tgroupObjRef := groupObj[\"reference\"]\n\tif groupObjRef == nil {\n\t\turl := fmt.Sprintf(\"%s\", namespace)\n\t\tpostBody := fmt.Sprintf(`{\n\t\t\"type\": \"Group\", \n\t\t\"name\": \"%s\"\n\t\t}`, groupName)\n\t\taction, _, err := c.httpPost(url, postBody)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif wait {\n\t\t\tc.jobWaiter(action)\n\t\t}\n\t\treturn action, err\n\t}\n\tlog.Debug(groupObjRef)\n\tlog.Infof(\"%s already exists\", groupName)\n\treturn nil, err\n\n}", "func (s *BasevhdlListener) ExitGroup_constituent(ctx *Group_constituentContext) {}", "func (visual *Visual) NewGroup(parts []string, effect string) (*Group, error) {\n\tgroup, err := newGroup(parts, effect)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvisual.mux.Lock()\n\tvisual.groups = append(visual.groups, group)\n\tvisual.mux.Unlock()\n\n\treturn group, nil\n}", "func makeCgroup(t *testing.T) *cgroups.Cgroup {\n\tname := fmt.Sprintf(\"unittesting-%s\", uuid.Variant4().String())\n\tcgroup, err := cgroups.New(name)\n\tTestExpectSuccess(t, err)\n\tAddTestFinalizer(func() {\n\t\tTestExpectSuccess(t, cgroup.Destroy())\n\t})\n\treturn cgroup\n}", "func NewGroup(ctx context.Context, options ...GroupOption) *Group {\n\tctx, cancel := context.WithCancel(ctx)\n\tg := &Group{\n\t\tctx: ctx,\n\t\tcancel: cancel,\n\t\tpool: dummyPool{},\n\t\trecover: false,\n\t}\n\tfor _, opt := range options {\n\t\topt(g)\n\t}\n\treturn g\n}", "func NewGroup(ctx context.Context) *Group {\n\tctx, cancel := context.WithCancel(ctx)\n\n\tg := &Group{\n\t\tctx: ctx,\n\t\tcancel: cancel,\n\t\tdone: make(chan struct{}),\n\t}\n\n\tgo g.wait()\n\n\treturn g\n}", "func New(ctx context.Context) *Group {\n\t// Monitor goroutine context and cancelation.\n\tmctx, cancel := context.WithCancel(ctx)\n\n\tg := &Group{\n\t\tctx: ctx,\n\t\tcancel: cancel,\n\n\t\taddC: make(chan struct{}),\n\t\tlenC: make(chan int),\n\t}\n\n\tg.wg.Add(1)\n\tgo func() {\n\t\tdefer g.wg.Done()\n\t\tg.monitor(mctx)\n\t}()\n\n\treturn g\n}", "func (client *PolicyDefinitionsClient) createOrUpdateAtManagementGroupCreateRequest(ctx context.Context, policyDefinitionName string, managementGroupID string, parameters PolicyDefinition, options *PolicyDefinitionsCreateOrUpdateAtManagementGroupOptions) (*policy.Request, error) {\n\turlPath := \"/providers/Microsoft.Management/managementGroups/{managementGroupId}/providers/Microsoft.Authorization/policyDefinitions/{policyDefinitionName}\"\n\tif policyDefinitionName == \"\" {\n\t\treturn nil, errors.New(\"parameter policyDefinitionName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{policyDefinitionName}\", url.PathEscape(policyDefinitionName))\n\tif managementGroupID == \"\" {\n\t\treturn nil, errors.New(\"parameter managementGroupID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{managementGroupId}\", 
url.PathEscape(managementGroupID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-06-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, parameters)\n}", "func (n *Node) CreateKeygroup(kgname string, mutable bool, expiry int, expectError bool) {\n\tstatus, err := n.Client.CreateKeygroup(context.Background(), &client.CreateKeygroupRequest{Keygroup: kgname, Mutable: mutable, Expiry: int64(expiry)})\n\n\tif err != nil && !expectError {\n\t\tlog.Warn().Msgf(\"CreateKeygroup: error %s\", err)\n\t\tn.Errors++\n\t\treturn\n\t}\n\n\tif err == nil && expectError {\n\t\tlog.Warn().Msg(\"CreateKeygroup: Expected Error bot got no error :(\")\n\t\tn.Errors++\n\t\treturn\n\t}\n\n\tif err == nil && !expectError && status.Status == client.EnumStatus_ERROR {\n\t\tlog.Warn().Msgf(\"CreateKeygroup: error %s with status %s\", err, status.Status)\n\t\tn.Errors++\n\t\treturn\n\t}\n}", "func (m *MultiRaft) CreateGroup(groupID uint64, initialMembers []uint64) error {\n\tfor _, id := range initialMembers {\n\t\tif id == 0 {\n\t\t\treturn util.Error(\"Invalid NodeID\")\n\t\t}\n\t}\n\top := &createGroupOp{\n\t\tgroupID,\n\t\tinitialMembers,\n\t\tmake(chan error),\n\t}\n\tm.ops <- op\n\treturn <-op.ch\n}", "func NewGroupFileOps(t mockConstructorTestingTNewGroupFileOps) *GroupFileOps {\n\tmock := &GroupFileOps{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}", "func CreateModifyDesktopsPolicyGroupRequest() (request *ModifyDesktopsPolicyGroupRequest) {\n\trequest = &ModifyDesktopsPolicyGroupRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"ecd\", \"2020-09-30\", \"ModifyDesktopsPolicyGroup\", \"\", \"\")\n\trequest.Method = requests.POST\n\treturn\n}", "func (s *BasevhdlListener) EnterGroup_constituent(ctx *Group_constituentContext) {}", "func WindowGroupNew() (*WindowGroup, error) {\n\tc := C.gtk_window_group_new()\n\tif c == nil {\n\t\treturn nil, nilPtrErr\n\t}\n\treturn wrapWindowGroup(glib.Take(unsafe.Pointer(c))), nil\n}", "func NewGroup()(*Group) {\n m := &Group{\n DirectoryObject: *NewDirectoryObject(),\n }\n odataTypeValue := \"#microsoft.graph.group\";\n m.SetOdataType(&odataTypeValue);\n return m\n}", "func CreateUserGroup(w http.ResponseWriter, r *http.Request) {\n\tfLog := userMgmtLogger.WithField(\"func\", \"CreateUserGroup\").WithField(\"RequestID\", r.Context().Value(constants.RequestID)).WithField(\"path\", r.URL.Path).WithField(\"method\", r.Method)\n\n\tiauthctx := r.Context().Value(constants.HansipAuthentication)\n\tif iauthctx == nil {\n\t\thelper.WriteHTTPResponse(r.Context(), w, http.StatusUnauthorized, \"You are not authorized to access this resource\", nil, nil)\n\t\treturn\n\t}\n\n\tparams, err := helper.ParsePathParams(fmt.Sprintf(\"%s/management/user/{userRecId}/group/{groupRecId}\", apiPrefix), r.URL.Path)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tgroup, err := GroupRepo.GetGroupByRecID(r.Context(), params[\"groupRecId\"])\n\tif err != nil {\n\t\tfLog.Errorf(\"GroupRepo.GetGroupByRecID got %s\", err.Error())\n\t\thelper.WriteHTTPResponse(r.Context(), w, http.StatusInternalServerError, err.Error(), nil, nil)\n\t\treturn\n\t}\n\tif group == nil {\n\t\thelper.WriteHTTPResponse(r.Context(), w, http.StatusNotFound, fmt.Sprintf(\"Group recid %s not found\", 
params[\"groupRecId\"]), nil, nil)\n\t\treturn\n\t}\n\n\tauthCtx := iauthctx.(*hansipcontext.AuthenticationContext)\n\tif !authCtx.IsAdminOfDomain(group.GroupDomain) {\n\t\thelper.WriteHTTPResponse(r.Context(), w, http.StatusForbidden, \"You don't have the right to access group with the specified domain\", nil, nil)\n\t\treturn\n\t}\n\n\tuser, err := UserRepo.GetUserByRecID(r.Context(), params[\"userRecId\"])\n\tif err != nil {\n\t\tfLog.Errorf(\"UserRepo.GetUserByRecID got %s\", err.Error())\n\t\thelper.WriteHTTPResponse(r.Context(), w, http.StatusInternalServerError, err.Error(), nil, nil)\n\t\treturn\n\t}\n\tif user == nil {\n\t\thelper.WriteHTTPResponse(r.Context(), w, http.StatusNotFound, fmt.Sprintf(\"User recid %s not found\", params[\"userRecId\"]), nil, nil)\n\t\treturn\n\t}\n\n\t_, err = UserGroupRepo.CreateUserGroup(r.Context(), user, group)\n\tif err != nil {\n\t\tfLog.Errorf(\"UserGroupRepo.CreateUserGroup got %s\", err.Error())\n\t\thelper.WriteHTTPResponse(r.Context(), w, http.StatusBadRequest, err.Error(), nil, nil)\n\t\treturn\n\t}\n\tRevocationRepo.Revoke(r.Context(), user.Email)\n\thelper.WriteHTTPResponse(r.Context(), w, http.StatusOK, \"User-Group created\", nil, nil)\n}", "func (c *Client) newHostgroup(hostgroup *Hostgroup) ([]byte, error) {\n\tnagiosURL, err := c.buildURL(\"hostgroup\", \"POST\", \"\", \"\", \"\", \"\")\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdata := setURLParams(hostgroup)\n\n\tbody, err := c.post(data, nagiosURL)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn body, nil\n}", "func (rm *resourceManager) newCreateRequestPayload(\n\tr *resource,\n) (*svcsdk.CreateReplicationGroupInput, error) {\n\tres := &svcsdk.CreateReplicationGroupInput{}\n\n\tif r.ko.Spec.AtRestEncryptionEnabled != nil {\n\t\tres.SetAtRestEncryptionEnabled(*r.ko.Spec.AtRestEncryptionEnabled)\n\t}\n\tif r.ko.Spec.AuthToken != nil {\n\t\tres.SetAuthToken(*r.ko.Spec.AuthToken)\n\t}\n\tif r.ko.Spec.AutoMinorVersionUpgrade != nil {\n\t\tres.SetAutoMinorVersionUpgrade(*r.ko.Spec.AutoMinorVersionUpgrade)\n\t}\n\tif r.ko.Spec.AutomaticFailoverEnabled != nil {\n\t\tres.SetAutomaticFailoverEnabled(*r.ko.Spec.AutomaticFailoverEnabled)\n\t}\n\tif r.ko.Spec.CacheNodeType != nil {\n\t\tres.SetCacheNodeType(*r.ko.Spec.CacheNodeType)\n\t}\n\tif r.ko.Spec.CacheParameterGroupName != nil {\n\t\tres.SetCacheParameterGroupName(*r.ko.Spec.CacheParameterGroupName)\n\t}\n\tif r.ko.Spec.CacheSecurityGroupNames != nil {\n\t\tf6 := []*string{}\n\t\tfor _, f6iter := range r.ko.Spec.CacheSecurityGroupNames {\n\t\t\tvar f6elem string\n\t\t\tf6elem = *f6iter\n\t\t\tf6 = append(f6, &f6elem)\n\t\t}\n\t\tres.SetCacheSecurityGroupNames(f6)\n\t}\n\tif r.ko.Spec.CacheSubnetGroupName != nil {\n\t\tres.SetCacheSubnetGroupName(*r.ko.Spec.CacheSubnetGroupName)\n\t}\n\tif r.ko.Spec.Engine != nil {\n\t\tres.SetEngine(*r.ko.Spec.Engine)\n\t}\n\tif r.ko.Spec.EngineVersion != nil {\n\t\tres.SetEngineVersion(*r.ko.Spec.EngineVersion)\n\t}\n\tif r.ko.Spec.GlobalReplicationGroupID != nil {\n\t\tres.SetGlobalReplicationGroupId(*r.ko.Spec.GlobalReplicationGroupID)\n\t}\n\tif r.ko.Spec.KMSKeyID != nil {\n\t\tres.SetKmsKeyId(*r.ko.Spec.KMSKeyID)\n\t}\n\tif r.ko.Spec.MultiAZEnabled != nil {\n\t\tres.SetMultiAZEnabled(*r.ko.Spec.MultiAZEnabled)\n\t}\n\tif r.ko.Spec.NodeGroupConfiguration != nil {\n\t\tf13 := []*svcsdk.NodeGroupConfiguration{}\n\t\tfor _, f13iter := range r.ko.Spec.NodeGroupConfiguration {\n\t\t\tf13elem := &svcsdk.NodeGroupConfiguration{}\n\t\t\tif f13iter.NodeGroupID != nil 
{\n\t\t\t\tf13elem.SetNodeGroupId(*f13iter.NodeGroupID)\n\t\t\t}\n\t\t\tif f13iter.PrimaryAvailabilityZone != nil {\n\t\t\t\tf13elem.SetPrimaryAvailabilityZone(*f13iter.PrimaryAvailabilityZone)\n\t\t\t}\n\t\t\tif f13iter.PrimaryOutpostARN != nil {\n\t\t\t\tf13elem.SetPrimaryOutpostArn(*f13iter.PrimaryOutpostARN)\n\t\t\t}\n\t\t\tif f13iter.ReplicaAvailabilityZones != nil {\n\t\t\t\tf13elemf3 := []*string{}\n\t\t\t\tfor _, f13elemf3iter := range f13iter.ReplicaAvailabilityZones {\n\t\t\t\t\tvar f13elemf3elem string\n\t\t\t\t\tf13elemf3elem = *f13elemf3iter\n\t\t\t\t\tf13elemf3 = append(f13elemf3, &f13elemf3elem)\n\t\t\t\t}\n\t\t\t\tf13elem.SetReplicaAvailabilityZones(f13elemf3)\n\t\t\t}\n\t\t\tif f13iter.ReplicaCount != nil {\n\t\t\t\tf13elem.SetReplicaCount(*f13iter.ReplicaCount)\n\t\t\t}\n\t\t\tif f13iter.ReplicaOutpostARNs != nil {\n\t\t\t\tf13elemf5 := []*string{}\n\t\t\t\tfor _, f13elemf5iter := range f13iter.ReplicaOutpostARNs {\n\t\t\t\t\tvar f13elemf5elem string\n\t\t\t\t\tf13elemf5elem = *f13elemf5iter\n\t\t\t\t\tf13elemf5 = append(f13elemf5, &f13elemf5elem)\n\t\t\t\t}\n\t\t\t\tf13elem.SetReplicaOutpostArns(f13elemf5)\n\t\t\t}\n\t\t\tif f13iter.Slots != nil {\n\t\t\t\tf13elem.SetSlots(*f13iter.Slots)\n\t\t\t}\n\t\t\tf13 = append(f13, f13elem)\n\t\t}\n\t\tres.SetNodeGroupConfiguration(f13)\n\t}\n\tif r.ko.Spec.NotificationTopicARN != nil {\n\t\tres.SetNotificationTopicArn(*r.ko.Spec.NotificationTopicARN)\n\t}\n\tif r.ko.Spec.NumCacheClusters != nil {\n\t\tres.SetNumCacheClusters(*r.ko.Spec.NumCacheClusters)\n\t}\n\tif r.ko.Spec.NumNodeGroups != nil {\n\t\tres.SetNumNodeGroups(*r.ko.Spec.NumNodeGroups)\n\t}\n\tif r.ko.Spec.Port != nil {\n\t\tres.SetPort(*r.ko.Spec.Port)\n\t}\n\tif r.ko.Spec.PreferredCacheClusterAZs != nil {\n\t\tf18 := []*string{}\n\t\tfor _, f18iter := range r.ko.Spec.PreferredCacheClusterAZs {\n\t\t\tvar f18elem string\n\t\t\tf18elem = *f18iter\n\t\t\tf18 = append(f18, &f18elem)\n\t\t}\n\t\tres.SetPreferredCacheClusterAZs(f18)\n\t}\n\tif r.ko.Spec.PreferredMaintenanceWindow != nil {\n\t\tres.SetPreferredMaintenanceWindow(*r.ko.Spec.PreferredMaintenanceWindow)\n\t}\n\tif r.ko.Spec.PrimaryClusterID != nil {\n\t\tres.SetPrimaryClusterId(*r.ko.Spec.PrimaryClusterID)\n\t}\n\tif r.ko.Spec.ReplicasPerNodeGroup != nil {\n\t\tres.SetReplicasPerNodeGroup(*r.ko.Spec.ReplicasPerNodeGroup)\n\t}\n\tif r.ko.Spec.ReplicationGroupDescription != nil {\n\t\tres.SetReplicationGroupDescription(*r.ko.Spec.ReplicationGroupDescription)\n\t}\n\tif r.ko.Spec.ReplicationGroupID != nil {\n\t\tres.SetReplicationGroupId(*r.ko.Spec.ReplicationGroupID)\n\t}\n\tif r.ko.Spec.SecurityGroupIDs != nil {\n\t\tf24 := []*string{}\n\t\tfor _, f24iter := range r.ko.Spec.SecurityGroupIDs {\n\t\t\tvar f24elem string\n\t\t\tf24elem = *f24iter\n\t\t\tf24 = append(f24, &f24elem)\n\t\t}\n\t\tres.SetSecurityGroupIds(f24)\n\t}\n\tif r.ko.Spec.SnapshotARNs != nil {\n\t\tf25 := []*string{}\n\t\tfor _, f25iter := range r.ko.Spec.SnapshotARNs {\n\t\t\tvar f25elem string\n\t\t\tf25elem = *f25iter\n\t\t\tf25 = append(f25, &f25elem)\n\t\t}\n\t\tres.SetSnapshotArns(f25)\n\t}\n\tif r.ko.Spec.SnapshotName != nil {\n\t\tres.SetSnapshotName(*r.ko.Spec.SnapshotName)\n\t}\n\tif r.ko.Spec.SnapshotRetentionLimit != nil {\n\t\tres.SetSnapshotRetentionLimit(*r.ko.Spec.SnapshotRetentionLimit)\n\t}\n\tif r.ko.Spec.SnapshotWindow != nil {\n\t\tres.SetSnapshotWindow(*r.ko.Spec.SnapshotWindow)\n\t}\n\tif r.ko.Spec.Tags != nil {\n\t\tf29 := []*svcsdk.Tag{}\n\t\tfor _, f29iter := range r.ko.Spec.Tags {\n\t\t\tf29elem := 
&svcsdk.Tag{}\n\t\t\tif f29iter.Key != nil {\n\t\t\t\tf29elem.SetKey(*f29iter.Key)\n\t\t\t}\n\t\t\tif f29iter.Value != nil {\n\t\t\t\tf29elem.SetValue(*f29iter.Value)\n\t\t\t}\n\t\t\tf29 = append(f29, f29elem)\n\t\t}\n\t\tres.SetTags(f29)\n\t}\n\tif r.ko.Spec.TransitEncryptionEnabled != nil {\n\t\tres.SetTransitEncryptionEnabled(*r.ko.Spec.TransitEncryptionEnabled)\n\t}\n\tif r.ko.Spec.UserGroupIDs != nil {\n\t\tf31 := []*string{}\n\t\tfor _, f31iter := range r.ko.Spec.UserGroupIDs {\n\t\t\tvar f31elem string\n\t\t\tf31elem = *f31iter\n\t\t\tf31 = append(f31, &f31elem)\n\t\t}\n\t\tres.SetUserGroupIds(f31)\n\t}\n\n\treturn res, nil\n}", "func NewAttachGroupPolicyRequestWithoutParam() *AttachGroupPolicyRequest {\n\n return &AttachGroupPolicyRequest{\n JDCloudRequest: core.JDCloudRequest{\n URL: \"/group/{groupName}:attachGroupPolicy\",\n Method: \"POST\",\n Header: nil,\n Version: \"v1\",\n },\n }\n}", "func (s *Server) NewTest(ctx context.Context, request *NewTest_Request) (response *NewTest_Response, err error) {\n\tlogging.Log(fmt.Sprintf(\"NewTest - incoming request: %+v\", request))\n\tresponse = new(NewTest_Response)\n\n\tif s.Groups[request.GroupName] == nil {\n\t\treturn response, logging.LogErr(errors.New(ErrNotInGroup))\n\t}\n\n\ttest := &config.Test{\n\t\tTypeInternal: request.Type,\n\t\tSizeInternal: int(request.Size),\n\t\tIntervalInternal: int(request.Interval),\n\t\tAmountInternal: int(request.Amount),\n\t}\n\n\ts.Lock.Lock()\n\t_, ok := s.Tests[request.GroupName]\n\tif !ok {\n\t\ts.Tests[request.GroupName] = make(map[int64]*config.Test)\n\t}\n\n\ts.Tests[request.GroupName][request.TestN] = test\n\n\ts.Lock.Unlock()\n\n\treturn response, logging.LogErr(err)\n}", "func (c *testCluster) createGroup(groupID roachpb.RangeID, firstNode, numReplicas int) {\n\tvar replicaIDs []uint64\n\tfor i := 0; i < numReplicas; i++ {\n\t\tnodeIndex := firstNode + i\n\t\treplicaIDs = append(replicaIDs, uint64(c.nodes[nodeIndex].nodeID))\n\t\tc.groups[groupID] = append(c.groups[groupID], nodeIndex)\n\t}\n\tfor i := 0; i < numReplicas; i++ {\n\t\tgs, err := c.storages[firstNode+i].GroupStorage(groupID, 0)\n\t\tif err != nil {\n\t\t\tc.t.Fatal(err)\n\t\t}\n\t\tmemStorage := gs.(*blockableGroupStorage).s.(*raft.MemoryStorage)\n\t\tif err := memStorage.SetHardState(raftpb.HardState{\n\t\t\tCommit: 10,\n\t\t\tTerm: 5,\n\t\t}); err != nil {\n\t\t\tc.t.Fatal(err)\n\t\t}\n\t\tif err := memStorage.ApplySnapshot(raftpb.Snapshot{\n\t\t\tMetadata: raftpb.SnapshotMetadata{\n\t\t\t\tConfState: raftpb.ConfState{\n\t\t\t\t\tNodes: replicaIDs,\n\t\t\t\t},\n\t\t\t\tIndex: 10,\n\t\t\t\tTerm: 5,\n\t\t\t},\n\t\t}); err != nil {\n\t\t\tc.t.Fatal(err)\n\t\t}\n\n\t\tnode := c.nodes[firstNode+i]\n\t\tif err := node.CreateGroup(groupID); err != nil {\n\t\t\tc.t.Fatal(err)\n\t\t}\n\t}\n}", "func NewCloudPcBulkActionSummary()(*CloudPcBulkActionSummary) {\n m := &CloudPcBulkActionSummary{\n }\n m.backingStore = ie8677ce2c7e1b4c22e9c3827ecd078d41185424dd9eeb92b7d971ed2d49a392e.BackingStoreFactoryInstance();\n m.SetAdditionalData(make(map[string]any))\n return m\n}", "func newAwsWafregionalRuleGroups(c *TrussleV1Client, namespace string) *awsWafregionalRuleGroups {\n\treturn &awsWafregionalRuleGroups{\n\t\tclient: c.RESTClient(),\n\t\tns: namespace,\n\t}\n}", "func NewGroups(filename string, bad BadLineHandler) (*HTGroup, error) {\n\thtGroup := HTGroup{\n\t\tfilePath: filename,\n\t}\n\treturn &htGroup, htGroup.ReloadGroups(bad)\n}", "func (x Go) AddToGroup(wg *sync.WaitGroup) Go {\n\tx.wg = wg\n\treturn x\n}", "func New(opt 
...option) *Group {\n\tr := &Group{wait_register: map[int]bool{}}\n\tfor _, o := range opt {\n\t\to(r)\n\t}\n\tif r.CancelFunc == nil {\n\t\tWith_cancel_nowait(nil)(r)\n\t}\n\tif r.parent == nil {\n\t\tr.local_wg = &sync.WaitGroup{}\n\t}\n\tif r.sig != nil {\n\t\tr.wg().Add(1)\n\t\tgo func() {\n\t\t\tdefer r.wg().Done()\n\t\t\tch := make(chan os.Signal, 1)\n\t\t\tdefer close(ch)\n\t\t\tsignal.Notify(ch, r.sig...)\n\t\t\tdefer signal.Stop(ch)\n\t\t\tselect {\n\t\t\tcase <-r.Done():\n\t\t\tcase <-ch:\n\t\t\t\tr.Interrupted = true\n\t\t\t\tfmt.Fprintf(os.Stderr, \"%v\", r.line_end)\n\t\t\t}\n\t\t\tr.Cancel()\n\t\t}()\n\t}\n\treturn r\n}", "func makeGroupCalculation(group Common.IssueGroup) IssueGroupWithTimeMetrics {\n\tallTimeSpend := Common.Sum(group.Issues, func(issue jmod.IssueEntity) int { return issue.Fields.TimeSpend })\n\tallEstimate := Common.Sum(group.Issues, func(issue jmod.IssueEntity) int { return issue.Fields.Estimate })\n\tvar accuracy float64\n\n\tif allTimeSpend == 0 {\n\t\taccuracy = -1\n\t} else {\n\t\taccuracy = float64(allEstimate) / float64(allTimeSpend)\n\t}\n\n\treturn IssueGroupWithTimeMetrics{\n\t\tGroup: group,\n\t\tSpendSum: allTimeSpend,\n\t\tEstimateSum: allEstimate,\n\t\tAccuracy: accuracy,\n\t}\n}", "func NewDkgGroup(dishonestThreshold int, size int) *Group {\n\tmemberIDs := make([]MemberIndex, size)\n\tfor i := 0; i < size; i++ {\n\t\tmemberIDs[i] = MemberIndex(i + 1)\n\t}\n\n\treturn &Group{\n\t\tdishonestThreshold: dishonestThreshold,\n\t\tdisqualifiedMemberIDs: []MemberIndex{},\n\t\tinactiveMemberIDs: []MemberIndex{},\n\t\tmemberIDs: memberIDs,\n\t}\n}", "func New(ds datastore.Datastore, parameters Parameters) (Group, error) {\n\thandler, err := NewFSMHandler(parameters)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\td := handler.(fsmHandler)\n\treturn &stateGroup{StateGroup: statemachine.New(ds, handler, parameters.StateType), d: d}, nil\n}", "func NewAudioRoutingGroup()(*AudioRoutingGroup) {\n m := &AudioRoutingGroup{\n Entity: *NewEntity(),\n }\n return m\n}", "func newRoutine(wg *sync.WaitGroup) {\n\tfmt.Println(\"New Routine\")\n\twg.Done()\n}", "func (g *Group) FromTOML(i interface{}) error {\n\tgt, ok := i.(*GroupTOML)\n\tif !ok {\n\t\treturn fmt.Errorf(\"grouptoml unknown\")\n\t}\n\tg.Threshold = gt.Threshold\n\n\t// migration path from < v1.4, gt.SchemeID might not be contained in the group file, in which case it's the default\n\tsch, err := crypto.GetSchemeByIDWithDefault(gt.SchemeID)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to instantiate group with crypto Scheme named '%s'\", gt.SchemeID)\n\t}\n\tg.Scheme = sch\n\n\tg.Nodes = make([]*Node, len(gt.Nodes))\n\tfor i, ptoml := range gt.Nodes {\n\t\tg.Nodes[i] = new(Node)\n\t\tg.Nodes[i].Identity = &Identity{Scheme: sch}\n\t\tif err := g.Nodes[i].FromTOML(ptoml); err != nil {\n\t\t\treturn fmt.Errorf(\"group: unwrapping node[%d]: %w\", i, err)\n\t\t}\n\t}\n\n\tif g.Threshold < dkg.MinimumT(len(gt.Nodes)) {\n\t\treturn errors.New(\"group file have threshold 0\")\n\t} else if g.Threshold > g.Len() {\n\t\treturn errors.New(\"group file threshold greater than number of participants\")\n\t}\n\n\tif gt.PublicKey != nil {\n\t\t// dist key only if dkg ran\n\t\tg.PublicKey = new(DistPublic)\n\t\tif err = g.PublicKey.FromTOML(sch, gt.PublicKey); err != nil {\n\t\t\treturn fmt.Errorf(\"group: unwrapping distributed public key: %w\", err)\n\t\t}\n\t}\n\tg.Period, err = time.ParseDuration(gt.Period)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif gt.CatchupPeriod == \"\" {\n\t\tg.CatchupPeriod = 0\n\t} 
else {\n\t\tg.CatchupPeriod, err = time.ParseDuration(gt.CatchupPeriod)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tg.GenesisTime = gt.GenesisTime\n\tif gt.TransitionTime != 0 {\n\t\tg.TransitionTime = gt.TransitionTime\n\t}\n\tif gt.GenesisSeed != \"\" {\n\t\tif g.GenesisSeed, err = hex.DecodeString(gt.GenesisSeed); err != nil {\n\t\t\treturn fmt.Errorf(\"group: decoding genesis seed %w\", err)\n\t\t}\n\t}\n\n\t// for backward compatibility we make sure to write \"default\" as beacon id if not set\n\tg.ID = commonutils.GetCanonicalBeaconID(gt.ID)\n\n\treturn nil\n}", "func NewAggregator(t testing.TB) *Aggregator {\n\tmock := &Aggregator{}\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}", "func New(gid string) *Group {\n return &Group{\n Client: client.New().Init(),\n GroupID: gid,\n }\n}", "func (rm *resourceManager) newDeleteRequestPayload(\n\tr *resource,\n) (*svcsdk.DeleteReplicationGroupInput, error) {\n\tres := &svcsdk.DeleteReplicationGroupInput{}\n\n\tif r.ko.Spec.ReplicationGroupID != nil {\n\t\tres.SetReplicationGroupId(*r.ko.Spec.ReplicationGroupID)\n\t}\n\n\treturn res, nil\n}", "func (_RandomBeacon *RandomBeaconTransactorSession) UpdateGroupCreationParameters(groupCreationFrequency *big.Int, groupLifetime *big.Int, dkgResultChallengePeriodLength *big.Int, dkgResultChallengeExtraGas *big.Int, dkgResultSubmissionTimeout *big.Int, dkgSubmitterPrecedencePeriodLength *big.Int) (*types.Transaction, error) {\n\treturn _RandomBeacon.Contract.UpdateGroupCreationParameters(&_RandomBeacon.TransactOpts, groupCreationFrequency, groupLifetime, dkgResultChallengePeriodLength, dkgResultChallengeExtraGas, dkgResultSubmissionTimeout, dkgSubmitterPrecedencePeriodLength)\n}", "func (m *GraphBaseServiceClient) GroupLifecyclePolicies()(*i1d6652ecc686b20c37a9a3448b26db8187e284e1a4017cab8876b02b97557436.GroupLifecyclePoliciesRequestBuilder) {\n return i1d6652ecc686b20c37a9a3448b26db8187e284e1a4017cab8876b02b97557436.NewGroupLifecyclePoliciesRequestBuilderInternal(m.pathParameters, m.requestAdapter);\n}", "func (m *GraphBaseServiceClient) GroupLifecyclePolicies()(*i1d6652ecc686b20c37a9a3448b26db8187e284e1a4017cab8876b02b97557436.GroupLifecyclePoliciesRequestBuilder) {\n return i1d6652ecc686b20c37a9a3448b26db8187e284e1a4017cab8876b02b97557436.NewGroupLifecyclePoliciesRequestBuilderInternal(m.pathParameters, m.requestAdapter);\n}", "func NewGetGroupUnauthorized() *GetGroupUnauthorized {\n\treturn &GetGroupUnauthorized{}\n}", "func NewGroup(m *algebra.Matrix) *Group {\n\tmat := m\n\tif m == nil || len(m.Get()) != 4 || len(m.Get()[0]) != 4 {\n\t\tmat = algebra.IdentityMatrix(4)\n\t}\n\temptyShapes := make([]Shape, 0, 0)\n\treturn &Group{transform: mat, parent: nil, shapes: emptyShapes, bounds: [2]*algebra.Vector{}}\n}", "func (client *VirtualMachineScaleSetsClient) convertToSinglePlacementGroupCreateRequest(ctx context.Context, resourceGroupName string, vmScaleSetName string, parameters VMScaleSetConvertToSinglePlacementGroupInput, options *VirtualMachineScaleSetsConvertToSinglePlacementGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/convertToSinglePlacementGroup\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif vmScaleSetName == \"\" {\n\t\treturn nil, 
errors.New(\"parameter vmScaleSetName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{vmScaleSetName}\", url.PathEscape(vmScaleSetName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-07-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treturn req, runtime.MarshalAsJSON(req, parameters)\n}", "func (c *UDBClient) NewUploadUDBParamGroupRequest() *UploadUDBParamGroupRequest {\n\treq := &UploadUDBParamGroupRequest{}\n\n\t// setup request with client config\n\tc.Client.SetupRequest(req)\n\n\t// setup retryable with default retry policy (retry for non-create action and common error)\n\treq.SetRetryable(true)\n\treturn req\n}", "func (client *GroupClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, serviceName string, groupID string, parameters GroupCreateParameters, options *GroupCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/groups/{groupId}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif serviceName == \"\" {\n\t\treturn nil, errors.New(\"parameter serviceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{serviceName}\", url.PathEscape(serviceName))\n\tif groupID == \"\" {\n\t\treturn nil, errors.New(\"parameter groupID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{groupId}\", url.PathEscape(groupID))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-08-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\tif options != nil && options.IfMatch != nil {\n\t\treq.Raw().Header.Set(\"If-Match\", *options.IfMatch)\n\t}\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, parameters)\n}", "func (a *IAMApiService) CreateGroup(ctx context.Context, gid string, iamGroupCreate IamGroupCreate) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = http.MethodPut\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/acs/api/v1/groups/{gid}\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"gid\"+\"}\", fmt.Sprintf(\"%v\", gid), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType 
:= selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\t// body params\n\tlocalVarPostBody = &iamGroupCreate\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\tif localVarHttpResponse.StatusCode == 0 {\n\t\t\tvar v IamError\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarHttpResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarHttpResponse, newErr\n\t\t}\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func newBarGroupMutation(c config, op Op, opts ...bargroupOption) *BarGroupMutation {\n\tm := &BarGroupMutation{\n\t\tconfig: c,\n\t\top: op,\n\t\ttyp: TypeBarGroup,\n\t\tclearedFields: make(map[string]struct{}),\n\t}\n\tfor _, opt := range opts {\n\t\topt(m)\n\t}\n\treturn m\n}", "func NewOrderedGroup[T any, U any](pool WorkerPool[T, U], size int) WorkGroup[T, U] {\n\treturn &ordered[T, U]{\n\t\tworkPool: pool,\n\t\tresults: make([]U, size),\n\t\twg: new(sync.WaitGroup),\n\t\tcount: 0,\n\t}\n}", "func TestGroup(t *testing.T) {\n\tn := 5\n\ttmpPath := path.Join(os.TempDir(), \"drand\")\n\tos.Mkdir(tmpPath, 0740)\n\tdefer os.RemoveAll(tmpPath)\n\n\tnames := make([]string, n, n)\n\tprivs := make([]*key.Pair, n, n)\n\tfor i := 0; i < n; i++ {\n\t\tnames[i] = path.Join(tmpPath, fmt.Sprintf(\"drand-%d.public\", i))\n\t\tprivs[i] = key.NewKeyPair(\"127.0.0.1\")\n\t\trequire.NoError(t, key.Save(names[i], privs[i].Public, false))\n\t\tif yes, err := fs.Exists(names[i]); !yes || err != nil {\n\t\t\tt.Fatal(err.Error())\n\t\t}\n\t}\n\n\t//test not enough keys\n\tcmd := exec.Command(\"drand\", \"--folder\", tmpPath, \"group\", names[0])\n\tout, err := cmd.CombinedOutput()\n\texpectedOut := \"group command take at least 3 keys as arguments\"\n\tfmt.Println(string(out))\n\trequire.Error(t, err)\n\n\t//test valid creation\n\tgroupPath := path.Join(tmpPath, key.GroupFolderName)\n\targs := []string{\"drand\", \"--folder\", tmpPath, \"group\"}\n\targs = append(args, names...)\n\tcmd = exec.Command(args[0], args[1:]...)\n\tout, err = cmd.CombinedOutput()\n\texpectedOut = \"Copy the following snippet into a new group.toml file \" +\n\t\t\"and distribute it to all the participants:\"\n\tfmt.Println(string(out))\n\trequire.True(t, strings.Contains(string(out), expectedOut))\n\trequire.Nil(t, err)\n\n\t//recreates exactly 
like in main and saves the group\n\tvar threshold = key.DefaultThreshold(n)\n\tpublics := make([]*key.Identity, n)\n\tfor i, str := range names {\n\t\tpub := &key.Identity{}\n\t\tif err := key.Load(str, pub); err != nil {\n\t\t\tslog.Fatal(err)\n\t\t}\n\t\tpublics[i] = pub\n\t}\n\tgroup := key.NewGroup(publics, threshold)\n\tgroup.PublicKey = &key.DistPublic{\n\t\tCoefficients: []kyber.Point{publics[0].Key},\n\t}\n\trequire.Nil(t, key.Save(groupPath, group, false))\n\n\textraName := path.Join(tmpPath, fmt.Sprintf(\"drand-%d.public\", n))\n\textraPriv := key.NewKeyPair(\"127.0.0.1\")\n\trequire.NoError(t, key.Save(extraName, extraPriv.Public, false))\n\tif yes, err := fs.Exists(extraName); !yes || err != nil {\n\t\tt.Fatal(err.Error())\n\t}\n\n\t//test valid merge\n\tcmd = exec.Command(\"drand\", \"--folder\", tmpPath, \"group\", \"--group\", groupPath, extraName)\n\tout, err = cmd.CombinedOutput()\n\tfmt.Println(string(out))\n\n\t//expectedOut = \"Copy the following snippet into a new_group.toml file and give it to the upgrade command to do the resharing.\"\n\trequire.True(t, strings.Contains(string(out), expectedOut))\n\n\t//test could not load group file\n\twrongGroupPath := \"not_here\"\n\tcmd = exec.Command(\"drand\", \"--folder\", tmpPath, \"group\", \"--group\", wrongGroupPath, names[0])\n\tout, err = cmd.CombinedOutput()\n\tfmt.Println(string(out))\n\trequire.Error(t, err)\n\n\t//test reject empty group file\n\temptyGroupPath := path.Join(tmpPath, \"empty.toml\")\n\temptyFile, err := os.Create(emptyGroupPath)\n\tif err != nil {\n\t\tslog.Fatal(err)\n\t}\n\tdefer emptyFile.Close()\n\tcmd = exec.Command(\"drand\", \"--folder\", tmpPath, \"group\", \"--group\", emptyGroupPath, names[0])\n\tout, err = cmd.CombinedOutput()\n\tfmt.Println(string(out))\n\trequire.Error(t, err)\n}", "func ExampleActionGroupsClient_CreateOrUpdate() {\n\tcred, err := azidentity.NewDefaultAzureCredential(nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to obtain a credential: %v\", err)\n\t}\n\tctx := context.Background()\n\tclientFactory, err := armmonitor.NewClientFactory(\"<subscription-id>\", cred, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to create client: %v\", err)\n\t}\n\tres, err := clientFactory.NewActionGroupsClient().CreateOrUpdate(ctx, \"Default-NotificationRules\", \"SampleActionGroup\", armmonitor.ActionGroupResource{\n\t\tLocation: to.Ptr(\"Global\"),\n\t\tTags: map[string]*string{},\n\t\tProperties: &armmonitor.ActionGroup{\n\t\t\tArmRoleReceivers: []*armmonitor.ArmRoleReceiver{\n\t\t\t\t{\n\t\t\t\t\tName: to.Ptr(\"Sample armRole\"),\n\t\t\t\t\tRoleID: to.Ptr(\"8e3af657-a8ff-443c-a75c-2fe8c4bcb635\"),\n\t\t\t\t\tUseCommonAlertSchema: to.Ptr(true),\n\t\t\t\t}},\n\t\t\tAutomationRunbookReceivers: []*armmonitor.AutomationRunbookReceiver{\n\t\t\t\t{\n\t\t\t\t\tName: to.Ptr(\"testRunbook\"),\n\t\t\t\t\tAutomationAccountID: to.Ptr(\"/subscriptions/187f412d-1758-44d9-b052-169e2564721d/resourceGroups/runbookTest/providers/Microsoft.Automation/automationAccounts/runbooktest\"),\n\t\t\t\t\tIsGlobalRunbook: to.Ptr(false),\n\t\t\t\t\tRunbookName: to.Ptr(\"Sample runbook\"),\n\t\t\t\t\tServiceURI: to.Ptr(\"<serviceUri>\"),\n\t\t\t\t\tUseCommonAlertSchema: to.Ptr(true),\n\t\t\t\t\tWebhookResourceID: to.Ptr(\"/subscriptions/187f412d-1758-44d9-b052-169e2564721d/resourceGroups/runbookTest/providers/Microsoft.Automation/automationAccounts/runbooktest/webhooks/Alert1510184037084\"),\n\t\t\t\t}},\n\t\t\tAzureAppPushReceivers: []*armmonitor.AzureAppPushReceiver{\n\t\t\t\t{\n\t\t\t\t\tName: to.Ptr(\"Sample 
azureAppPush\"),\n\t\t\t\t\tEmailAddress: to.Ptr(\"johndoe@email.com\"),\n\t\t\t\t}},\n\t\t\tAzureFunctionReceivers: []*armmonitor.AzureFunctionReceiver{\n\t\t\t\t{\n\t\t\t\t\tName: to.Ptr(\"Sample azureFunction\"),\n\t\t\t\t\tFunctionAppResourceID: to.Ptr(\"/subscriptions/5def922a-3ed4-49c1-b9fd-05ec533819a3/resourceGroups/aznsTest/providers/Microsoft.Web/sites/testFunctionApp\"),\n\t\t\t\t\tFunctionName: to.Ptr(\"HttpTriggerCSharp1\"),\n\t\t\t\t\tHTTPTriggerURL: to.Ptr(\"http://test.me\"),\n\t\t\t\t\tUseCommonAlertSchema: to.Ptr(true),\n\t\t\t\t}},\n\t\t\tEmailReceivers: []*armmonitor.EmailReceiver{\n\t\t\t\t{\n\t\t\t\t\tName: to.Ptr(\"John Doe's email\"),\n\t\t\t\t\tEmailAddress: to.Ptr(\"johndoe@email.com\"),\n\t\t\t\t\tUseCommonAlertSchema: to.Ptr(false),\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName: to.Ptr(\"Jane Smith's email\"),\n\t\t\t\t\tEmailAddress: to.Ptr(\"janesmith@email.com\"),\n\t\t\t\t\tUseCommonAlertSchema: to.Ptr(true),\n\t\t\t\t}},\n\t\t\tEnabled: to.Ptr(true),\n\t\t\tEventHubReceivers: []*armmonitor.EventHubReceiver{\n\t\t\t\t{\n\t\t\t\t\tName: to.Ptr(\"Sample eventHub\"),\n\t\t\t\t\tEventHubName: to.Ptr(\"testEventHub\"),\n\t\t\t\t\tEventHubNameSpace: to.Ptr(\"testEventHubNameSpace\"),\n\t\t\t\t\tSubscriptionID: to.Ptr(\"187f412d-1758-44d9-b052-169e2564721d\"),\n\t\t\t\t\tTenantID: to.Ptr(\"68a4459a-ccb8-493c-b9da-dd30457d1b84\"),\n\t\t\t\t}},\n\t\t\tGroupShortName: to.Ptr(\"sample\"),\n\t\t\tItsmReceivers: []*armmonitor.ItsmReceiver{\n\t\t\t\t{\n\t\t\t\t\tName: to.Ptr(\"Sample itsm\"),\n\t\t\t\t\tConnectionID: to.Ptr(\"a3b9076c-ce8e-434e-85b4-aff10cb3c8f1\"),\n\t\t\t\t\tRegion: to.Ptr(\"westcentralus\"),\n\t\t\t\t\tTicketConfiguration: to.Ptr(\"{\\\"PayloadRevision\\\":0,\\\"WorkItemType\\\":\\\"Incident\\\",\\\"UseTemplate\\\":false,\\\"WorkItemData\\\":\\\"{}\\\",\\\"CreateOneWIPerCI\\\":false}\"),\n\t\t\t\t\tWorkspaceID: to.Ptr(\"5def922a-3ed4-49c1-b9fd-05ec533819a3|55dfd1f8-7e59-4f89-bf56-4c82f5ace23c\"),\n\t\t\t\t}},\n\t\t\tLogicAppReceivers: []*armmonitor.LogicAppReceiver{\n\t\t\t\t{\n\t\t\t\t\tName: to.Ptr(\"Sample logicApp\"),\n\t\t\t\t\tCallbackURL: to.Ptr(\"https://prod-27.northcentralus.logic.azure.com/workflows/68e572e818e5457ba898763b7db90877/triggers/manual/paths/invoke/azns/test?api-version=2016-10-01&sp=%2Ftriggers%2Fmanual%2Frun&sv=1.0&sig=Abpsb72UYJxPPvmDo937uzofupO5r_vIeWEx7KVHo7w\"),\n\t\t\t\t\tResourceID: to.Ptr(\"/subscriptions/187f412d-1758-44d9-b052-169e2564721d/resourceGroups/LogicApp/providers/Microsoft.Logic/workflows/testLogicApp\"),\n\t\t\t\t\tUseCommonAlertSchema: to.Ptr(false),\n\t\t\t\t}},\n\t\t\tSmsReceivers: []*armmonitor.SmsReceiver{\n\t\t\t\t{\n\t\t\t\t\tName: to.Ptr(\"John Doe's mobile\"),\n\t\t\t\t\tCountryCode: to.Ptr(\"1\"),\n\t\t\t\t\tPhoneNumber: to.Ptr(\"1234567890\"),\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName: to.Ptr(\"Jane Smith's mobile\"),\n\t\t\t\t\tCountryCode: to.Ptr(\"1\"),\n\t\t\t\t\tPhoneNumber: to.Ptr(\"0987654321\"),\n\t\t\t\t}},\n\t\t\tVoiceReceivers: []*armmonitor.VoiceReceiver{\n\t\t\t\t{\n\t\t\t\t\tName: to.Ptr(\"Sample voice\"),\n\t\t\t\t\tCountryCode: to.Ptr(\"1\"),\n\t\t\t\t\tPhoneNumber: to.Ptr(\"1234567890\"),\n\t\t\t\t}},\n\t\t\tWebhookReceivers: []*armmonitor.WebhookReceiver{\n\t\t\t\t{\n\t\t\t\t\tName: to.Ptr(\"Sample webhook 1\"),\n\t\t\t\t\tServiceURI: to.Ptr(\"http://www.example.com/webhook1\"),\n\t\t\t\t\tUseCommonAlertSchema: to.Ptr(true),\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName: to.Ptr(\"Sample webhook 2\"),\n\t\t\t\t\tIdentifierURI: 
to.Ptr(\"http://someidentifier/d7811ba3-7996-4a93-99b6-6b2f3f355f8a\"),\n\t\t\t\t\tObjectID: to.Ptr(\"d3bb868c-fe44-452c-aa26-769a6538c808\"),\n\t\t\t\t\tServiceURI: to.Ptr(\"http://www.example.com/webhook2\"),\n\t\t\t\t\tTenantID: to.Ptr(\"68a4459a-ccb8-493c-b9da-dd30457d1b84\"),\n\t\t\t\t\tUseAADAuth: to.Ptr(true),\n\t\t\t\t\tUseCommonAlertSchema: to.Ptr(true),\n\t\t\t\t}},\n\t\t},\n\t}, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to finish the request: %v\", err)\n\t}\n\t// You could use response here. We use blank identifier for just demo purposes.\n\t_ = res\n\t// If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes.\n\t// res.ActionGroupResource = armmonitor.ActionGroupResource{\n\t// \tName: to.Ptr(\"SampleActionGroup\"),\n\t// \tType: to.Ptr(\"Microsoft.Insights/ActionGroups\"),\n\t// \tID: to.Ptr(\"/subscriptions/187f412d-1758-44d9-b052-169e2564721d/resourceGroups/Default-NotificationRules/providers/microsoft.insights/actionGroups/SampleActionGroup\"),\n\t// \tLocation: to.Ptr(\"Global\"),\n\t// \tTags: map[string]*string{\n\t// \t},\n\t// \tProperties: &armmonitor.ActionGroup{\n\t// \t\tArmRoleReceivers: []*armmonitor.ArmRoleReceiver{\n\t// \t\t\t{\n\t// \t\t\t\tName: to.Ptr(\"Sample armRole\"),\n\t// \t\t\t\tRoleID: to.Ptr(\"8e3af657-a8ff-443c-a75c-2fe8c4bcb635\"),\n\t// \t\t\t\tUseCommonAlertSchema: to.Ptr(true),\n\t// \t\t}},\n\t// \t\tAutomationRunbookReceivers: []*armmonitor.AutomationRunbookReceiver{\n\t// \t\t\t{\n\t// \t\t\t\tName: to.Ptr(\"testRunbook\"),\n\t// \t\t\t\tAutomationAccountID: to.Ptr(\"/subscriptions/187f412d-1758-44d9-b052-169e2564721d/resourceGroups/runbookTest/providers/Microsoft.Automation/automationAccounts/runbooktest\"),\n\t// \t\t\t\tIsGlobalRunbook: to.Ptr(false),\n\t// \t\t\t\tRunbookName: to.Ptr(\"Sample runbook\"),\n\t// \t\t\t\tServiceURI: to.Ptr(\"<serviceUri>\"),\n\t// \t\t\t\tUseCommonAlertSchema: to.Ptr(true),\n\t// \t\t\t\tWebhookResourceID: to.Ptr(\"/subscriptions/187f412d-1758-44d9-b052-169e2564721d/resourceGroups/runbookTest/providers/Microsoft.Automation/automationAccounts/runbooktest/webhooks/Alert1510184037084\"),\n\t// \t\t}},\n\t// \t\tAzureAppPushReceivers: []*armmonitor.AzureAppPushReceiver{\n\t// \t\t\t{\n\t// \t\t\t\tName: to.Ptr(\"Sample azureAppPush\"),\n\t// \t\t\t\tEmailAddress: to.Ptr(\"johndoe@email.com\"),\n\t// \t\t}},\n\t// \t\tAzureFunctionReceivers: []*armmonitor.AzureFunctionReceiver{\n\t// \t\t\t{\n\t// \t\t\t\tName: to.Ptr(\"Sample azureFunction\"),\n\t// \t\t\t\tFunctionAppResourceID: to.Ptr(\"/subscriptions/5def922a-3ed4-49c1-b9fd-05ec533819a3/resourceGroups/aznsTest/providers/Microsoft.Web/sites/testFunctionApp\"),\n\t// \t\t\t\tFunctionName: to.Ptr(\"HttpTriggerCSharp1\"),\n\t// \t\t\t\tHTTPTriggerURL: to.Ptr(\"<httpTriggerUrl>\"),\n\t// \t\t\t\tUseCommonAlertSchema: to.Ptr(true),\n\t// \t\t}},\n\t// \t\tEmailReceivers: []*armmonitor.EmailReceiver{\n\t// \t\t\t{\n\t// \t\t\t\tName: to.Ptr(\"John Doe's email\"),\n\t// \t\t\t\tEmailAddress: to.Ptr(\"johndoe@email.com\"),\n\t// \t\t\t\tStatus: to.Ptr(armmonitor.ReceiverStatusEnabled),\n\t// \t\t\t\tUseCommonAlertSchema: to.Ptr(false),\n\t// \t\t\t},\n\t// \t\t\t{\n\t// \t\t\t\tName: to.Ptr(\"Jane Smith's email\"),\n\t// \t\t\t\tEmailAddress: to.Ptr(\"janesmith@email.com\"),\n\t// \t\t\t\tStatus: to.Ptr(armmonitor.ReceiverStatusEnabled),\n\t// \t\t\t\tUseCommonAlertSchema: to.Ptr(true),\n\t// \t\t}},\n\t// 
\t\tEnabled: to.Ptr(true),\n\t// \t\tEventHubReceivers: []*armmonitor.EventHubReceiver{\n\t// \t\t\t{\n\t// \t\t\t\tName: to.Ptr(\"Sample eventHub\"),\n\t// \t\t\t\tEventHubName: to.Ptr(\"testEventHub\"),\n\t// \t\t\t\tEventHubNameSpace: to.Ptr(\"testEventHubNameSpace\"),\n\t// \t\t\t\tSubscriptionID: to.Ptr(\"187f412d-1758-44d9-b052-169e2564721d\"),\n\t// \t\t\t\tTenantID: to.Ptr(\"68a4459a-ccb8-493c-b9da-dd30457d1b84\"),\n\t// \t\t\t\tUseCommonAlertSchema: to.Ptr(false),\n\t// \t\t}},\n\t// \t\tGroupShortName: to.Ptr(\"sample\"),\n\t// \t\tItsmReceivers: []*armmonitor.ItsmReceiver{\n\t// \t\t\t{\n\t// \t\t\t\tName: to.Ptr(\"Sample itsm\"),\n\t// \t\t\t\tConnectionID: to.Ptr(\"a3b9076c-ce8e-434e-85b4-aff10cb3c8f1\"),\n\t// \t\t\t\tRegion: to.Ptr(\"westcentralus\"),\n\t// \t\t\t\tTicketConfiguration: to.Ptr(\"{\\\"PayloadRevision\\\":0,\\\"WorkItemType\\\":\\\"Incident\\\",\\\"UseTemplate\\\":false,\\\"WorkItemData\\\":\\\"{}\\\",\\\"CreateOneWIPerCI\\\":false}\"),\n\t// \t\t\t\tWorkspaceID: to.Ptr(\"5def922a-3ed4-49c1-b9fd-05ec533819a3|55dfd1f8-7e59-4f89-bf56-4c82f5ace23c\"),\n\t// \t\t}},\n\t// \t\tLogicAppReceivers: []*armmonitor.LogicAppReceiver{\n\t// \t\t\t{\n\t// \t\t\t\tName: to.Ptr(\"Sample logicApp\"),\n\t// \t\t\t\tCallbackURL: to.Ptr(\"https://prod-27.northcentralus.logic.azure.com/workflows/68e572e818e5457ba898763b7db90877/triggers/manual/paths/invoke/azns/test?api-version=2016-10-01&sp=%2Ftriggers%2Fmanual%2Frun&sv=1.0&sig=Abpsb72UYJxPPvmDo937uzofupO5r_vIeWEx7KVHo7w\"),\n\t// \t\t\t\tResourceID: to.Ptr(\"/subscriptions/187f412d-1758-44d9-b052-169e2564721d/resourceGroups/LogicApp/providers/Microsoft.Logic/workflows/testLogicApp\"),\n\t// \t\t\t\tUseCommonAlertSchema: to.Ptr(false),\n\t// \t\t}},\n\t// \t\tSmsReceivers: []*armmonitor.SmsReceiver{\n\t// \t\t\t{\n\t// \t\t\t\tName: to.Ptr(\"John Doe's mobile\"),\n\t// \t\t\t\tCountryCode: to.Ptr(\"1\"),\n\t// \t\t\t\tPhoneNumber: to.Ptr(\"1234567890\"),\n\t// \t\t\t\tStatus: to.Ptr(armmonitor.ReceiverStatusEnabled),\n\t// \t\t\t},\n\t// \t\t\t{\n\t// \t\t\t\tName: to.Ptr(\"Jane Smith's mobile\"),\n\t// \t\t\t\tCountryCode: to.Ptr(\"1\"),\n\t// \t\t\t\tPhoneNumber: to.Ptr(\"0987654321\"),\n\t// \t\t\t\tStatus: to.Ptr(armmonitor.ReceiverStatusEnabled),\n\t// \t\t}},\n\t// \t\tVoiceReceivers: []*armmonitor.VoiceReceiver{\n\t// \t\t\t{\n\t// \t\t\t\tName: to.Ptr(\"Sample voice\"),\n\t// \t\t\t\tCountryCode: to.Ptr(\"1\"),\n\t// \t\t\t\tPhoneNumber: to.Ptr(\"1234567890\"),\n\t// \t\t}},\n\t// \t\tWebhookReceivers: []*armmonitor.WebhookReceiver{\n\t// \t\t\t{\n\t// \t\t\t\tName: to.Ptr(\"Sample webhook 1\"),\n\t// \t\t\t\tServiceURI: to.Ptr(\"http://www.example.com/webhook1\"),\n\t// \t\t\t\tUseCommonAlertSchema: to.Ptr(true),\n\t// \t\t\t},\n\t// \t\t\t{\n\t// \t\t\t\tName: to.Ptr(\"Sample webhook 2\"),\n\t// \t\t\t\tIdentifierURI: to.Ptr(\"http://someidentifier/d7811ba3-7996-4a93-99b6-6b2f3f355f8a\"),\n\t// \t\t\t\tObjectID: to.Ptr(\"d3bb868c-fe44-452c-aa26-769a6538c808\"),\n\t// \t\t\t\tServiceURI: to.Ptr(\"http://www.example.com/webhook2\"),\n\t// \t\t\t\tTenantID: to.Ptr(\"68a4459a-ccb8-493c-b9da-dd30457d1b84\"),\n\t// \t\t\t\tUseAADAuth: to.Ptr(true),\n\t// \t\t\t\tUseCommonAlertSchema: to.Ptr(true),\n\t// \t\t}},\n\t// \t},\n\t// }\n}", "func GenerateGroupKeys(initialMessage []byte, transportPrivateKey *big.Int, transportPublicKey [2]*big.Int, privateCoefficients []*big.Int, encryptedShares [][]*big.Int, index int, participants ParticipantList, threshold int) (*big.Int, [4]*big.Int, [2]*big.Int, error) {\n\n\t// setup\n\tn := 
len(participants)\n\n\t// build portions of group secret key\n\tpublicKeyG1s := make([]*cloudflare.G1, n)\n\n\tfor idx := 0; idx < n; idx++ {\n\t\tpublicKeyG1, err := bn256.BigIntArrayToG1(participants[idx].PublicKey)\n\t\tif err != nil {\n\t\t\treturn nil, empty4Big, empty2Big, fmt.Errorf(\"error converting public key to g1: %v\", err)\n\t\t}\n\t\tpublicKeyG1s[idx] = publicKeyG1\n\t}\n\n\ttransportPublicKeyG1, err := bn256.BigIntArrayToG1(transportPublicKey)\n\tif err != nil {\n\t\treturn nil, empty4Big, empty2Big, fmt.Errorf(\"error converting transport public key to g1: %v\", err)\n\t}\n\n\tsharedEncrypted, err := cloudflare.CondenseCommitments(transportPublicKeyG1, encryptedShares, publicKeyG1s)\n\tif err != nil {\n\t\treturn nil, empty4Big, empty2Big, fmt.Errorf(\"error condensing commitments: %v\", err)\n\t}\n\n\tsharedSecrets, err := cloudflare.GenerateDecryptedShares(transportPrivateKey, sharedEncrypted, publicKeyG1s)\n\tif err != nil {\n\t\treturn nil, empty4Big, empty2Big, fmt.Errorf(\"error generating decrypted shares: %v\", err)\n\t}\n\n\t// here's the final group secret\n\tgskj := cloudflare.PrivatePolyEval(privateCoefficients, 1+index)\n\tfor idx := 0; idx < len(sharedSecrets); idx++ {\n\t\tgskj.Add(gskj, sharedSecrets[idx])\n\t}\n\tgskj.Mod(gskj, cloudflare.Order)\n\n\t// here's the group public\n\tgpkj := new(cloudflare.G2).ScalarBaseMult(gskj)\n\tgpkjBig := bn256.G2ToBigIntArray(gpkj)\n\n\t// create sig\n\tsig, err := cloudflare.Sign(initialMessage, gskj, cloudflare.HashToG1)\n\tif err != nil {\n\t\treturn nil, empty4Big, empty2Big, fmt.Errorf(\"error signing message: %v\", err)\n\t}\n\tsigBig := bn256.G1ToBigIntArray(sig)\n\n\t// verify signature\n\tvalidSig, err := cloudflare.Verify(initialMessage, sig, gpkj, cloudflare.HashToG1)\n\tif err != nil {\n\t\treturn nil, empty4Big, empty2Big, fmt.Errorf(\"error verifying signature: %v\", err)\n\t}\n\n\tif !validSig {\n\t\treturn nil, empty4Big, empty2Big, errors.New(\"not a valid group signature\")\n\t}\n\n\treturn gskj, gpkjBig, sigBig, nil\n}", "func (m *ItemTermStoreGroupsGroupItemRequestBuilder) Get(ctx context.Context, requestConfiguration *ItemTermStoreGroupsGroupItemRequestBuilderGetRequestConfiguration)(ia3c27b33aa3d3ed80f9de797c48fbb8ed73f13887e301daf51f08450e9a634a3.Groupable, error) {\n requestInfo, err := m.ToGetRequestInformation(ctx, requestConfiguration);\n if err != nil {\n return nil, err\n }\n errorMapping := i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.ErrorMappings {\n \"4XX\": ia572726a95efa92ddd544552cd950653dc691023836923576b2f4bf716cf204a.CreateODataErrorFromDiscriminatorValue,\n \"5XX\": ia572726a95efa92ddd544552cd950653dc691023836923576b2f4bf716cf204a.CreateODataErrorFromDiscriminatorValue,\n }\n res, err := m.BaseRequestBuilder.RequestAdapter.Send(ctx, requestInfo, ia3c27b33aa3d3ed80f9de797c48fbb8ed73f13887e301daf51f08450e9a634a3.CreateGroupFromDiscriminatorValue, errorMapping)\n if err != nil {\n return nil, err\n }\n if res == nil {\n return nil, nil\n }\n return res.(ia3c27b33aa3d3ed80f9de797c48fbb8ed73f13887e301daf51f08450e9a634a3.Groupable), nil\n}", "func NewGroup(client *gosip.SPClient, endpoint string, config *RequestConfig) *Group {\n\treturn &Group{\n\t\tclient: client,\n\t\tendpoint: endpoint,\n\t\tconfig: config,\n\t\tmodifiers: NewODataMods(),\n\t}\n}", "func (a *API) CreateGroup(name, language string, agentPriorities map[string]GroupPriority) (int32, error) {\n\tvar resp createGroupResponse\n\terr := a.Call(\"create_group\", &createGroupRequest{\n\t\tName: 
name,\n\t\tLanguageCode: language,\n\t\tAgentPriorities: agentPriorities,\n\t}, &resp)\n\n\treturn resp.ID, err\n}", "func NewSectionGroup()(*SectionGroup) {\n m := &SectionGroup{\n OnenoteEntityHierarchyModel: *NewOnenoteEntityHierarchyModel(),\n }\n odataTypeValue := \"#microsoft.graph.sectionGroup\";\n m.SetOdataType(&odataTypeValue);\n return m\n}", "func createActivityGroup(info *ActivityInfo) (activityGroupStruct, error) {\n\terrResult := error(nil)\n\trunningDuration := info.RunningDuration\n\trunningDistanceKMString := info.RunningDistanceKM\n\trunningDistanceKM, err := parseUnsignedFixed(runningDistanceKMString, 1)\n\n\t/*\n\t * Check if this is the first error.\n\t */\n\tif errResult == nil && err != nil {\n\t\tmsg := err.Error()\n\t\terrResult = fmt.Errorf(\"Failed to parse running distance: %s\", msg)\n\t}\n\n\trunningStepCount := info.RunningStepCount\n\trunningEnergyKJ := info.RunningEnergyKJ\n\n\t/*\n\t * Create running activity.\n\t */\n\trunningActivity := runningActivityStruct{\n\t\tduration: runningDuration,\n\t\tdistanceKM: runningDistanceKM,\n\t\tstepCount: runningStepCount,\n\t\tenergyKJ: runningEnergyKJ,\n\t}\n\n\tcyclingDuration := info.CyclingDuration\n\tcyclingDistanceKMString := info.CyclingDistanceKM\n\tcyclingDistanceKM, err := parseUnsignedFixed(cyclingDistanceKMString, 1)\n\n\t/*\n\t * Check if this is the first error.\n\t */\n\tif errResult == nil && err != nil {\n\t\tmsg := err.Error()\n\t\terrResult = fmt.Errorf(\"Failed to parse cycling distance: %s\", msg)\n\t}\n\n\tcyclingEnergyKJ := info.CyclingEnergyKJ\n\n\t/*\n\t * Create cycling activity.\n\t */\n\tcyclingActivity := cyclingActivityStruct{\n\t\tduration: cyclingDuration,\n\t\tdistanceKM: cyclingDistanceKM,\n\t\tenergyKJ: cyclingEnergyKJ,\n\t}\n\n\totherEnergyKJ := info.OtherEnergyKJ\n\n\t/*\n\t * Create other activity.\n\t */\n\totherActivity := otherActivityStruct{\n\t\tenergyKJ: otherEnergyKJ,\n\t}\n\n\tbegin := info.Begin\n\tweightKGString := info.WeightKG\n\tweightKG, err := parseUnsignedFixed(weightKGString, 1)\n\n\t/*\n\t * Check if this is the first error.\n\t */\n\tif errResult == nil && err != nil {\n\t\tmsg := err.Error()\n\t\terrResult = fmt.Errorf(\"Failed to parse weight: %s\", msg)\n\t}\n\n\t/*\n\t * Create activity group.\n\t */\n\tg := activityGroupStruct{\n\t\tbegin: begin,\n\t\tweightKG: weightKG,\n\t\trunning: runningActivity,\n\t\tcycling: cyclingActivity,\n\t\tother: otherActivity,\n\t}\n\n\treturn g, errResult\n}", "func newRouteGroup(prefix string, router *Router, handlers []Handler) *RouteGroup {\n\treturn &RouteGroup{\n\t\tprefix: prefix,\n\t\trouter: router,\n\t\thandlers: handlers,\n\t}\n}", "func NewActionSet(actions ...authorizer.Action) ActionSet {\n\tactionSet := make(ActionSet)\n\tfor _, action := range actions {\n\t\tactionSet.Add(action)\n\t}\n\n\treturn actionSet\n}", "func (_RandomBeacon *RandomBeaconSession) UpdateGroupCreationParameters(groupCreationFrequency *big.Int, groupLifetime *big.Int, dkgResultChallengePeriodLength *big.Int, dkgResultChallengeExtraGas *big.Int, dkgResultSubmissionTimeout *big.Int, dkgSubmitterPrecedencePeriodLength *big.Int) (*types.Transaction, error) {\n\treturn _RandomBeacon.Contract.UpdateGroupCreationParameters(&_RandomBeacon.TransactOpts, groupCreationFrequency, groupLifetime, dkgResultChallengePeriodLength, dkgResultChallengeExtraGas, dkgResultSubmissionTimeout, dkgSubmitterPrecedencePeriodLength)\n}", "func NewErrGroup(ctx context.Context) (*ErrGroup, context.Context) {\n\tg, ctx := errgroup.WithContext(ctx)\n\treturn 
&ErrGroup{Group: g}, ctx\n}" ]
[ "0.523805", "0.51416165", "0.48481917", "0.48094192", "0.47957078", "0.4758264", "0.47503814", "0.47461075", "0.46588066", "0.46588066", "0.4653089", "0.4646152", "0.4639611", "0.4613281", "0.4588941", "0.4585807", "0.45691368", "0.45633745", "0.45534816", "0.4546183", "0.454311", "0.45414883", "0.44826326", "0.44541207", "0.44406393", "0.44134232", "0.4413135", "0.44103393", "0.44103393", "0.43925616", "0.43884996", "0.43658504", "0.4365064", "0.4350946", "0.43499893", "0.4344588", "0.43351653", "0.43189704", "0.43161598", "0.43125916", "0.4301306", "0.42948005", "0.42861944", "0.4285858", "0.42806247", "0.427819", "0.42780763", "0.42650425", "0.42487985", "0.4231768", "0.4229195", "0.4227178", "0.42082298", "0.4204572", "0.4198927", "0.41972864", "0.4177734", "0.4172788", "0.4167256", "0.4153266", "0.41514543", "0.41493082", "0.41428742", "0.41412696", "0.41358715", "0.41337696", "0.4127245", "0.41191196", "0.41190416", "0.41187495", "0.41131753", "0.41111082", "0.41091356", "0.4106763", "0.41044614", "0.4099687", "0.40955454", "0.4089573", "0.40890843", "0.40890843", "0.40880096", "0.4078752", "0.40771854", "0.40753734", "0.40646437", "0.4051873", "0.40505442", "0.4050095", "0.40453812", "0.4040407", "0.4038089", "0.40321574", "0.403017", "0.40280145", "0.40208566", "0.40187147", "0.4015855", "0.40086088", "0.39923948", "0.3989935" ]
0.6746478
0
ByteOrder returns the byte order for the CPU's native endianness.
func ByteOrder() binary.ByteOrder { return byteOrder }
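Illustrative note: the positive document above returns a package-level byteOrder variable that the record itself does not show. Below is a minimal sketch of how such a variable is commonly initialized; the endian package name and the uint16 memory probe are assumptions (echoing the idiom in the first negative listed further down), not part of this record.

package endian

import (
	"encoding/binary"
	"unsafe"
)

// byteOrder caches the host's native byte order; the record's accessor
// simply returns it. Initialization probes the in-memory layout of a
// uint16: on a little-endian CPU the low byte 0x02 is stored first.
var byteOrder binary.ByteOrder = func() binary.ByteOrder {
	x := uint16(0x0102)
	if *(*byte)(unsafe.Pointer(&x)) == 0x02 {
		return binary.LittleEndian
	}
	return binary.BigEndian
}()

// ByteOrder returns the byte order for the CPU's native endianness
// (identical to the record's one-line document).
func ByteOrder() binary.ByteOrder { return byteOrder }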
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func GetByteOrder() binary.ByteOrder {\n\tbuf := [2]byte{}\n\t*(*uint16)(unsafe.Pointer(&buf[0])) = uint16(0xABCD)\n\n\tswitch buf {\n\tcase [2]byte{0xCD, 0xAB}:\n\t\treturn binary.LittleEndian\n\tcase [2]byte{0xAB, 0xCD}:\n\t\treturn binary.BigEndian\n\tdefault:\n\t\tpanic(\"Could not determine native endianness.\")\n\t}\n}", "func (bio *BinaryIO) ByteOrder() binary.ByteOrder {\n\treturn bio.order\n}", "func (jbobject *JavaNioCharBuffer) Order() *JavaNioByteOrder {\n\tjret, err := jbobject.CallMethod(javabind.GetEnv(), \"order\", \"java/nio/ByteOrder\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tretconv := javabind.NewJavaToGoCallable()\n\tdst := &javabind.Callable{}\n\tretconv.Dest(dst)\n\tif err := retconv.Convert(javabind.ObjectRef(jret)); err != nil {\n\t\tpanic(err)\n\t}\n\tretconv.CleanUp()\n\tunique_x := &JavaNioByteOrder{}\n\tunique_x.Callable = dst\n\treturn unique_x\n}", "func GetEndian() ProtobufArchType {\n\tmu.RLock()\n\tdefer mu.RUnlock()\n\treturn endian_type\n\n}", "func (r *Reader) ByteOrder() binary.ByteOrder {\n\treturn r.b.order\n}", "func (r *Raster) GetByteEndianness() binary.ByteOrder {\n\tif r.Endianness == 0 {\n\t\treturn binary.BigEndian\n\t}\n\treturn binary.LittleEndian\n}", "func (d *Decoder) ByteOrder(e Endian) binary.ByteOrder {\n\tif e == LittleEndian {\n\t\treturn binary.LittleEndian\n\t} else if e == BigEndian {\n\t\treturn binary.BigEndian\n\t} else if e == DynamicEndian {\n\t\treturn d.dynamicEndian\n\t}\n\tpanic(fmt.Sprintf(\"Unknown endian %v\", e))\n}", "func (p *MessageProcessor) SetByteOrder(littleEndian bool) {\n\tp.littleEndian = littleEndian\n}", "func (d *Debugger) Endianness() (Endianness, error) {\n\tregs, err := d.ReadRegAll()\n\tif err != nil {\n\t\treturn LittleEndian, err\n\t}\n\n\treturn d.arch.endianness(regs), nil\n}", "func (p *ServerProcessor) SetByteOrder(littleEndian bool) {\n\tp.littleEndian = littleEndian\n}", "func BinaryOrder(buf []byte) binary.ByteOrder {\n\tif isTiffBigEndian(buf[:4]) {\n\t\treturn binary.BigEndian\n\t}\n\tif isTiffLittleEndian(buf[:4]) {\n\t\treturn binary.LittleEndian\n\t}\n\treturn nil\n}", "func StringToByteOrder(s string) binary.ByteOrder {\n\tswitch strings.ToLower(s) {\n\tcase \"ndr\":\n\t\treturn binary.LittleEndian\n\tcase \"xdr\":\n\t\treturn binary.BigEndian\n\tdefault:\n\t\treturn DefaultEWKBEncodingFormat\n\t}\n}", "func (p *ProtobufCodec) SetByteOrder(littleEndian bool) {\n\tp.littleEndian = littleEndian\n}", "func init() {\n\ti := 0x1\n\tb := (*[intSize]byte)(unsafe.Pointer(&i))\n\tif b[0] == 1 {\n\t\t// LittleEndian is the little-endian implementation of ByteOrder\n\t\tNativeEndian = binary.LittleEndian\n\t} else {\n\t\t// BigEndian is the Big-endian implementation of ByteOrder\n\t\tNativeEndian = binary.BigEndian\n\t}\n}", "func ToNetworkOrder32(n uint32) uint32 {\n\tif native == networkOrder {\n\t\treturn n\n\t} else {\n\t\treturn ntohl(htohl(n))\n\t}\n}", "func getBigEndian() ([]string, error) {\n\tfilename := runtime.GOROOT() + \"/src/internal/goarch/goarch.go\"\n\tfs := token.NewFileSet()\n\tfileAST, err := parser.ParseFile(fs, filename, nil, parser.Mode(0))\n\t//fileAST, err := parser.ParseFile(fs, filename, nil, parser.Trace)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t/*\n\n\t\t// BigEndian reports whether the architecture is big-endian.\n\t\tconst BigEndian = IsArmbe|IsArm64be|IsMips|IsMips64|IsPpc|IsPpc64|IsS390|IsS390x|IsSparc|IsSparc64 == 1\n\n\t*/\n\n\tif len(fileAST.Decls) == 0 {\n\t\treturn nil, fmt.Errorf(\"%s: no Decls in AST\", filename)\n\t}\n\t// 
fmt.Printf(\"%#v\\n\", fileAST.Decls)\n\tfor _, decl := range fileAST.Decls {\n\t\tdecl, ok := decl.(*ast.GenDecl)\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\t\tif decl.Tok != token.CONST {\n\t\t\tcontinue\n\t\t}\n\t\tspec := decl.Specs[0].(*ast.ValueSpec)\n\t\tif len(spec.Names) != 1 || spec.Names[0].Name != \"BigEndian\" {\n\t\t\tcontinue\n\t\t}\n\t\t// We found the const \"BigEndian\"\n\t\t// Let's extract its value!\n\t\tif len(spec.Values) != 1 {\n\t\t\treturn nil, fmt.Errorf(\"%s: single value expected for const BigEndian\", filename)\n\t\t}\n\n\t\tvar archs []string\n\n\t\tlist := spec.Values[0].(*ast.BinaryExpr).X.(*ast.BinaryExpr)\n\t\tfor {\n\t\t\tarch := strings.ToLower(strings.TrimPrefix(list.Y.(*ast.Ident).Name, \"Is\"))\n\t\t\tarchs = append(archs, arch)\n\n\t\t\tvar ok bool\n\t\t\tlist2, ok := list.X.(*ast.BinaryExpr)\n\t\t\tif !ok {\n\t\t\t\tarch = strings.ToLower(strings.TrimPrefix(list.X.(*ast.Ident).Name, \"Is\"))\n\t\t\t\tarchs = append(archs, arch)\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tlist = list2\n\t\t}\n\n\t\t// Reverse\n\t\tfor i, j := 0, len(archs)-1; i < j; i, j = i+1, j-1 {\n\t\t\tarchs[i], archs[j] = archs[j], archs[i]\n\t\t}\n\n\t\treturn archs, nil\n\t}\n\n\treturn nil, fmt.Errorf(\"%s: const BigEndian not found\", filename)\n}", "func ToNetworkOrder16(n uint16) uint16 {\n\tif native == networkOrder {\n\t\treturn n\n\t} else {\n\t\treturn ntohs(htohs(n))\n\t}\n}", "func LittleEndian(v uint32) []byte {\n\tvar b [4]byte\n\tb[0] = byte(v)\n\tb[1] = byte(v >> 8)\n\tb[2] = byte(v >> 16)\n\tb[3] = byte(v >> 24)\n\treturn b[:]\n}", "func SwapEndianness(xs []byte) []byte {\n\tys := make([]byte, len(xs))\n\tfor i, b := range xs {\n\t\tys[len(xs)-1-i] = b\n\t}\n\treturn ys\n}", "func Uint32EndiannessSwap(v uint32) uint32 {\n\treturn (v&0x000000FF)<<24 | (v&0x0000FF00)<<8 |\n\t\t(v&0x00FF0000)>>8 | (v&0xFF000000)>>24\n}", "func GetBigEndianData(data interface{}) (b []byte, err error) {\n\tvar (\n\t\tbuffer bytes.Buffer\n\t)\n\terr = binary.Write(&buffer, binary.BigEndian, data)\n\tb = buffer.Bytes()\n\treturn\n}", "func (dbv DiskBlockDevice) Order() string {\n\treturn dbv.lsd.Order()\n}", "func isLittleEndian() bool {\n\tn := uint32(0x01020304)\n\treturn *(*byte)(unsafe.Pointer(&n)) == 0x04\n}", "func isLittleEndian() bool {\n\tn := uint32(0x01020304)\n\treturn *(*byte)(unsafe.Pointer(&n)) == 0x04\n}", "func readBigEndian(buf []byte) uint64 {\n\tvar x uint64\n\tfor i := 1; i <= len(buf); i++ {\n\t\tx |= uint64(buf[i-1]) << uint(8*(len(buf)-i))\n\t}\n\treturn x\n}", "func (md MappedDisk) Order() string {\n\treturn md.sectorDisk.Order()\n}", "func GetLittleEndianBytes(v uint32, l uint32) []byte {\n\t// TODO: is v hex encoded?\n\tbuf := make([]byte, l)\n\n\tbinary.LittleEndian.PutUint32(buf, v)\n\n\treturn buf\n}", "func GetArch() []string {\n\tres := []string{runtime.GOARCH}\n\tif runtime.GOARCH == \"amd64\" {\n\t\t// Adding x86_64 manually since the uname syscall (man 2 uname)\n\t\t// is not implemented in all systems\n\t\tres = append(res, \"x86_64\")\n\t\tres = append(res, \"x64\")\n\t\tres = append(res, \"64\")\n\t}\n\treturn res\n}", "func SetEndian(val ProtobufArchType) {\n\tmu.Lock()\n\tdefer mu.Unlock()\n\tendian_type = val\n}", "func IsLittleEndian() bool {\n\tvar i int32 = 0x01020304\n\tu := unsafe.Pointer(&i)\n\tpb := (*byte)(u)\n\tb := *pb\n\treturn b == 0x04\n}", "func UseJavaEncoding() {\r\n\tbyteOrder = binary.BigEndian\r\n}", "func (m *PrintConnector) GetOperatingSystem()(*string) {\n val, err := m.GetBackingStore().Get(\"operatingSystem\")\n if err != nil {\n 
panic(err)\n }\n if val != nil {\n return val.(*string)\n }\n return nil\n}", "func (bp *BusPirate) GetMode() (int, int) {\n\treturn bp.mode, bp.modeversion\n}", "func GetOperatingSystemVersion() (string, error) {\n\treturn getValueFromOsRelease(\"VERSION_ID\")\n}", "func init() {\n\tif C.MQENC_NATIVE%2 == 0 {\n\t\tendian = binary.LittleEndian\n\t} else {\n\t\tendian = binary.BigEndian\n\t}\n}", "func (o *V0037Node) GetOperatingSystem() string {\n\tif o == nil || o.OperatingSystem == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.OperatingSystem\n}", "func UseBedrockEncoding() {\r\n\tbyteOrder = binary.LittleEndian\r\n}", "func GetOperatingSystem() (string, error) {\n\tcmd := exec.Command(\"uname\", \"-s\")\n\tosName, err := cmd.Output()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn string(osName), nil\n}", "func (o *Application) GetOrder() int32 {\n\tif o == nil {\n\t\tvar ret int32\n\t\treturn ret\n\t}\n\n\treturn o.Order\n}", "func GetSystemCodePage() (cp int) {\r\n\tvar cpInfoEx C.CPINFOEX\r\n\r\n\tif ok := C.GetCPInfoEx(C.CP_ACP, 0, &cpInfoEx); ok == C.TRUE {\r\n\t\tcp = (int)(cpInfoEx.CodePage)\r\n\t}\r\n\r\n\treturn\r\n}", "func IsOrderly(data []int32) byte {\n\tif len(data) == 0 || len(data) == 1 {\n\t\treturn 1\n\t}\n\tvar order byte = 0\n\ttemp := data[0]\n\tfor i := 1;i < len(data);i ++ {\n\t\tif data[i] < temp {\n\t\t\tif order == 0 {\n\t\t\t\torder = 2\n\t\t\t} else if order == 1 {\n\t\t\t\treturn 0\n\t\t\t} else {\n\n\t\t\t}\n\t\t\ttemp = data[i]\n\t\t} else if data[i] > temp {\n\t\t\tif order == 0 {\n\t\t\t\torder = 1\n\t\t\t} else if order == 2 {\n\t\t\t\treturn 0\n\t\t\t} else {\n\n\t\t\t}\n\t\t\ttemp = data[i]\n\t\t} else {\n\n\t\t}\n\t}\n\tif order == 0 {\n\t\treturn 1\n\t}\n\treturn order\n}", "func writeBigEndian(buf []byte, x uint64, n int) {\n\tfor i := 1; i <= n; i++ {\n\t\tbuf[i-1] = byte(x >> uint(8*(n-i)))\n\t}\n}", "func read(b []byte, order binary.ByteOrder, v reflect.Value, o *Offset) error {\n\tvar off Offset\n\tvar err error\n\tvar val reflect.Value\n\n\tif !v.CanInterface() {\n\t\t// skip unexported field\n\t\tvar size int\n\t\tsizeOfValueInBits(&size, v, false)\n\t\t*o, err = o.AddOffset(Offset{Bit: uint64(size)})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn errCannotInterface\n\t}\n\td := v.Interface()\n\n\tswitch d.(type) {\n\tcase uint8:\n\t\tret, err := GetBitsAsByte(b, *o, 8, binary.LittleEndian)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tval = reflect.ValueOf(ret[0])\n\t\toff = Offset{1, 0}\n\tcase uint16:\n\t\tret, err := GetBitsAsByte(b, *o, 16, binary.LittleEndian)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tval = reflect.ValueOf(order.Uint16(ret))\n\t\toff = Offset{2, 0}\n\tcase uint32:\n\t\tret, err := GetBitsAsByte(b, *o, 32, binary.LittleEndian)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tval = reflect.ValueOf(order.Uint32(ret))\n\t\toff = Offset{4, 0}\n\tcase uint64:\n\t\tret, err := GetBitsAsByte(b, *o, 64, binary.LittleEndian)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tval = reflect.ValueOf(order.Uint64(ret))\n\t\toff = Offset{8, 0}\n\n\tcase Bit:\n\t\tret, err := GetBitsBitEndian(b, *o, 1, order)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif ret[0] {\n\t\t\tval = reflect.ValueOf(Bit(true))\n\t\t} else {\n\t\t\tval = reflect.ValueOf(Bit(false))\n\t\t}\n\t\toff = Offset{0, 1}\n\tdefault: /* other data types */\n\t\tswitch v.Kind() {\n\t\tcase reflect.Array:\n\t\t\tif v.Len() > 0 {\n\t\t\t\tif v.Index(0).Kind() == reflect.Bool {\n\t\t\t\t\t/* Bit array */\n\t\t\t\t\t/* 
when order is Big Endian, we should read the entire bits at once */\n\t\t\t\t\tret, err := GetBitsBitEndian(b, *o, uint64(v.Len()), order)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t\t// set data\n\t\t\t\t\tfor i := 0; i < v.Len(); i++ {\n\t\t\t\t\t\tif v.Index(i).CanSet() {\n\t\t\t\t\t\t\tv.Index(i).Set(reflect.ValueOf(Bit(ret[i])))\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\t*o, err = o.AddOffset(Offset{Bit: uint64(v.Len())})\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t\treturn nil\n\t\t\t\t} else if v.Index(0).Kind() == reflect.Uint8 {\n\t\t\t\t\tret, err := GetBitsAsByte(b, *o, uint64(v.Len()*8), binary.LittleEndian)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t\tfor i := 0; i < v.Len(); i++ {\n\t\t\t\t\t\tif v.Index(i).CanSet() {\n\t\t\t\t\t\t\tif order == binary.BigEndian {\n\t\t\t\t\t\t\t\t// workaround! binary.Read doesn't support []byte in BigEndian\n\t\t\t\t\t\t\t\tv.Index(i).Set(reflect.ValueOf(ret[v.Len()-1-i]))\n\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\tv.Index(i).Set(reflect.ValueOf(ret[i]))\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\t*o, err = o.AddOffset(Offset{Byte: uint64(v.Len())})\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t\treturn nil\n\t\t\t\t} else {\n\t\t\t\t\tfor i := 0; i < v.Len(); i++ {\n\t\t\t\t\t\terr := read(b, order, v.Index(i), o)\n\t\t\t\t\t\tif err != nil && err != errCannotInterface {\n\t\t\t\t\t\t\treturn err\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t}\n\t\tcase reflect.Struct:\n\t\t\tfor i := 0; i < v.Type().NumField(); i++ {\n\t\t\t\tf := v.Type().Field(i)\n\t\t\t\tcnf := parseStructTag(f.Tag)\n\t\t\t\tif cnf != nil {\n\t\t\t\t\t/* struct tag is defined */\n\t\t\t\t\tif cnf.ignore {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t} else if cnf.skip {\n\t\t\t\t\t\tvar bitSize int\n\t\t\t\t\t\t/* only updates offset. not fill. */\n\t\t\t\t\t\tsizeOfValueInBits(&bitSize, v.Field(i), true)\n\t\t\t\t\t\t*o, err = o.AddOffset(Offset{Bit: uint64(bitSize)})\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\treturn err\n\t\t\t\t\t\t}\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t} else if cnf.endian != nil {\n\t\t\t\t\t\terr := read(b, cnf.endian, v.Field(i), o)\n\t\t\t\t\t\tif err != nil && err != errCannotInterface {\n\t\t\t\t\t\t\treturn err\n\t\t\t\t\t\t}\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\terr := read(b, order, v.Field(i), o)\n\t\t\t\tif err != nil && err != errCannotInterface {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn nil\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"Not Supported %s\", v.Kind())\n\t\t}\n\t}\n\n\t// primitives\n\tif v.CanSet() {\n\t\tv.Set(val)\n\t} else {\n\t\treturn fmt.Errorf(\"can not set %v\\n\", v)\n\t}\n\t*o, err = o.AddOffset(off)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func ReadStructBigEndian(s interface{}, r io.Reader) error {\r\n\treturn ReadStruct(s, r, binary.BigEndian)\r\n}", "func (p *Config) GetNativeProcessorType(programHash hashing.HashValue) (string, bool) {\n\tif _, ok := p.coreContracts[programHash]; ok {\n\t\treturn vmtypes.Core, true\n\t}\n\tif _, ok := p.GetNativeProcessor(programHash); ok {\n\t\treturn vmtypes.Native, true\n\t}\n\treturn \"\", false\n}", "func GetOperatingSystem() (string, error) {\n\tif prettyName, err := getValueFromOsRelease(\"PRETTY_NAME\"); err != nil {\n\t\treturn \"\", err\n\t} else if prettyName != \"\" {\n\t\treturn prettyName, nil\n\t}\n\n\t// If not set, defaults to PRETTY_NAME=\"Linux\"\n\t// c.f. 
http://www.freedesktop.org/software/systemd/man/os-release.html\n\treturn \"Linux\", nil\n}", "func (bd *BlockDAG) GetOrder() map[uint]*hash.Hash {\n\tbd.stateLock.Lock()\n\tdefer bd.stateLock.Unlock()\n\n\treturn bd.order\n}", "func order(i *x86csv.Inst) OperandOrder {\n\t// Is it Intel order already?\n\tintel := i.IntelArgs()\n\tif reflect.DeepEqual(i.GoArgs(), intel) {\n\t\treturn IntelOrder\n\t}\n\n\t// Check if it's reverse Intel.\n\tfor l, r := 0, len(intel)-1; l < r; l, r = l+1, r-1 {\n\t\tintel[l], intel[r] = intel[r], intel[l]\n\t}\n\tif reflect.DeepEqual(i.GoArgs(), intel) {\n\t\treturn ReverseIntelOrder\n\t}\n\n\t// Otherwise we could be in the bizarre special-case of 3-argument CMP instructions.\n\t//\n\t// Reference: https://github.com/golang/arch/blob/b19384d3c130858bb31a343ea8fce26be71b5998/x86/x86spec/format.go#L138-L144\n\t//\n\t//\t\t\tcase \"CMPPD\", \"CMPPS\", \"CMPSD\", \"CMPSS\":\n\t//\t\t\t\t// rotate destination to end but don't swap comparison operands\n\t//\t\t\t\tif len(args) == 3 {\n\t//\t\t\t\t\targs[0], args[1], args[2] = args[2], args[0], args[1]\n\t//\t\t\t\t\tbreak\n\t//\t\t\t\t}\n\t//\t\t\t\tfallthrough\n\t//\n\tswitch i.GoOpcode() {\n\tcase \"CMPPD\", \"CMPPS\", \"CMPSD\", \"CMPSS\":\n\t\tif len(i.GoArgs()) == 3 {\n\t\t\treturn CMP3Order\n\t\t}\n\t}\n\n\treturn UnknownOrder\n}", "func (b *BananaPhone) GetSysIDOrd(ordinal uint32) (uint16, error) {\n\tr, e := b.getSysID(\"\", ordinal, true)\n\tif e != nil {\n\t\tvar err MayBeHookedError\n\t\tif b.isAuto && errors.Is(e, &err) {\n\t\t\tvar e2 error\n\t\t\tb.banana, e2 = pe.Open(`C:\\Windows\\system32\\ntdll.dll`)\n\t\t\tif e2 != nil {\n\t\t\t\treturn 0, e2\n\t\t\t}\n\t\t\tr, e = b.getSysID(\"\", ordinal, true)\n\t\t}\n\t}\n\treturn r, e\n}", "func (r *Reg) getOSMode() Mode {\n\treturn Mode(r.CPSR & 0b11111)\n}", "func (r *ChannelsReorderPinnedForumTopicsRequest) GetOrder() (value []int) {\n\tif r == nil {\n\t\treturn\n\t}\n\treturn r.Order\n}", "func (o *SingleSelectFieldField) GetOrder() int32 {\n\tif o == nil {\n\t\tvar ret int32\n\t\treturn ret\n\t}\n\n\treturn o.Order\n}", "func TestInt16LittleBigEndian(t *testing.T) {\n\tsixteenBits := []byte{0x01, 0x00, 0x01, 0x00}\n\tsixteenBitsBuffer := bytes.NewBuffer(sixteenBits)\n\tvar intSixteen int16\n\terr := binary.Read(sixteenBitsBuffer, binary.LittleEndian, &intSixteen)\n\tif err != nil || intSixteen != 1 {\n\t\tt.Fail()\n\t}\n\terr = binary.Read(sixteenBitsBuffer, binary.BigEndian, &intSixteen)\n\tif err != nil || intSixteen != 256 {\n\t\tt.Fail()\n\t}\n}", "func (g LabeledAdjacencyList) Order() int {\n\t// Why a wrapper for len()? Mostly for Directed and Undirected.\n\t// u.Order() is a little nicer than len(u.LabeledAdjacencyList).\n\treturn len(g)\n}", "func Bcm2835_spi_setBitOrder(Order byte) {\n\tcOrder, _ := (C.uint8_t)(Order), cgoAllocsUnknown\n\tC.bcm2835_spi_setBitOrder(cOrder)\n}", "func (p Pack) Bytes() []byte {\n\treturn bigendian.Uint32ToBytes(uint32(p))\n}", "func (g AdjacencyList) Order() int {\n\t// Why a wrapper for len()? 
Mostly for Directed and Undirected.\n\t// u.Order() is a little nicer than len(u.LabeledAdjacencyList).\n\treturn len(g)\n}", "func ReadProcessMemoryAsUint32(hProcess HANDLE, lpBaseAddress uint32) (buffer uint32, err error) {\n\tdata, err := ReadProcessMemory(hProcess, lpBaseAddress, 4)\n\tif err != nil {\n\t\treturn\n\t}\n\tbuffer = binary.LittleEndian.Uint32(data)\n\treturn\n}", "func (c *Context) GetOrder() string {\n\treturn c.Order\n}", "func Write(w io.Writer, order ByteOrder, data interface{}) error {\n\t// Fast path for basic types and slices.\n\tif n := intDataSize(data); n != 0 {\n\t\tvar b [8]byte\n\t\tvar bs []byte\n\t\tif n > len(b) {\n\t\t\tbs = make([]byte, n)\n\t\t} else {\n\t\t\tbs = b[:n]\n\t\t}\n\t\tswitch v := data.(type) {\n\t\tcase *int8:\n\t\t\tbs = b[:1]\n\t\t\tb[0] = byte(*v)\n\t\tcase int8:\n\t\t\tbs = b[:1]\n\t\t\tb[0] = byte(v)\n\t\tcase []int8:\n\t\t\tfor i, x := range v {\n\t\t\t\tbs[i] = byte(x)\n\t\t\t}\n\t\tcase *uint8:\n\t\t\tbs = b[:1]\n\t\t\tb[0] = *v\n\t\tcase uint8:\n\t\t\tbs = b[:1]\n\t\t\tb[0] = byte(v)\n\t\tcase []uint8:\n\t\t\tbs = v\n\t\tcase *int16:\n\t\t\tbs = b[:2]\n\t\t\torder.PutUint16(bs, uint16(*v))\n\t\tcase int16:\n\t\t\tbs = b[:2]\n\t\t\torder.PutUint16(bs, uint16(v))\n\t\tcase []int16:\n\t\t\tfor i, x := range v {\n\t\t\t\torder.PutUint16(bs[2*i:], uint16(x))\n\t\t\t}\n\t\tcase *uint16:\n\t\t\tbs = b[:2]\n\t\t\torder.PutUint16(bs, *v)\n\t\tcase uint16:\n\t\t\tbs = b[:2]\n\t\t\torder.PutUint16(bs, v)\n\t\tcase []uint16:\n\t\t\tfor i, x := range v {\n\t\t\t\torder.PutUint16(bs[2*i:], x)\n\t\t\t}\n\t\tcase *int32:\n\t\t\tbs = b[:4]\n\t\t\torder.PutUint32(bs, uint32(*v))\n\t\tcase int32:\n\t\t\tbs = b[:4]\n\t\t\torder.PutUint32(bs, uint32(v))\n\t\tcase []int32:\n\t\t\tfor i, x := range v {\n\t\t\t\torder.PutUint32(bs[4*i:], uint32(x))\n\t\t\t}\n\t\tcase *uint32:\n\t\t\tbs = b[:4]\n\t\t\torder.PutUint32(bs, *v)\n\t\tcase uint32:\n\t\t\tbs = b[:4]\n\t\t\torder.PutUint32(bs, v)\n\t\tcase []uint32:\n\t\t\tfor i, x := range v {\n\t\t\t\torder.PutUint32(bs[4*i:], x)\n\t\t\t}\n\t\tcase *int64:\n\t\t\tbs = b[:8]\n\t\t\torder.PutUint64(bs, uint64(*v))\n\t\tcase int64:\n\t\t\tbs = b[:8]\n\t\t\torder.PutUint64(bs, uint64(v))\n\t\tcase []int64:\n\t\t\tfor i, x := range v {\n\t\t\t\torder.PutUint64(bs[8*i:], uint64(x))\n\t\t\t}\n\t\tcase *uint64:\n\t\t\tbs = b[:8]\n\t\t\torder.PutUint64(bs, *v)\n\t\tcase uint64:\n\t\t\tbs = b[:8]\n\t\t\torder.PutUint64(bs, v)\n\t\tcase []uint64:\n\t\t\tfor i, x := range v {\n\t\t\t\torder.PutUint64(bs[8*i:], x)\n\t\t\t}\n\t\t}\n\t\t_, err := w.Write(bs)\n\t\treturn err\n\t}\n\n\t// Fallback to reflect-based encoding.\n\tv := reflect.Indirect(reflect.ValueOf(data))\n\tsize := dataSize(v)\n\tif size < 0 {\n\t\treturn errors.New(\"binary.Write: invalid type \" + reflect.TypeOf(data).String())\n\t}\n\tbuf := make([]byte, size)\n\te := &encoder{order: order, buf: buf}\n\te.value(v)\n\t_, err := w.Write(buf)\n\treturn err\n}", "func bytesToUintLittleEndian(b []byte) (res uint64) {\n\tmul := uint64(1)\n\tfor i := 0; i < len(b); i++ {\n\t\tres += uint64(b[i]) * mul\n\t\tmul *= 256\n\t}\n\treturn res\n}", "func (sys *Sys) GetOsType() int {\n\tlog.Debugln(\"GetOsType ENTER\")\n\n\tosType := OsUnknown\n\tif sys.fs.DoesFileExist(\"/etc/redhat-release\") {\n\t\tosType = OsRhel\n\t} else if sys.fs.DoesFileExist(\"/etc/SuSE-release\") {\n\t\tosType = OsSuse\n\t} else if sys.fs.DoesFileExist(\"/etc/lsb-release\") {\n\t\tosType = OsUbuntu\n\t\t//\t} else if sys.fs.DoesFileExist(\"/etc/release\") {\n\t\t//\t\treturn OsCoreOs\n\t} else 
{\n\t\tout, err := sys.run.CommandOutput(\"uname -s\")\n\t\tif err == nil && strings.EqualFold(out, \"Darwin\") {\n\t\t\tosType = OsMac\n\t\t} else {\n\t\t\tlog.Warnln(\"Unable to determine OS type\")\n\t\t}\n\t}\n\n\tlog.Debugln(\"GetOsType =\", osType)\n\tlog.Debugln(\"GetOsType LEAVE\")\n\treturn osType\n}", "func Write(w io.Writer, order ByteOrder, data interface{}) error {\n\t// Fast path for basic types and slices.\n\tif n := intDataSize(data); n != 0 {\n\t\tvar b [8]byte\n\t\tvar bs []byte\n\t\tif n > len(b) {\n\t\t\tbs = make([]byte, n)\n\t\t} else {\n\t\t\tbs = b[:n]\n\t\t}\n\t\tswitch v := data.(type) {\n\t\tcase *bool:\n\t\t\tif *v {\n\t\t\t\tb[0] = 1\n\t\t\t} else {\n\t\t\t\tb[0] = 0\n\t\t\t}\n\t\tcase bool:\n\t\t\tif v {\n\t\t\t\tb[0] = 1\n\t\t\t} else {\n\t\t\t\tb[0] = 0\n\t\t\t}\n\t\tcase []bool:\n\t\t\tfor i, x := range v {\n\t\t\t\tif x {\n\t\t\t\t\tbs[i] = 1\n\t\t\t\t} else {\n\t\t\t\t\tbs[i] = 0\n\t\t\t\t}\n\t\t\t}\n\t\tcase *int8:\n\t\t\tb[0] = byte(*v)\n\t\tcase int8:\n\t\t\tb[0] = byte(v)\n\t\tcase []int8:\n\t\t\tfor i, x := range v {\n\t\t\t\tbs[i] = byte(x)\n\t\t\t}\n\t\tcase *uint8:\n\t\t\tb[0] = *v\n\t\tcase uint8:\n\t\t\tb[0] = v\n\t\tcase []uint8:\n\t\t\tbs = v\n\t\tcase *int16:\n\t\t\torder.PutUint16(bs, uint16(*v))\n\t\tcase int16:\n\t\t\torder.PutUint16(bs, uint16(v))\n\t\tcase []int16:\n\t\t\tfor i, x := range v {\n\t\t\t\torder.PutUint16(bs[2*i:], uint16(x))\n\t\t\t}\n\t\tcase *uint16:\n\t\t\torder.PutUint16(bs, *v)\n\t\tcase uint16:\n\t\t\torder.PutUint16(bs, v)\n\t\tcase []uint16:\n\t\t\tfor i, x := range v {\n\t\t\t\torder.PutUint16(bs[2*i:], x)\n\t\t\t}\n\t\tcase *int32:\n\t\t\torder.PutUint32(bs, uint32(*v))\n\t\tcase int32:\n\t\t\torder.PutUint32(bs, uint32(v))\n\t\tcase []int32:\n\t\t\tfor i, x := range v {\n\t\t\t\torder.PutUint32(bs[4*i:], uint32(x))\n\t\t\t}\n\t\tcase *uint32:\n\t\t\torder.PutUint32(bs, *v)\n\t\tcase uint32:\n\t\t\torder.PutUint32(bs, v)\n\t\tcase []uint32:\n\t\t\tfor i, x := range v {\n\t\t\t\torder.PutUint32(bs[4*i:], x)\n\t\t\t}\n\t\tcase *int64:\n\t\t\torder.PutUint64(bs, uint64(*v))\n\t\tcase int64:\n\t\t\torder.PutUint64(bs, uint64(v))\n\t\tcase []int64:\n\t\t\tfor i, x := range v {\n\t\t\t\torder.PutUint64(bs[8*i:], uint64(x))\n\t\t\t}\n\t\tcase *uint64:\n\t\t\torder.PutUint64(bs, *v)\n\t\tcase uint64:\n\t\t\torder.PutUint64(bs, v)\n\t\tcase []uint64:\n\t\t\tfor i, x := range v {\n\t\t\t\torder.PutUint64(bs[8*i:], x)\n\t\t\t}\n\t\t}\n\t\t_, err := w.Write(bs)\n\t\treturn err\n\t}\n\n\t// Fallback to reflect-based encoding.\n\tv := reflect.Indirect(reflect.ValueOf(data))\n\tsize := dataSize(v)\n\tif size < 0 {\n\t\treturn errors.New(\"binary.Write: invalid type \" + reflect.TypeOf(data).String())\n\t}\n\tbuf := make([]byte, size)\n\te := &encoder{order: order, buf: buf}\n\te.value(v)\n\t_, err := w.Write(buf)\n\treturn err\n}", "func (g *DiGraph) Order() int {\n\treturn len(g.fmx.Colptr) - 1\n}", "func (o Operator) DiskOrder() types.DiskOrder {\n\treturn types.DiskOrderRaw\n}", "func (b *Binary) Arch() string {\n\treturn b.arch\n}", "func (tsl *TrailingStopLoss) GetOrder() cbp.Order {\n\treturn tsl.Order\n}", "func (order *Order) ToBinary() ([]byte, error) {\n\treturn proto.Marshal(order)\n}", "func (o *TransactionSplit) GetOrder() int32 {\n\tif o == nil || o.Order.Get() == nil {\n\t\tvar ret int32\n\t\treturn ret\n\t}\n\treturn *o.Order.Get()\n}", "func GetBuiltinProcessorType(programHash hashing.HashValue) (string, bool) {\n\tif _, err := core.GetProcessor(programHash); err == nil {\n\t\treturn core.VMType, true\n\t}\n\tif 
_, ok := native.GetProcessor(programHash); ok {\n\t\treturn native.VMType, true\n\t}\n\treturn \"\", false\n}", "func (device *DCV2Bricklet) GetDriveMode() (mode DriveMode, err error) {\n\tvar buf bytes.Buffer\n\n\tresultBytes, err := device.device.Get(uint8(FunctionGetDriveMode), buf.Bytes())\n\tif err != nil {\n\t\treturn mode, err\n\t}\n\tif len(resultBytes) > 0 {\n\t\tvar header PacketHeader\n\n\t\theader.FillFromBytes(resultBytes)\n\n\t\tif header.Length != 9 {\n\t\t\treturn mode, fmt.Errorf(\"Received packet of unexpected size %d, instead of %d\", header.Length, 9)\n\t\t}\n\n\t\tif header.ErrorCode != 0 {\n\t\t\treturn mode, DeviceError(header.ErrorCode)\n\t\t}\n\n\t\tresultBuf := bytes.NewBuffer(resultBytes[8:])\n\t\tbinary.Read(resultBuf, binary.LittleEndian, &mode)\n\n\t}\n\n\treturn mode, nil\n}", "func (s *Uint32s) GetBinaryCodec() binary.Codec {\n\treturn &integerSliceCodec{\n\t\tsliceType: reflect.TypeOf(Uint32s{}),\n\t\tsizeOfInt: 4,\n\t}\n}", "func (p *Port) Direction() int {\n\treturn p.direction\n}", "func uint32Byte(u uint32) []byte {\n\tvar buf bytes.Buffer\n\terr := binary.Write(&buf, binary.LittleEndian, u)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn buf.Bytes()\n}", "func (o *View) GetOrder() int32 {\n\tif o == nil {\n\t\tvar ret int32\n\t\treturn ret\n\t}\n\n\treturn o.Order\n}", "func GetOS() []string {\n\tres := []string{runtime.GOOS}\n\tif runtime.GOOS == \"windows\" {\n\t\t// Adding win since some repositories release with that as the indicator of a windows binary\n\t\tres = append(res, \"win\")\n\t}\n\treturn res\n}", "func (m *UserExperienceAnalyticsDeviceStartupHistory) GetOperatingSystemVersion()(*string) {\n val, err := m.GetBackingStore().Get(\"operatingSystemVersion\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.(*string)\n }\n return nil\n}", "func (r Virtual_Guest) GetOperatingSystem() (resp datatypes.Software_Component_OperatingSystem, err error) {\n\terr = r.Session.DoRequest(\"SoftLayer_Virtual_Guest\", \"getOperatingSystem\", nil, &r.Options, &resp)\n\treturn\n}", "func ReadStructLittleEndian(s interface{}, r io.Reader) error {\r\n\treturn ReadStruct(s, r, binary.LittleEndian)\r\n}", "func (o *TelemetryDruidScanRequestAllOf) GetOrder() string {\n\tif o == nil || o.Order == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Order\n}", "func ReverseEndian(s string) (string) {\n var tempstring [64]string\n for i := 0; i < len(s) - 1; i+= 2 {\n tempstring[63 - i] = string(s[i]) + string(s[i+1])\n }\n var ret string\n for j := 0; j < len(tempstring); j++ {\n ret += tempstring[j]\n }\n return ret\n}", "func (z *Int) Bytes32() [32]byte {\n\tvar b [32]byte\n\tfor i := 0; i < 32; i++ {\n\t\tb[31-i] = byte(z[i/8] >> uint64(8*(i%8)))\n\t}\n\treturn b\n}", "func (h *header) getMode() mode {\n\treturn mode(h.LiVnMode & 0x07)\n}", "func (s *Float32s) GetBinaryCodec() binary.Codec {\n\treturn &integerSliceCodec{\n\t\tsliceType: reflect.TypeOf(Float32s{}),\n\t\tsizeOfInt: 4,\n\t}\n}", "func (*Order) Descriptor() ([]byte, []int) {\n\treturn file_waves_order_proto_rawDescGZIP(), []int{1}\n}", "func (o *KanbanViewView) GetOrder() int32 {\n\tif o == nil {\n\t\tvar ret int32\n\t\treturn ret\n\t}\n\n\treturn o.Order\n}", "func (sys *Sys) GetOsVersion() (int, int, error) {\n\tlog.Debugln(\"GetOsVersion ENTER\")\n\n\titype := sys.GetOsType()\n\tswitch itype {\n\tcase OsRhel:\n\t\tdata, errRead := ioutil.ReadFile(\"/etc/redhat-release\")\n\t\tif errRead != nil {\n\t\t\treturn 0, 0, 
errRead\n\t\t}\n\t\tlog.Debugln(string(data))\n\t\tneedles, errRegex := sys.str.RegexMatch(string(data), \" ([0-9]+\\\\.[0-9]+[\\\\.]*[0-9]*) \")\n\t\tif errRegex != nil {\n\t\t\treturn 0, 0, errRegex\n\t\t}\n\t\treturn parseVersionFromString(needles[0])\n\n\tcase OsSuse:\n\t\treturn 0, 0, common.ErrNotImplemented //TODO\n\n\tcase OsUbuntu:\n\t\tdata, errRead := ioutil.ReadFile(\"/etc/lsb-release\")\n\t\tif errRead != nil {\n\t\t\treturn 0, 0, errRead\n\t\t}\n\t\tlog.Debugln(string(data))\n\t\tneedles, errRegex := sys.str.RegexMatch(string(data), \"DISTRIB_RELEASE=([0-9]+\\\\.[0-9]+[\\\\.]*[0-9]*) \")\n\t\tif errRegex != nil {\n\t\t\treturn 0, 0, errRegex\n\t\t}\n\t\treturn parseVersionFromString(needles[0])\n\n\tcase OsCoreOs:\n\t\treturn 0, 0, common.ErrNotImplemented //TODO\n\n\tcase OsMac:\n\t\treturn 0, 0, common.ErrNotImplemented //TODO\n\t}\n\n\tlog.Debugln(\"GetOsVersion LEAVE\")\n\n\treturn 0, 0, ErrUnknownOsVersion\n}", "func DecodeRawLittleEndian(value bool) DecodeRawAttr {\n\treturn func(m optionalAttr) {\n\t\tm[\"little_endian\"] = value\n\t}\n}", "func (o *Application) GetOrderOk() (*int32, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Order, true\n}", "func (res *respondent) GetCommProtocolInfo() utils.SocketType {\n\treturn res.socketType\n}", "func GetOSInfo() string {\n\treturn \"windows\"\n}", "func BoolToByteCode(b bool) uint32 {\n\tif b {\n\t\treturn 1\n\t}\n\treturn 0\n}", "func (self *Pages) ToUint32() uint32 {\n\treturn uint32(C.wasm_memory_pages_t(*self))\n}", "func (r Virtual_Guest) GetOperatingSystemReferenceCode() (resp string, err error) {\n\terr = r.Session.DoRequest(\"SoftLayer_Virtual_Guest\", \"getOperatingSystemReferenceCode\", nil, &r.Options, &resp)\n\treturn\n}", "func (Order) EnumDescriptor() ([]byte, []int) {\n\treturn file_protos_sortType_proto_rawDescGZIP(), []int{1}\n}", "func order(r rune) int {\n\tif unicode.IsDigit(r) {\n\t\treturn 0\n\t}\n\n\tif unicode.IsLetter(r) {\n\t\treturn int(r)\n\t}\n\n\tif r == '~' {\n\t\treturn -1\n\t}\n\n\treturn int(r) + 256\n}", "func (e Event) Bytes() []byte {\n\treturn bigendian.Uint32ToBytes(uint32(e))\n}", "func (p *ZunProvider) OperatingSystem() string {\n\tif p.operatingSystem != \"\" {\n\t\treturn p.operatingSystem\n\t}\n\treturn providers.OperatingSystemLinux\n}", "func Arch() string {\n\t// list of GOARCH from https://gist.github.com/asukakenji/f15ba7e588ac42795f421b48b8aede63\n\tswitch runtime.GOARCH {\n\tcase \"386\", \"amd64\":\n\t\treturn \"x86\"\n\tcase \"arm\":\n\t\treturn \"arm\"\n\tcase \"arm64\":\n\t\treturn \"arm64\"\n\tcase \"ppc64\", \"ppc64le\":\n\t\treturn \"powerpc\"\n\tcase \"mips\", \"mipsle\", \"mips64\", \"mips64le\":\n\t\treturn \"mips\"\n\tcase \"riscv64\":\n\t\treturn \"riscv\"\n\tcase \"s390x\":\n\t\treturn \"s390\"\n\tdefault:\n\t\treturn runtime.GOARCH\n\t}\n}", "func GetDirection(pinNo int) uint32 {\n\tindex := (pinNo) / 32\n\n\tregVal := readRegistry(index)\n\n\tgpio := uint32(pinNo % 32)\n\n\tval := ((regVal >> gpio) & 0x1)\n\n\treturn val\n\n}" ]
[ "0.80799955", "0.6644619", "0.66354847", "0.6532824", "0.6508645", "0.64949846", "0.60611993", "0.59378254", "0.591465", "0.58923656", "0.58208567", "0.5719463", "0.5529042", "0.5429169", "0.5246309", "0.5219467", "0.5196728", "0.5044286", "0.5037801", "0.49660262", "0.49030742", "0.4868772", "0.47753844", "0.47753844", "0.47685343", "0.46757817", "0.46757522", "0.4657683", "0.46171758", "0.45839918", "0.45718813", "0.45466855", "0.45136845", "0.45080304", "0.44905117", "0.44765845", "0.4472395", "0.44413015", "0.4402855", "0.43553936", "0.43368864", "0.43336505", "0.43261084", "0.4314027", "0.43062225", "0.42938107", "0.4287241", "0.42864072", "0.4262786", "0.42378935", "0.4233123", "0.4232053", "0.42306992", "0.4223693", "0.42142648", "0.41996434", "0.41921312", "0.4179171", "0.41775474", "0.41768023", "0.4171208", "0.4169725", "0.415344", "0.41489798", "0.41404232", "0.4129646", "0.41201457", "0.41196024", "0.41174543", "0.41099823", "0.41070703", "0.40961942", "0.4094167", "0.40850076", "0.40798488", "0.40714324", "0.40447527", "0.40443763", "0.40420726", "0.40366575", "0.40331668", "0.4027879", "0.4026064", "0.40179956", "0.40176383", "0.4011372", "0.40066376", "0.39983627", "0.39952534", "0.39938247", "0.39896488", "0.39873922", "0.39849532", "0.39831144", "0.39828616", "0.3982499", "0.3978831", "0.39787892", "0.39655712", "0.39626217" ]
0.68111
1
Init creates a new light string
func (ls *LightString) Init() { log.Infof("Creating %v pixel light string", ls.Count) ls.Last = ls.Count // default last pixel to count n := 0 for n < ls.Count { ls.Pixels = append(ls.Pixels, &Pixel{ Color: defaultPixelColor, Brightness: defaultPixelBrightness, }) n++ } }
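Illustrative note: the Init method above presumes a LightString type and two package-level defaults that the record leaves out. The following is a hedged reconstruction of that scaffolding; the field names come from the snippet, Color's uint8 type is inferred from the FillString negative further down (which assigns a uint8 into Pixel.Color), and the Brightness type and the concrete default values are guesses.

package lights

// Scaffolding assumed around the record's Init; only the names are
// taken from the snippet, the rest is inferred or invented.
type Pixel struct {
	Color      uint8 // written by FillString in one of the negatives
	Brightness uint8 // type assumed
}

const (
	defaultPixelColor      uint8 = 0   // assumed: off
	defaultPixelBrightness uint8 = 255 // assumed: full brightness
)

type LightString struct {
	Count  int      // number of pixels on the string
	Last   int      // Init defaults this to Count
	Pixels []*Pixel // Init appends Count default pixels
}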
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (td *TextDisplay)Init(){\r\n\tfontBytes, err := ioutil.ReadFile(\"Blockstepped.ttf\")\r\n\tif err != nil {\r\n\t\tlog.Println(err)\r\n\t\treturn\r\n\t}\r\n\tvar err2 error\r\n\ttd.font, err2 = truetype.Parse(fontBytes)\r\n\tif err2 != nil {\r\n\t\tlog.Fatal(err)\r\n\t}\r\n\ttd.mplusNormalFont = truetype.NewFace(td.font, &truetype.Options{\r\n\t\tSize: 24,\r\n\t\tDPI: 72,\r\n\t\tHinting: font.HintingFull,\r\n\t})\r\n\ttd.text = \"Game Over!!!!!!!!!!!!! - Press Enter to restart!\"\r\n}", "func init(){\n\tfmt.Println(\"String tutorials\")\n}", "func (c *ChromaHighlight) init() (err error) {\n\n\t// Option handling registering formatters\n\tswitch c.formatter {\n\tcase \"gtkDirectToTextBuffer\":\n\t\tformatters.Register(\"gtkDirectToTextBuffer\", chroma.FormatterFunc(c.gtkDirectToTextBufferFormatter))\n\tcase \"gtkTextBuffer\":\n\t\tformatters.Register(\"gtkTextBuffer\", chroma.FormatterFunc(c.gtkTextBufferFormatter))\n\tcase \"pango\":\n\t\tformatters.Register(\"pango\", chroma.FormatterFunc(c.pangoFormatter))\n\t}\n\n\t// Used to parse GdkColor\n\tc.regBG = regexp.MustCompile(`bg:#[a-fA-F|0-9]{6}`)\n\tc.regFG = regexp.MustCompile(`#[a-fA-F|0-9]{6}`)\n\n\tc.RemoveTags()\n\n\t// To check if source text have been modified.\n\tc.md5SizeAnalyze = 1024 // Set to 0 means there is no limit\n\n\tswitch c.srcBuff {\n\tcase nil:\n\t\tc.textTagTable, err = c.txtBuff.GetTagTable()\n\tdefault:\n\t\tc.textTagTable, err = c.srcBuff.GetTagTable()\n\t}\n\treturn\n}", "func NewFromStr(contents string) *Render {\n\treturn &Render{\n\t\tLines: []Line{\n\t\t\tLine{\n\t\t\t\tContent: \"one\",\n\t\t\t},\n\t\t\tLine{\n\t\t\t\tContent: \"two\",\n\t\t\t},\n\t\t},\n\t}\n}", "func init() {\n\tflag.Usage = usage\n\t// NOTE: This next line is key you have to call flag.Parse() for the command line\n\t// options or \"flags\" that are defined in the glog module to be picked up.\n\tflag.Parse()\n\n\tmodelName = \"RGB-LIGHT\"\n\tDeviceName = \"rgb-light-device\"\n\tDeviceName = os.Getenv(\"DEVICE_NAME\")\n\n\tMQTTURL = \"tcp://127.0.0.1:1884\"\n\trgb = []string{\"red-pwm\", \"green-pwm\", \"blue-pwm\"}\n\tdefaultRGBValue = []int{50, 50, 50}\n\trpin, _ := strconv.Atoi(os.Getenv(\"RPIN\"))\n\tgpin, _ := strconv.Atoi(os.Getenv(\"GPIN\"))\n\tbpin, _ := strconv.Atoi(os.Getenv(\"BPIN\"))\n\trgbPinNumber = []int{rpin, gpin, bpin}\n\n\tglog.Info(\"Init MQTT client...\")\n\tClientOpts = HubClientInit(MQTTURL, \"eventbus\", \"\", \"\")\n\tClient = MQTT.NewClient(ClientOpts)\n\tif Token_client = Client.Connect(); Token_client.Wait() && Token_client.Error() != nil {\n\t\tglog.Error(\"client.Connect() Error is \", Token_client.Error())\n\t}\n\terr := LoadConfigMap()\n\tif err != nil {\n\t\tglog.Error(errors.New(\"Error while reading from config map \" + err.Error()))\n\t\tos.Exit(1)\n\t}\n}", "func (o *Sty) Init() {\n\to.Fc = \"#edf5ff\"\n\to.Ec = \"black\"\n\to.Lw = 1\n}", "func Init(debug bool) {\n\tif debug {\n\t\tlog.Println(colorInfo + \"Copy On Lan\")\n\t\tlog.Println(colorInfo + \"Debug Mode\\n\")\n\t\tlog.Println(colorInfo + \"Copy On Lan\")\n\t\tlog.Println(colorInfo + \"Debug Mode\")\n\t} else {\n\t\tfmt.Println(\"Copy On Lan\")\n\t}\n}", "func (ls *LightString) Render() {\n\tlog.Debug(\"Rendering string\")\n}", "func Init() (s Self) {\n\ts.Capitals = readJSON(capitalsEmbed)\n\treturn s\n}", "func (l LangPackString) construct() LangPackStringClass { return &l }", "func (i *Iter) InitString(f Form, src string) {\n\ti.p = 0\n\tif len(src) == 0 {\n\t\ti.setDone()\n\t\ti.rb.nsrc = 0\n\t\treturn\n\t}\n\ti.multiSeg = 
nil\n\ti.rb.initString(f, src)\n\ti.next = i.rb.f.nextMain\n\ti.asciiF = nextASCIIString\n\ti.info = i.rb.f.info(i.rb.src, i.p)\n\ti.rb.ss.first(i.info)\n}", "func (ld *LEDraw) Init() {\n\tif ld.ImgSize.X == 0 || ld.ImgSize.Y == 0 {\n\t\tld.Defaults()\n\t}\n\tif ld.Image != nil {\n\t\tcs := ld.Image.Bounds().Size()\n\t\tif cs != ld.ImgSize {\n\t\t\tld.Image = nil\n\t\t}\n\t}\n\tif ld.Image == nil {\n\t\tld.Image = image.NewRGBA(image.Rectangle{Max: ld.ImgSize})\n\t}\n\tld.Render.Init(ld.ImgSize.X, ld.ImgSize.Y, ld.Image)\n\tld.Paint.Defaults()\n\tld.Paint.StrokeStyle.Width.SetPct(ld.Width)\n\tld.Paint.StrokeStyle.Color.SetName(string(ld.LineColor))\n\tld.Paint.FillStyle.Color.SetName(string(ld.BgColor))\n\tld.Paint.SetUnitContextExt(ld.ImgSize)\n}", "func Init(title string) {\n\terr := termbox.Init()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tlines = []line{\n\t\t[]segment{{text: title}},\n\t}\n\tdraw(modeWorking)\n}", "func init() {\n\tconffyle := os.Args[1]\n\tfic, ficerr := ioutil.ReadFile(conffyle)\n\tif ficerr != nil {\n\t\tfmt.Printf(\"error ioutil : %v \\n\", ficerr)\n\t}\n\t_ = json.Unmarshal([]byte(fic), &Conf)\n\tswitcherfyle := Conf.Switcherconf\n\tfichier, _ := ioutil.ReadFile(switcherfyle)\n\t_ = json.Unmarshal([]byte(fichier), &switchList)\n\tfmt.Printf(\"Init summary\\n=================\\n\")\n\tfmt.Printf(\"Received parameter:%v\\n\", conffyle)\n\tfmt.Printf(\"Param file title :%v\\n\", string(fic))\n\tfmt.Printf(\"Switch config file :%v\\n\", switcherfyle)\n\tfmt.Printf(\"Switch file content:%v\\n\", string(fichier))\n}", "func (g *Glutton) Init() (err error) {\n\n\tctx := context.Background()\n\tg.ctx, g.cancel = context.WithCancel(ctx)\n\n\tgluttonServerPort := uint(g.conf.GetInt(\"glutton_server\"))\n\n\t// Initiate the freki processor\n\tg.processor, err = freki.New(viper.GetString(\"interface\"), g.rules, nil)\n\tif err != nil {\n\t\treturn\n\t}\n\n\t// Initiating glutton server\n\tg.processor.AddServer(freki.NewUserConnServer(gluttonServerPort))\n\t// Initiating log producer\n\tif g.conf.GetBool(\"enableGollum\") {\n\t\tg.producer = producer.Init(g.id.String(), g.conf.GetString(\"gollumAddress\"))\n\t}\n\t// Initiating protocol handlers\n\tg.mapProtocolHandlers()\n\tg.registerHandlers()\n\n\terr = g.processor.Init()\n\tif err != nil {\n\t\treturn\n\t}\n\n\treturn nil\n}", "func (shell *FishShell) Init() string {\n\treturn packr.NewBox(\"assets\").String(\"leader.fish.sh\")\n}", "func init() {\n\tinternal.TypeString = TypeString\n\tinternal.TypeStringOn = TypeStringOn\n}", "func (rndr *Renderer) Init(snatOnly bool) error {\n\trndr.snatOnly = snatOnly\n\trndr.natGlobalCfg = &vpp_nat.Nat44Global{\n\t\tForwarding: true,\n\t}\n\tif rndr.Config == nil {\n\t\trndr.Config = config.DefaultConfig()\n\t}\n\treturn nil\n}", "func (shell *POSIXShell) Init() string {\n\treturn packr.NewBox(\"assets\").String(fmt.Sprintf(\"leader.%s.sh\", shell.name))\n}", "func (t *Text) Init(e *Element) {\n\tt.ModuleBase.Init(e)\n\n\tident := t.Ident(\"text_markdown\", \"default\")\n\tif ident == \"inherit\" {\n\t\tident = \"default\"\n\t}\n\tmkd, ok := t.Scene.Assets.Markdowns[ident]\n\tif !ok {\n\t\tpanic(t.Path() + \": markdown with name '\" + ident + \"' is not present in assets\")\n\t}\n\n\tt.Markdown = mkd\n\tt.Align = t.Props.Align(\"text_align\", txt.Left)\n\tt.Scl = t.Vec(\"text_scale\", mat.V(1, 1))\n\tt.Mask = t.RGBA(\"text_color\", mat.White)\n\tt.SelectionColor = t.RGBA(\"text_selection_color\", mat.Alpha(.5))\n\tif !t.Composed {\n\t\tt.Props.Size = t.Vec(\"text_size\", 
mat.ZV)\n\t\tt.Props.Margin = t.AABB(\"text_margin\", mat.A(4, 4, 4, 4))\n\t\tt.Background = t.RGBA(\"text_background\", t.Background)\n\t\tt.Props.Padding = t.AABB(\"text_padding\", mat.ZA)\n\t}\n\tt.NoEffects = t.Bool(\"text_no_effects\", false)\n\tt.Content = str.NString(t.Raw.Attributes.Ident(\"text\", string(t.Content)))\n\n\tt.Dirty()\n}", "func (c *Conf) InitFromString(content string) error {\n\treturn c.InitFromBytes(([]byte)(content))\n}", "func NewFromString(htmlString string) (r *Recipe, err error) {\n\tr = &Recipe{FileName: \"string\"}\n\tr.FileContent = htmlString\n\terr = r.parseHTML()\n\treturn\n}", "func initialize() string {\n\trand.Seed(time.Now().UTC().UnixNano())\n\tdictionary := openFile()\n\treturn genWord(dictionary)\n}", "func (tx *TextureBase) Init(sc *Scene) error {\n\tif tx.Tex != nil {\n\t\ttx.Tex.SetBotZero(tx.Bot0)\n\t\ttx.Tex.Activate(0)\n\t}\n\treturn nil\n}", "func (m *ModuleBase) Init(div *Element) {\n\tm.Element = div\n\tm.Background = m.RGBA(\"background\", mat.Transparent)\n}", "func (o *Note) Init() {\n\to.diff = o.State().(noteDiff)\n}", "func init() { fmt.Printf(\"Hello \") }", "func (b *Button) Init(e *Element) {\n\tb.Patch.Init(e)\n\n\ttext := b.Raw.Attributes.Ident(\"all_text\", \"\")\n\tparsed := str.NString(text)\n\tmask := b.RGBA(\"all_masks\", mat.White)\n\tregion := e.Region(\"all_regions\", e.Scene.Assets.Regions, mat.ZA)\n\tpadding := b.AABB(\"all_padding\", mat.ZA)\n\tfor i := range b.States {\n\t\tbs := &b.States[i]\n\t\tbs.Mask = mask\n\t\tbs.Region = region\n\t\tbs.Padding = padding\n\t\tbs.Text = parsed\n\t}\n\n\tfor i, s := range buttonStates {\n\t\tbs := &b.States[i]\n\t\tbs.Text = str.NString(b.Raw.Attributes.Ident(s+\"_text\", text))\n\t\tbs.Mask = b.RGBA(s+\"_mask\", bs.Mask)\n\t\tbs.Region = e.Region(s+\"_region\", e.Scene.Assets.Regions, bs.Region)\n\t\tbs.Padding = b.AABB(s+\"_padding\", bs.Padding)\n\t}\n\n}", "func (l *Logger) Init() {}", "func (ps *PrjnStru) Init(prjn emer.Prjn) {\n\tps.LeabraPrj = prjn.(LeabraPrjn)\n}", "func Init() {\n\tAttrs.Classes = \"\"\n\tAttrs.ID = \"\"\n\tAttrs.css = \"\"\n\tAttrs.attributes = \"\"\n\tAttrs.Options = expansion.Options{}\n\tids = nil\n}", "func (lh *LoginHandler) Init() string {\n\treturn \"New\"\n}", "func (l *Logger) init() {\r\n\t// Set Testing flag to TRUE if testing detected\r\n\tl.Options.Testing = (flag.Lookup(\"test.v\") != nil)\r\n\r\n\tl.timeReset()\r\n\tl.started = l.timer\r\n\tinitColors()\r\n\tinitFormatPlaceholders()\r\n}", "func (C *character) Init(name string, class string, lvl int, xpac int, needxp int, maxlife int, life int, manamax int, mana int, initiative int, damage int, money int, slotinv int, inventory []string, skill []string, head string, body string, foot string, hand string) {\n\tC.name = name\n\tC.class = class\n\tC.lvl = lvl\n\tC.xpac = xpac\n\tC.needxp = needxp\n\tC.maxlife = maxlife\n\tC.life = life\n\tC.manamax = manamax\n\tC.mana = mana\n\tC.initiative = initiative\n\tC.slotinv = slotinv\n\tC.inventory = inventory\n\tC.money = money\n\tC.skill = skill\n\tC.equip.head = head\n\tC.equip.body = body\n\tC.equip.foot = foot\n\tC.equip.hand = hand\n}", "func (s *Flattener) Init(conf map[string]string) error {\n\treturn nil\n}", "func Init(level, format string) {\n\tsetLogLevel(level)\n\tsetLogFormat(format)\n}", "func createLark() *Lark {\n\tvar talk = NewLark(\"Title Prefix\", \"5ff9b6ab-fbe3-490f-8980-71509263efe2\")\n\treturn talk\n}", "func linit() {\n\tlog.SetFormatter(&logrus.TextFormatter{\n\t\tForceColors: true,\n\t\tForceQuote: 
true,\n\t\tFullTimestamp: true,\n\t\tTimestampFormat: time.RFC1123,\n\t\tDisableSorting: false,\n\t})\n\tlog.SetOutput(os.Stdout)\n\tl(\"Logger is created\")\n}", "func Init(eng *en.RealEngine) {\n\tengine = *eng\n}", "func init() {\r\n\tDEBUG = log.New(os.Stdout, \"\\x1b[36m[DEBU]\\x1b[0m \", log.Ldate|log.Lmicroseconds|log.Lshortfile)\r\n\tINFO = log.New(os.Stdout, \"[INFO] \", log.Ldate|log.Lmicroseconds|log.Lshortfile)\r\n\tWARN = log.New(os.Stdout, \"\\x1b[33m[WARN]\\x1b[0m \", log.Ldate|log.Lmicroseconds|log.Lshortfile)\r\n\tERROR = log.New(os.Stdout, \"\\x1b[31m[ERRO]\\x1b[0m \", log.Ldate|log.Lmicroseconds|log.Lshortfile)\r\n\tFATAL = log.New(os.Stdout, \"\\x1b[44;11m[FATA]\\x1b[0m \", log.Ldate|log.Lmicroseconds|log.Lshortfile)\r\n}", "func (ctx *Context) InitStr(args string) int {\n\tca := C.CString(args)\n\tdefer C.free(unsafe.Pointer(ca))\n\treturn int(C.avfilter_init_str((*C.struct_AVFilterContext)(ctx), ca))\n}", "func (l LangPackStringDeleted) construct() LangPackStringClass { return &l }", "func (a *Area) Init(e *Element) {\n\ta.Composed = true // important for next call\n\ta.Text.Init(e)\n\n\ta.drw = a.CursorDrawer(\"cursor_drawer\", a.Scene.Assets.Cursors, defaultCursor{})\n\ta.CursorThickness = e.Float(\"cursor_thickness\", 2)\n\ta.CursorMask = e.RGBA(\"cursor_mask\", mat.White)\n\ta.Blinker = timer.Period(e.Float(\"cursor_blinking_frequency\", .6))\n\n\ta.AutoFrequency = e.Float(\"auto_frequency\", .03)\n\ta.HoldResponceSpeed = e.Float(\"hold_responce_speed\", .5)\n\n\ta.binds = map[key.Key]float64{}\n}", "func Init(name, icon string, timeout time.Duration, urgency NotificationUrgency) {\n\tnote.Name = name\n\tnote.IconPath = icon\n\tnote.Timeout = timeout\n\tnote.Urgency = urgency\n}", "func Init(strings []string) *List {\n\tif strings == nil {\n\t\treturn nil\n\t}\n\tsl := List(strings)\n\treturn &sl\n}", "func Reset(sb *strings.Builder) {\n\tsb.WriteString(string(BgDefault))\n\tsb.WriteString(string(FgDefault))\n}", "func InitLager(log lager.Logger) {\n\tif log == nil {\n\t\tLogger = initialize(\"\", \"DEBUG\", \"\", \"size\", true, 1, 10, 7)\n\t} else {\n\t\tLogger = log\n\t}\n}", "func (l *Lexer) Init(source string) {\n\tl.source = source\n\n\tl.ch = 0\n\tl.offset = 0\n\tl.tokenOffset = 0\n\tl.line = 1\n\tl.tokenLine = 1\n\tl.State = 0\n\tl.commentLevel = 0\n\tl.invalidTokenClass = InvalidTokenUnknown\n\n\tif strings.HasPrefix(source, bomSeq) {\n\t\tl.offset += len(bomSeq)\n\t}\n\n\tl.rewind(l.offset)\n}", "func (s *Stat) Init(label []byte, value float64) *Stat {\n\ts.label = s.label[:0] // clear\n\ts.label = append(s.label, label...)\n\ts.value = value\n\ts.isWarm = false\n\treturn s\n}", "func (hem *GW) Init(rootURL string, username string, password string) {\n\them.rootURL = rootURL\n\them.username = username\n\them.password = password\n\them.loadSmartMeterAttribute()\n}", "func (uni *UniformMatrix3f) Init(name string) {\n\n\tuni.name = name\n}", "func (ui *UI) Init() {\n\tui.Palettes = make(map[int]*Palette)\n\tui.Palettes[0] = GetUIPalette()\n\tui.Palettes[1] = GetProjectMegaPalette(\"assets/sprites/projectmute.png\")\n\tui.Palettes[2] = GetProjectMegaPalette(\"assets/sprites/projectmuteG.png\")\n\tui.Palettes[3] = GetProjectMegaPalette(\"assets/sprites/projectmuteY.png\")\n\tui.Palettes[4] = GetProjectMegaPalette(\"assets/sprites/projectmuteR.png\")\n\n\tui.SoundConfirm = rl.LoadSound(\"assets/sounds/confirm.mp3\")\n\tui.SoundSelect = rl.LoadSound(\"assets/sounds/select.mp3\")\n\tui.SoundCancel = rl.LoadSound(\"assets/sounds/cancel.mp3\")\n\tui.Toggles = 
make(map[string]bool)\n\tui.BuildingCache = &Building{}\n}", "func (self *Resource) Init() *Resource {\n\tif self.Expected == \"\" {\n\t\tself.Expected = \"OK\"\n\t}\n\treturn self\n}", "func (this *ClockStr) Constructor() FunctionConstructor { return NewClockStr }", "func New(value interface{}) error {\n\tif value == nil {\n\t\treturn nil\n\t}\n\treturn NewText(gconv.String(value))\n}", "func Init(s string) string {\n\tif s == \"\" {\n\t\tpanic(\"empty list\")\n\t}\n\n\t_, sz := utf8.DecodeRuneInString(s)\n\tc := utf8.RuneCountInString(s)\n\treturn s[:(sz*c)-sz]\n}", "func (native *OpenGL) Str(str string) *uint8 {\n\treturn gl.Str(str)\n}", "func (s *Speaker) Init() error { return nil }", "func (c *Command) Init(schan chan string, log chan string) {\n\tc.SChan = schan\n\tc.Log = log\n\tc.Reg = regexp.MustCompile(c.Pattern)\n}", "func (r *Route) Init(lg *logger.Logger) {\n\tr.log = lg\n\tr.log.Debug.Log(\"Route '%v' found!\", r.Name())\n}", "func Init(s string) {\n\tpath := \"config.json\"\n\n\tif len(s) > 0 {\n\t\tpath = s\n\t}\n\n\tfile, err := ioutil.ReadFile(path)\n\n\tif err != nil {\n\t\tlog.Fatal(fmt.Sprintf(\"Config error: %v\\n\", err))\n\t\treturn\n\t}\n\n\tvar config *Config\n\n\tjson.Unmarshal(file, &config)\n\n\tc = config\n\n\tif c.Title == \"\" {\n\t\tc.Title = \"Farmer\"\n\t}\n}", "func Init() {\n\tlog.Printf(\"version - %s | branch - %s | commit hash - %s | build time - %s \\n\", version, branch, commitHash, buildTime)\n\n\tlogLevel, err := strconv.Atoi(os.Getenv(\"CONFIGURATION_LOG_LEVEL\"))\n\tif err != nil {\n\t\tlog.Println(\"ERROR - Init() - Convert configLevel\")\n\t\tlog.Println(err)\n\t}\n\tLogger, err = lightning.Init(logLevel)\n\tif err != nil {\n\t\tlog.Println(\"ERROR - Init() - Init Logger\")\n\t\tlog.Println(err)\n\t}\n}", "func (n LightControl) String() string {\n\treturn \"灯光\"\n}", "func init() {\n\ttag.Register(\"el-CY\", \"Greek (Cyprus)\")\n\ttag.Register(\"el-GR\", \"Greek (Greece)\")\n\ttag.Register(\"grc-GR\", \"Ancient Greek (Greece)\")\n\ttag.Register(\"gre-GR\", \"Modern Greek (Greece)\")\n}", "func (r *Reader) Init(src string) {\n\tr.p = 0\n\tr.v = []byte(src)\n}", "func Init(userPrefs ...language.Tag) {\n\ttag, _, _ := matcher.Match(userPrefs...)\n\tswitch tag {\n\tcase language.AmericanEnglish, language.English:\n\t\tinitEnUS(tag)\n\tcase language.SimplifiedChinese, language.Chinese:\n\t\tinitZhCN(tag)\n\tdefault:\n\t\tinitEnUS(tag)\n\t}\n\tp = message.NewPrinter(tag)\n}", "func (lt *Linter) RInit(mx *mg.Ctx) {\n\tl := &lt.Linter\n\tl.Actions = lt.Actions\n\tl.Name = lt.Name\n\tl.Args = lt.Args\n\tl.Tag = lt.Tag\n\tl.Label = lt.Label\n\tl.TempDir = lt.TempDir\n\n\tlt.Linter.RInit(mx)\n}", "func (js *jsonName) Init(s string) {\n\tjs.has = true\n\tjs.name = s\n}", "func (b *Bill) lightText() {\n\tb.pdf.SetTextColor(\n\t\tb.config.Colors.ColorLight.R,\n\t\tb.config.Colors.ColorLight.G,\n\t\tb.config.Colors.ColorLight.B,\n\t)\n}", "func (ls *LightString) FillString(color uint8) {\n\tlog.Debugf(\"Filling pixels with color %v\", color)\n\tn := 0\n\tfor n < ls.Last {\n\t\tls.Pixels[n].Color = color\n\t\tlog.Debugf(\"Coloring pixel %v %v\", n, color)\n\t\tn++\n\t}\n}", "func NewText(text string) *Text {\n\tt := &Text{\n\t\tBasicEntity: ecs.NewBasic(),\n\t\tButtonControlComponent: ButtonControlComponent{},\n\t\tMouseComponent: common.MouseComponent{},\n\t\tText: text,\n\t\tFont: &common.Font{\n\t\t\tURL: \"fonts/Undefined.ttf\",\n\t\t\tFG: color.White,\n\t\t\tBG: color.Transparent,\n\t\t\tSize: 14,\n\t\t},\n\t}\n\n\tt.Font.CreatePreloaded()\n\treturn 
t\n}", "func Initialize(file string) *LongLexto {\n\treturn &LongLexto{current : 0, text : \"\", pTree : CreateTree(file), tokens : []Token{}}\n}", "func (l *Loader) init() {\n\tif l.loading == nil {\n\t\tl.loading = stringset.New(1)\n\t\tl.sources = make(map[string]string, 1)\n\t\tl.symbols = make(map[string]*Struct, 1)\n\t}\n}", "func (c *Color) SetString(str string, base color.Color) error {\n\tif len(str) == 0 { // consider it null\n\t\tc.SetToNil()\n\t\treturn nil\n\t}\n\t// pr := prof.Start(\"Color.SetString\")\n\t// defer pr.End()\n\tlstr := strings.ToLower(str)\n\tswitch {\n\tcase lstr[0] == '#':\n\t\treturn c.ParseHex(str)\n\tcase strings.HasPrefix(lstr, \"hsl(\"):\n\t\tval := lstr[4:]\n\t\tval = strings.TrimRight(val, \")\")\n\t\tformat := \"%d,%d,%d\"\n\t\tvar h, s, l int\n\t\tfmt.Sscanf(val, format, &h, &s, &l)\n\t\tc.SetHSL(float32(h), float32(s)/100.0, float32(l)/100.0)\n\tcase strings.HasPrefix(lstr, \"rgb(\"):\n\t\tval := lstr[4:]\n\t\tval = strings.TrimRight(val, \")\")\n\t\tval = strings.Trim(val, \"%\")\n\t\tvar r, g, b, a int\n\t\ta = 255\n\t\tformat := \"%d,%d,%d\"\n\t\tif strings.Count(val, \",\") == 4 {\n\t\t\tformat = \"%d,%d,%d,%d\"\n\t\t\tfmt.Sscanf(val, format, &r, &g, &b, &a)\n\t\t} else {\n\t\t\tfmt.Sscanf(val, format, &r, &g, &b)\n\t\t}\n\t\tc.SetUInt8(uint8(r), uint8(g), uint8(b), uint8(a))\n\tcase strings.HasPrefix(lstr, \"rgba(\"):\n\t\tval := lstr[5:]\n\t\tval = strings.TrimRight(val, \")\")\n\t\tval = strings.Trim(val, \"%\")\n\t\tvar r, g, b, a int\n\t\tformat := \"%d,%d,%d,%d\"\n\t\tfmt.Sscanf(val, format, &r, &g, &b, &a)\n\t\tc.SetUInt8(uint8(r), uint8(g), uint8(b), uint8(a))\n\tcase strings.HasPrefix(lstr, \"pref(\"):\n\t\tval := lstr[5:]\n\t\tval = strings.TrimRight(val, \")\")\n\t\tclr := ThePrefs.PrefColor(val)\n\t\tif clr != nil {\n\t\t\t*c = *clr\n\t\t}\n\tdefault:\n\t\tif hidx := strings.Index(lstr, \"-\"); hidx > 0 {\n\t\t\tcmd := lstr[:hidx]\n\t\t\tpctstr := lstr[hidx+1:]\n\t\t\tpct, gotpct := kit.ToFloat32(pctstr)\n\t\t\tswitch cmd {\n\t\t\tcase \"lighter\":\n\t\t\t\tcvtPctStringErr(gotpct, pctstr)\n\t\t\t\tif base != nil {\n\t\t\t\t\tc.SetColor(base)\n\t\t\t\t}\n\t\t\t\tc.SetColor(c.Lighter(pct))\n\t\t\t\treturn nil\n\t\t\tcase \"darker\":\n\t\t\t\tcvtPctStringErr(gotpct, pctstr)\n\t\t\t\tif base != nil {\n\t\t\t\t\tc.SetColor(base)\n\t\t\t\t}\n\t\t\t\tc.SetColor(c.Darker(pct))\n\t\t\t\treturn nil\n\t\t\tcase \"highlight\":\n\t\t\t\tcvtPctStringErr(gotpct, pctstr)\n\t\t\t\tif base != nil {\n\t\t\t\t\tc.SetColor(base)\n\t\t\t\t}\n\t\t\t\tc.SetColor(c.Highlight(pct))\n\t\t\t\treturn nil\n\t\t\tcase \"samelight\":\n\t\t\t\tcvtPctStringErr(gotpct, pctstr)\n\t\t\t\tif base != nil {\n\t\t\t\t\tc.SetColor(base)\n\t\t\t\t}\n\t\t\t\tc.SetColor(c.Samelight(pct))\n\t\t\t\treturn nil\n\t\t\tcase \"saturate\":\n\t\t\t\tcvtPctStringErr(gotpct, pctstr)\n\t\t\t\tif base != nil {\n\t\t\t\t\tc.SetColor(base)\n\t\t\t\t}\n\t\t\t\tc.SetColor(c.Saturate(pct))\n\t\t\t\treturn nil\n\t\t\tcase \"pastel\":\n\t\t\t\tcvtPctStringErr(gotpct, pctstr)\n\t\t\t\tif base != nil {\n\t\t\t\t\tc.SetColor(base)\n\t\t\t\t}\n\t\t\t\tc.SetColor(c.Pastel(pct))\n\t\t\t\treturn nil\n\t\t\tcase \"clearer\":\n\t\t\t\tcvtPctStringErr(gotpct, pctstr)\n\t\t\t\tif base != nil {\n\t\t\t\t\tc.SetColor(base)\n\t\t\t\t}\n\t\t\t\tc.SetColor(c.Clearer(pct))\n\t\t\t\treturn nil\n\t\t\tcase \"opaquer\":\n\t\t\t\tcvtPctStringErr(gotpct, pctstr)\n\t\t\t\tif base != nil {\n\t\t\t\t\tc.SetColor(base)\n\t\t\t\t}\n\t\t\t\tc.SetColor(c.Opaquer(pct))\n\t\t\t\treturn nil\n\t\t\tcase \"blend\":\n\t\t\t\tif 
base != nil {\n\t\t\t\t\tc.SetColor(base)\n\t\t\t\t}\n\t\t\t\tclridx := strings.Index(pctstr, \"-\")\n\t\t\t\tif clridx < 0 {\n\t\t\t\t\terr := fmt.Errorf(\"gi.Color.SetString -- blend color spec not found -- format is: blend-PCT-color, got: %v -- PCT-color is: %v\", lstr, pctstr)\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tpctstr = lstr[hidx+1 : clridx]\n\t\t\t\tpct, gotpct = kit.ToFloat32(pctstr)\n\t\t\t\tcvtPctStringErr(gotpct, pctstr)\n\t\t\t\tclrstr := lstr[clridx+1:]\n\t\t\t\tothc, err := ColorFromString(clrstr, base)\n\t\t\t\tc.SetColor(c.Blend(pct, &othc))\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\tswitch lstr {\n\t\tcase \"none\", \"off\":\n\t\t\tc.SetToNil()\n\t\t\treturn nil\n\t\tcase \"transparent\":\n\t\t\tc.SetUInt8(0xFF, 0xFF, 0xFF, 0)\n\t\t\treturn nil\n\t\tcase \"inverse\":\n\t\t\tif base != nil {\n\t\t\t\tc.SetColor(base)\n\t\t\t}\n\t\t\tc.SetColor(c.Inverse())\n\t\t\treturn nil\n\t\tdefault:\n\t\t\treturn c.SetName(lstr)\n\t\t}\n\t}\n\treturn nil\n}", "func newMaterial(name string) *material {\n\tmat := &material{name: name, tag: mat + stringHash(name)<<32}\n\tmat.kd.R, mat.kd.G, mat.kd.B, mat.tr = 1, 1, 1, 1\n\treturn mat\n}", "func (lc *letterCounter) Init() {\n\tlc.re = regexp.MustCompile(\"[a-zA-Z]\")\n}", "func New() (g *Glutton, err error) {\n\tg = &Glutton{}\n\tg.protocolHandlers = make(map[string]protocolHandlerFunc, 0)\n\tviper.SetDefault(\"var-dir\", \"/var/lib/glutton\")\n\tif err = g.makeID(); err != nil {\n\t\treturn nil, err\n\t}\n\tg.logger = NewLogger(g.id.String())\n\n\t// Loading the congiguration\n\tg.logger.Info(\"Loading configurations from: config/conf.yaml\", zap.String(\"reporter\", \"glutton\"))\n\tg.conf, err = config.Init(g.logger)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trulesPath := g.conf.GetString(\"rules_path\")\n\trulesFile, err := os.Open(rulesPath)\n\tdefer rulesFile.Close()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tg.rules, err = freki.ReadRulesFromFile(rulesFile)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn g, nil\n\n}", "func init() {\n\ttag.Register(\"ro-MD\", \"Romanian (Moldova)\")\n\ttag.Register(\"ro-RO\", \"Romanian (Romania)\")\n}", "func (a *Application) Init() {\r\n\r\n\ta.Version = oleutil.MustGetProperty(a._Application, \"Version\").ToString()\r\n\ta.Selection = &Selection{_Selection: oleutil.MustGetProperty(a._Application, \"Selection\").ToIDispatch()}\r\n}", "func Initialize(t string) error {\n\ttr, err := getTracer(t)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"getting tracer\")\n\t}\n\ttracer = tr\n\treturn nil\n}", "func (tx *TextureFile) Init(sc *Scene) error {\n\tif tx.Tex != nil {\n\t\ttx.Tex.SetBotZero(tx.Bot0)\n\t\ttx.Tex.Activate(0)\n\t\treturn nil\n\t}\n\tif tx.File == \"\" {\n\t\terr := fmt.Errorf(\"gi3d.Texture: %v File must be set to a filename to load texture from\", tx.Nm)\n\t\tlog.Println(err)\n\t\treturn err\n\t}\n\ttx.Tex = gpu.TheGPU.NewTexture2D(tx.Nm)\n\ttx.Tex.SetBotZero(tx.Bot0)\n\terr := tx.Tex.Open(string(tx.File))\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn err\n\t}\n\ttx.Tex.Activate(0)\n\treturn nil\n}", "func New() string {\n\tbytes := make([]byte, 16)\n\tsafeRandom(bytes)\n\tbytes[6] = (4 << 4) | (bytes[6] & 0xf)\n\tbytes[8] = bytes[8] & 0x3f\n\tbytes[8] = bytes[8] | 0x80\n\tbuf := make([]byte, 36)\n\thex.Encode(buf[0:8], bytes[0:4])\n\tbuf[8] = '-'\n\thex.Encode(buf[9:13], bytes[4:6])\n\tbuf[13] = '-'\n\thex.Encode(buf[14:18], bytes[6:8])\n\tbuf[18] = '-'\n\thex.Encode(buf[19:23], bytes[8:10])\n\tbuf[23] = '-'\n\thex.Encode(buf[24:], 
bytes[10:])\n\treturn string(buf)\n}", "func initOpenGL() uint32 {\n\tif err := gl.Init(); err != nil {\n\t\tpanic(err)\n\t}\n\tversion := gl.GoStr(gl.GetString(gl.VERSION))\n\tlog.Println(\"OpenGL version\", version)\n\n\tvar vertexShaderSource string\n\tvar fragmentShaderSource string\n\n\tvertexShaderSource = `\n\t#version 410\n\tlayout (location=0) in vec3 position;\n\tlayout (location=1) in vec2 texcoord;\n\tout vec2 tCoord;\n\tuniform mat4 projection;\n\tuniform mat4 world;\n\tuniform mat4 view;\n\tuniform vec2 texScale;\n\tuniform vec2 texOffset;\n\tvoid main() {\n\t\tgl_Position = projection * world * vec4(position, 1.0);\n\t\ttCoord = (texcoord+texOffset) * texScale;\n\t}\n\t` + \"\\x00\"\n\t//gl_Position = vec4(position, 10.0, 1.0) * camera * projection;\n\n\tfragmentShaderSource = `\n\t#version 410\n\tin vec2 tCoord;\n\tout vec4 frag_colour;\n\tuniform sampler2D ourTexture;\n\tuniform vec4 color;\n\tvoid main() {\n\t\t\tfrag_colour = texture(ourTexture, tCoord) * color;\n\t}\n\t` + \"\\x00\"\n\n\tprog := CreateProgram(vertexShaderSource, fragmentShaderSource)\n\n\tgl.UseProgram(prog)\n\tgl.Uniform2f(\n\t\tgl.GetUniformLocation(prog, gl.Str(\"texScale\\x00\")),\n\t\t1.0, 1.0,\n\t)\n\tgl.Uniform4f(\n\t\tgl.GetUniformLocation(prog, gl.Str(\"color\\x00\")),\n\t\t1, 1, 1, 1,\n\t)\n\n\t// line opengl program\n\tvertexShaderSource = `\n\t#version 330 core\n\tlayout (location = 0) in vec3 aPos;\n\tuniform mat4 uProjection;\n\tuniform mat4 uWorld;\n\n\tvoid main()\n\t{\n\t gl_Position = uProjection * vec4(aPos, 1.0);\n\t}` + \"\\x00\"\n\n\tfragmentShaderSource = `\n\t#version 330 core\n\tout vec4 FragColor;\n\tuniform vec3 uColor;\n\n\tvoid main()\n\t{\n\t FragColor = vec4(uColor, 1.0f);\n\t}` + \"\\x00\"\n\n\tlineProgram = CreateProgram(vertexShaderSource, fragmentShaderSource)\n\n\treturn prog\n}", "func Init() {\n\tdisplay.Clear(Screen, Background)\n\n\tresetBall(true, true)\n}", "func (sc *scanner) Init(s string) {\n\tsc.Line = \"\"\n\tsc.Column = \"\"\n\tsc.Error = nil\n\tsc.isLastColumn = false\n\tsc.s = s\n}", "func init() {\n\tlog = logging.MustGetLogger(\"bosswave\")\n\tvar format = \"%{color}%{level} %{time:Jan 02 15:04:05} %{shortfile}%{color:reset} ▶ %{message}\"\n\tvar logBackend = logging.NewLogBackend(os.Stderr, \"\", 0)\n\tlogBackendLeveled := logging.AddModuleLevel(logBackend)\n\tlogging.SetBackend(logBackendLeveled)\n\tlogging.SetFormatter(logging.MustStringFormatter(format))\n}", "func initOpenGL(drawer Drawer) uint32 {\n\terr := gl.Init()\n\tcheckNoError(err)\n\tversion := gl.GoStr(gl.GetString(gl.VERSION))\n\tlog.Println(\"OpenGL version\", version)\n\n\tprog := gl.CreateProgram()\n\tcheckNoError(drawer.LoadProgram(prog))\n\tgl.LinkProgram(prog)\n\treturn prog\n}", "func Init() {\n // host flag (required)\n Pattern.Flags().StringVarP(&host, \"host\", \"H\", \"\",\n \"the target machine's IP address\")\n Pattern.MarkFlagRequired(\"host\")\n\n // port flag (required)\n Pattern.Flags().IntVarP(&port, \"port\", \"P\", 0,\n \"the port the target service is running on\")\n Pattern.MarkFlagRequired(\"port\")\n\n // prefix and suffix flags (optional)\n Pattern.Flags().StringVarP(&pref, \"prefix\", \"p\", \"\",\n \"(optional) prefix to put before payload\")\n Pattern.Flags().StringVarP(&suff, \"suffix\", \"s\", \"\",\n \"(optional) suffix to put after payload\")\n\n // length flag (required)\n Pattern.Flags().IntVarP(&length, \"length\", \"l\", 0,\n \"the length of the cyclic pattern sent to the target\")\n Pattern.MarkFlagRequired(\"length\")\n}", "func (c *Cmd) Init() {}", 
"func init() {\n\tparser.SharedParser().RegisterFabric(UpdateFabric{})\n}", "func (b *Board) Init() {\n\t*b = Board{\n\t\t{\"● R\", \"● K\", \"● B\", \"● Q\", \"● G\", \"● B\", \"● K\", \"● R\"},\n\t\t{\"● P\", \"● P\", \"● P\", \"● P\", \"● P\", \"● P\", \"● P\", \"● P\"},\n\t\t{\" \", \" \", \" \", \" \", \" \", \" \", \" \", \" \"},\n\t\t{\" \", \" \", \" \", \" \", \" \", \" \", \" \", \" \"},\n\t\t{\" \", \" \", \" \", \" \", \" \", \" \", \" \", \" \"},\n\t\t{\" \", \" \", \" \", \" \", \" \", \" \", \" \", \" \"},\n\t\t{\"○ P\", \"○ P\", \"○ P\", \"○ P\", \"○ P\", \"○ P\", \"○ P\", \"○ P\"},\n\t\t{\"○ R\", \"○ K\", \"○ B\", \"○ Q\", \"○ G\", \"○ B\", \"○ K\", \"○ R\"},\n\t}\n}", "func init() {\n\ttag.Register(\"tn-BW\", \"Setswana (Botswana)\")\n\ttag.Register(\"tn-ZA\", \"Setswana (South Africa)\")\n}", "func init() {\n\n\tflag.StringVar(&Token, \"t\", \"\", \"Bot Token\")\n}", "func (this *NowStr) Constructor() FunctionConstructor { return NewNowStr }", "func Init(config Config, l *logrus.Logger) {\n\tif config.Level == \"\" {\n\t\tconfig.Level = \"info\"\n\t}\n\n\tlvl, err := logrus.ParseLevel(config.Level)\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\n\tl.SetLevel(lvl)\n\tif config.formatter() != nil {\n\t\tl.SetFormatter(config.formatter())\n\t}\n}", "func (s *Scene) Initialize() (e error) {\n\tfor i, object := range s.Objects {\n\t\tmaterialID := object.MaterialID()\n\t\tif materialID < 0 || materialID >= len(s.Materials) {\n\t\t\tmsg := fmt.Sprintf(\"invalid material id in object %d\", i)\n\t\t\te = errors.New(msg)\n\t\t\treturn\n\t\t}\n\t}\n\n\ts.ambientLight = raytracing.Color{}\n\tfor _, light := range s.Lights {\n\t\ts.ambientLight.Red += light.Ambient.Red\n\t\ts.ambientLight.Green += light.Ambient.Green\n\t\ts.ambientLight.Blue += light.Ambient.Blue\n\t}\n\ts.ambientLight.Red /= float64(len(s.Lights))\n\ts.ambientLight.Green /= float64(len(s.Lights))\n\ts.ambientLight.Blue /= float64(len(s.Lights))\n\treturn\n}", "func InitNew() {\n\tPos = image.Point{}\n}", "func Init() {\n\tfmt.Println(fmt.Sprintf(\"Frain version %s\", Version))\n\tfmt.Println(\"\\nA status checker for various developer tools.\")\n}", "func (me *TColorType) Set(s string) { (*xsdt.String)(me).Set(s) }", "func (s *SmartContract) InitLedger(ctx contractapi.TransactionContextInterface) error {\n\n dataAsBytes := []byte(`{}`)\n\terr := ctx.GetStub().PutState(\"MNOs\", dataAsBytes)\n\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed to put to world state. %s\", err.Error())\n\t}\n\n dataAsBytes = []byte(`{}`)\n\terr = ctx.GetStub().PutState(\"privateColDisplayName\", dataAsBytes)\n\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed to put to world state. %s\", err.Error())\n\t}\n\n dataAsBytes = []byte(`{\"collection\": [], \"sequence\": 1,\"version\": 1}`)\n\terr = ctx.GetStub().PutState(\"privateCollection\", dataAsBytes)\n\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed to put to world state. %s\", err.Error())\n\t}\n\n dataAsBytes = []byte(`{}`)\n\terr = ctx.GetStub().PutState(\"InteractionAreaSet\", dataAsBytes)\n\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed to put to world state. %s\", err.Error())\n\t}\n\n\treturn nil\n}" ]
[ "0.5770172", "0.56220156", "0.55416876", "0.55140245", "0.54732513", "0.53269196", "0.52365375", "0.520851", "0.51928884", "0.5182738", "0.5087613", "0.5065547", "0.5049686", "0.5035323", "0.50059944", "0.49973142", "0.49936852", "0.4987295", "0.497426", "0.49706402", "0.49703985", "0.49527946", "0.49486405", "0.4944976", "0.4933678", "0.49326363", "0.49248263", "0.48863372", "0.48842916", "0.48765752", "0.48664758", "0.48579773", "0.4845576", "0.48353845", "0.48332974", "0.48221907", "0.48058566", "0.480566", "0.47952995", "0.47865665", "0.4767918", "0.4754042", "0.4753607", "0.47519326", "0.47280085", "0.4724467", "0.4717533", "0.47160944", "0.47123316", "0.47064808", "0.46896863", "0.468962", "0.46886352", "0.4681044", "0.46800077", "0.4669613", "0.46683434", "0.46683407", "0.4664393", "0.466435", "0.4663939", "0.46579793", "0.46578598", "0.46546853", "0.46538827", "0.46477497", "0.4641037", "0.46375257", "0.46314535", "0.46302313", "0.46295822", "0.46280226", "0.4626336", "0.46255726", "0.46219417", "0.4618291", "0.4617594", "0.4606267", "0.46032286", "0.46024895", "0.4592421", "0.4569368", "0.4564359", "0.4564067", "0.4560917", "0.4552668", "0.45456514", "0.4541911", "0.45415273", "0.4527992", "0.45260972", "0.45254105", "0.4521371", "0.4519196", "0.45176584", "0.45173496", "0.4504494", "0.45037982", "0.45037556", "0.4502968" ]
0.7305143
0
FillString writes a color to all pixels
func (ls *LightString) FillString(color uint8) {
	log.Debugf("Filling pixels with color %v", color)
	n := 0
	for n < ls.Last {
		ls.Pixels[n].Color = color
		log.Debugf("Coloring pixel %v %v", n, color)
		n++
	}
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func PaintFill(image [][]rune, row, col int, newColor rune) {\n\tpaintFillHelper(image, row, col, image[row][col], newColor)\n}", "func (tb *Textbox) Fill(u rune) error {\n\tif !utf8.ValidRune(u) {\n\t\treturn errors.New(\"invalid rune\")\n\t}\n\n\tfor i := range tb.pixels {\n\t\ttb.pixels[i] = u\n\t}\n\treturn nil\n}", "func AsStringFill(value string) AsStringAttr {\n\treturn func(m optionalAttr) {\n\t\tm[\"fill\"] = value\n\t}\n}", "func (cons *VgaTextConsole) Fill(x, y, width, height uint32, fg, bg uint8) {\n\tvar (\n\t\tclr = (((uint16(bg) << 4) | uint16(fg)) << 8) | cons.clearChar\n\t\trowOffset, colOffset uint32\n\t)\n\n\t// clip rectangle\n\tif x == 0 {\n\t\tx = 1\n\t} else if x >= cons.width {\n\t\tx = cons.width\n\t}\n\n\tif y == 0 {\n\t\ty = 1\n\t} else if y >= cons.height {\n\t\ty = cons.height\n\t}\n\n\tif x+width-1 > cons.width {\n\t\twidth = cons.width - x + 1\n\t}\n\n\tif y+height-1 > cons.height {\n\t\theight = cons.height - y + 1\n\t}\n\n\trowOffset = ((y - 1) * cons.width) + (x - 1)\n\tfor ; height > 0; height, rowOffset = height-1, rowOffset+cons.width {\n\t\tfor colOffset = rowOffset; colOffset < rowOffset+width; colOffset++ {\n\t\t\tcons.fb[colOffset] = clr\n\t\t}\n\t}\n}", "func fill(pix []byte, c color.RGBA) {\n\tfor i := 0; i < len(pix); i += 4 {\n\t\tpix[i] = c.R\n\t\tpix[i+1] = c.G\n\t\tpix[i+2] = c.B\n\t\tpix[i+3] = c.A\n\t}\n}", "func (ele *ELEMENT) FillStyle(eval string) *ELEMENT { return ele.Attr(\"fillStyle\", eval) }", "func DrawString(d Drawable, x, y int, s string, c color.NRGBA) int {\n\tfor _, r := range s {\n\t\tDraw(d, r, x, y, c.R, c.G, c.B)\n\t\tx += 8\n\t}\n\treturn x\n}", "func WhiteString(format string, a ...interface{}) string { return colorString(format, FgWhite, a...) }", "func (b Buffer) Fill(ch rune, fg, bg Attribute) {\n\tfor x := b.Area.Min.X; x < b.Area.Max.X; x++ {\n\t\tfor y := b.Area.Min.Y; y < b.Area.Max.Y; y++ {\n\t\t\tb.Set(x, y, Cell{ch, fg, bg})\n\t\t}\n\t}\n}", "func (cv *Canvas) FillText(str string, x, y float64) {\n\tif cv.state.font.font == nil {\n\t\treturn\n\t}\n\n\tscaleX := backendbase.Vec{cv.state.transform[0], cv.state.transform[1]}.Len()\n\tscaleY := backendbase.Vec{cv.state.transform[2], cv.state.transform[3]}.Len()\n\tscale := (scaleX + scaleY) * 0.5\n\tfontSize := fixed.Int26_6(math.Round(float64(cv.state.fontSize) * scale))\n\n\t// if the font size is large or rotated or skewed in some way, use the\n\t// triangulated font rendering\n\tif fontSize > fixed.I(25) {\n\t\tcv.fillText2(str, x, y)\n\t\treturn\n\t}\n\tmat := cv.state.transform\n\tif mat[1] != 0 || mat[2] != 0 || mat[0] != mat[3] {\n\t\tcv.fillText2(str, x, y)\n\t\treturn\n\t}\n\n\tfrc := cv.getFRContext(cv.state.font, fontSize)\n\tfnt := cv.state.font.font\n\n\tstrWidth, strHeight, textOffset, str := cv.measureTextRendering(str, &x, &y, frc, scale)\n\tif strWidth <= 0 || strHeight <= 0 {\n\t\treturn\n\t}\n\n\t// make sure textImage is large enough for the rendered string\n\tif textImage == nil || textImage.Bounds().Dx() < strWidth || textImage.Bounds().Dy() < strHeight {\n\t\tvar size int\n\t\tfor size = 2; size < alphaTexSize; size *= 2 {\n\t\t\tif size >= strWidth && size >= strHeight {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif size > alphaTexSize {\n\t\t\tsize = alphaTexSize\n\t\t}\n\t\ttextImage = image.NewAlpha(image.Rect(0, 0, size, size))\n\t}\n\n\t// clear the render region in textImage\n\tfor y := 0; y < strHeight; y++ {\n\t\toff := textImage.PixOffset(0, y)\n\t\tline := textImage.Pix[off : off+strWidth]\n\t\tfor i := range line {\n\t\t\tline[i] = 
0\n\t\t}\n\t}\n\n\t// render the string into textImage\n\tcurX := x\n\tp := fixed.Point26_6{}\n\tprev, hasPrev := truetype.Index(0), false\n\tfor _, rn := range str {\n\t\tidx := fnt.Index(rn)\n\t\tif idx == 0 {\n\t\t\tprev = 0\n\t\t\thasPrev = false\n\t\t\tcontinue\n\t\t}\n\t\tif hasPrev {\n\t\t\tkern := fnt.Kern(fontSize, prev, idx)\n\t\t\tif frc.hinting != font.HintingNone {\n\t\t\t\tkern = (kern + 32) &^ 63\n\t\t\t}\n\t\t\tcurX += float64(kern) / 64\n\t\t}\n\t\tadvance, mask, offset, err := frc.glyph(idx, p)\n\t\tif err != nil {\n\t\t\tprev = 0\n\t\t\thasPrev = false\n\t\t\tcontinue\n\t\t}\n\t\tp.X += advance\n\n\t\tdraw.Draw(textImage, mask.Bounds().Add(offset).Sub(textOffset), mask, image.ZP, draw.Over)\n\n\t\tcurX += float64(advance) / 64\n\t}\n\n\t// render textImage to the screen\n\tvar pts [4]backendbase.Vec\n\tpts[0] = cv.tf(backendbase.Vec{float64(textOffset.X)/scale + x, float64(textOffset.Y)/scale + y})\n\tpts[1] = cv.tf(backendbase.Vec{float64(textOffset.X)/scale + x, float64(textOffset.Y)/scale + float64(strHeight)/scale + y})\n\tpts[2] = cv.tf(backendbase.Vec{float64(textOffset.X)/scale + float64(strWidth)/scale + x, float64(textOffset.Y)/scale + float64(strHeight)/scale + y})\n\tpts[3] = cv.tf(backendbase.Vec{float64(textOffset.X)/scale + float64(strWidth)/scale + x, float64(textOffset.Y)/scale + y})\n\n\tmask := textImage.SubImage(image.Rect(0, 0, strWidth, strHeight)).(*image.Alpha)\n\n\tcv.drawShadow(pts[:], mask, false)\n\n\tstl := cv.backendFillStyle(&cv.state.fill, 1)\n\tcv.b.FillImageMask(&stl, mask, pts)\n}", "func (tm *Term) DrawString(x, y int, s string, maxlen int, fg, bg termbox.Attribute) {\n\tif y >= tm.Size.Y || y < 0 {\n\t\treturn\n\t}\n\tfor i, r := range s {\n\t\tif i >= maxlen {\n\t\t\tbreak\n\t\t}\n\t\txp := x + i\n\t\tif xp >= tm.Size.X || xp < 0 {\n\t\t\tcontinue\n\t\t}\n\t\ttermbox.SetCell(xp, y, r, fg, bg)\n\t}\n}", "func Paint(c Color, s string) string {\n\treturn fmt.Sprintf(seq, c, s, reset)\n}", "func ColorString(code int, str string) string {\n\treturn fmt.Sprintf(\"\\x1b[%d;1m%s\\x1b[39;22m\", code, str)\n}", "func (me *TxsdPresentationAttributesColorColorInterpolation) Set(s string) { (*xsdt.String)(me).Set(s) }", "func RedString(format string, a ...interface{}) string { return colorString(format, FgRed, a...) 
}", "func Color(str string, color int) string {\n\treturn applyTransform(str, func(idx int, line string) string {\n\t\treturn fmt.Sprintf(\"%s%s%s\", getColor(color), line, RESET)\n\t})\n}", "func (p *Page) FillRGB(r, g, b float64) {\n\tfmt.Fprint(p.contents, r, g, b, \" rg \")\n}", "func ColorString(mode RenderMode, px Pixel) string {\n\tif px.alpha < TRANSPARENCY_THRESHOLD {\n\t\treturn \"\" // sentinel for transparent colors\n\t}\n\tif mode == term24bit {\n\t\tr, g, b := px.color.RGB255()\n\t\treturn fmt.Sprintf(\"%d;%d;%d\", r, g, b)\n\t}\n\tresult := 0\n\tlast := len(colors[mode]) - 1\n\tdist := ColorDistance(mode, px.color, colors[mode][last])\n\t// start from the end so higher color indices are favored in the irc palette\n\tfor i := last - 1; i >= 0; i-- {\n\t\td := ColorDistance(mode, px.color, colors[mode][i])\n\t\tif d < dist {\n\t\t\tdist = d\n\t\t\tresult = i\n\t\t}\n\t}\n\treturn strconv.Itoa(result)\n}", "func (fig figure) ColorString() string {\n\ts := \"\"\n\tfor _, printRow := range fig.Slicify() {\n\t\tif fig.color != \"\" {\n\t\t\tprintRow = colors[fig.color] + printRow + colors[\"reset\"]\n\t\t}\n\t\ts += fmt.Sprintf(\"%s\\n\", printRow)\n\t}\n\treturn s\n}", "func PaintStr(text string, fg FgColor, bg BgColor) string {\n\treturn fmt.Sprintf(\"%s%s%s%s%s\", fg, bg, text, BgDefault, FgDefault)\n}", "func (c *Color) SetString(str string, base color.Color) error {\n\tif len(str) == 0 { // consider it null\n\t\tc.SetToNil()\n\t\treturn nil\n\t}\n\t// pr := prof.Start(\"Color.SetString\")\n\t// defer pr.End()\n\tlstr := strings.ToLower(str)\n\tswitch {\n\tcase lstr[0] == '#':\n\t\treturn c.ParseHex(str)\n\tcase strings.HasPrefix(lstr, \"hsl(\"):\n\t\tval := lstr[4:]\n\t\tval = strings.TrimRight(val, \")\")\n\t\tformat := \"%d,%d,%d\"\n\t\tvar h, s, l int\n\t\tfmt.Sscanf(val, format, &h, &s, &l)\n\t\tc.SetHSL(float32(h), float32(s)/100.0, float32(l)/100.0)\n\tcase strings.HasPrefix(lstr, \"rgb(\"):\n\t\tval := lstr[4:]\n\t\tval = strings.TrimRight(val, \")\")\n\t\tval = strings.Trim(val, \"%\")\n\t\tvar r, g, b, a int\n\t\ta = 255\n\t\tformat := \"%d,%d,%d\"\n\t\tif strings.Count(val, \",\") == 4 {\n\t\t\tformat = \"%d,%d,%d,%d\"\n\t\t\tfmt.Sscanf(val, format, &r, &g, &b, &a)\n\t\t} else {\n\t\t\tfmt.Sscanf(val, format, &r, &g, &b)\n\t\t}\n\t\tc.SetUInt8(uint8(r), uint8(g), uint8(b), uint8(a))\n\tcase strings.HasPrefix(lstr, \"rgba(\"):\n\t\tval := lstr[5:]\n\t\tval = strings.TrimRight(val, \")\")\n\t\tval = strings.Trim(val, \"%\")\n\t\tvar r, g, b, a int\n\t\tformat := \"%d,%d,%d,%d\"\n\t\tfmt.Sscanf(val, format, &r, &g, &b, &a)\n\t\tc.SetUInt8(uint8(r), uint8(g), uint8(b), uint8(a))\n\tcase strings.HasPrefix(lstr, \"pref(\"):\n\t\tval := lstr[5:]\n\t\tval = strings.TrimRight(val, \")\")\n\t\tclr := ThePrefs.PrefColor(val)\n\t\tif clr != nil {\n\t\t\t*c = *clr\n\t\t}\n\tdefault:\n\t\tif hidx := strings.Index(lstr, \"-\"); hidx > 0 {\n\t\t\tcmd := lstr[:hidx]\n\t\t\tpctstr := lstr[hidx+1:]\n\t\t\tpct, gotpct := kit.ToFloat32(pctstr)\n\t\t\tswitch cmd {\n\t\t\tcase \"lighter\":\n\t\t\t\tcvtPctStringErr(gotpct, pctstr)\n\t\t\t\tif base != nil {\n\t\t\t\t\tc.SetColor(base)\n\t\t\t\t}\n\t\t\t\tc.SetColor(c.Lighter(pct))\n\t\t\t\treturn nil\n\t\t\tcase \"darker\":\n\t\t\t\tcvtPctStringErr(gotpct, pctstr)\n\t\t\t\tif base != nil {\n\t\t\t\t\tc.SetColor(base)\n\t\t\t\t}\n\t\t\t\tc.SetColor(c.Darker(pct))\n\t\t\t\treturn nil\n\t\t\tcase \"highlight\":\n\t\t\t\tcvtPctStringErr(gotpct, pctstr)\n\t\t\t\tif base != nil 
{\n\t\t\t\t\tc.SetColor(base)\n\t\t\t\t}\n\t\t\t\tc.SetColor(c.Highlight(pct))\n\t\t\t\treturn nil\n\t\t\tcase \"samelight\":\n\t\t\t\tcvtPctStringErr(gotpct, pctstr)\n\t\t\t\tif base != nil {\n\t\t\t\t\tc.SetColor(base)\n\t\t\t\t}\n\t\t\t\tc.SetColor(c.Samelight(pct))\n\t\t\t\treturn nil\n\t\t\tcase \"saturate\":\n\t\t\t\tcvtPctStringErr(gotpct, pctstr)\n\t\t\t\tif base != nil {\n\t\t\t\t\tc.SetColor(base)\n\t\t\t\t}\n\t\t\t\tc.SetColor(c.Saturate(pct))\n\t\t\t\treturn nil\n\t\t\tcase \"pastel\":\n\t\t\t\tcvtPctStringErr(gotpct, pctstr)\n\t\t\t\tif base != nil {\n\t\t\t\t\tc.SetColor(base)\n\t\t\t\t}\n\t\t\t\tc.SetColor(c.Pastel(pct))\n\t\t\t\treturn nil\n\t\t\tcase \"clearer\":\n\t\t\t\tcvtPctStringErr(gotpct, pctstr)\n\t\t\t\tif base != nil {\n\t\t\t\t\tc.SetColor(base)\n\t\t\t\t}\n\t\t\t\tc.SetColor(c.Clearer(pct))\n\t\t\t\treturn nil\n\t\t\tcase \"opaquer\":\n\t\t\t\tcvtPctStringErr(gotpct, pctstr)\n\t\t\t\tif base != nil {\n\t\t\t\t\tc.SetColor(base)\n\t\t\t\t}\n\t\t\t\tc.SetColor(c.Opaquer(pct))\n\t\t\t\treturn nil\n\t\t\tcase \"blend\":\n\t\t\t\tif base != nil {\n\t\t\t\t\tc.SetColor(base)\n\t\t\t\t}\n\t\t\t\tclridx := strings.Index(pctstr, \"-\")\n\t\t\t\tif clridx < 0 {\n\t\t\t\t\terr := fmt.Errorf(\"gi.Color.SetString -- blend color spec not found -- format is: blend-PCT-color, got: %v -- PCT-color is: %v\", lstr, pctstr)\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tpctstr = lstr[hidx+1 : clridx]\n\t\t\t\tpct, gotpct = kit.ToFloat32(pctstr)\n\t\t\t\tcvtPctStringErr(gotpct, pctstr)\n\t\t\t\tclrstr := lstr[clridx+1:]\n\t\t\t\tothc, err := ColorFromString(clrstr, base)\n\t\t\t\tc.SetColor(c.Blend(pct, &othc))\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\tswitch lstr {\n\t\tcase \"none\", \"off\":\n\t\t\tc.SetToNil()\n\t\t\treturn nil\n\t\tcase \"transparent\":\n\t\t\tc.SetUInt8(0xFF, 0xFF, 0xFF, 0)\n\t\t\treturn nil\n\t\tcase \"inverse\":\n\t\t\tif base != nil {\n\t\t\t\tc.SetColor(base)\n\t\t\t}\n\t\t\tc.SetColor(c.Inverse())\n\t\t\treturn nil\n\t\tdefault:\n\t\t\treturn c.SetName(lstr)\n\t\t}\n\t}\n\treturn nil\n}", "func (me *TxsdPresentationAttributesColorColorRendering) Set(s string) { (*xsdt.String)(me).Set(s) }", "func (fb *FrameBuffer) Fill(rect _core.Rect, c color.Color) {\n\tfor i := rect.Min.Y; i < rect.Max.Y; i++ {\n\t\tfor j := rect.Min.X; j < rect.Max.X; j++ {\n\t\t\tfb.SetColorAt(int(j), int(i), c)\n\t\t}\n\t}\n}", "func (me *TSVGColorType) Set(s string) { (*xsdt.String)(me).Set(s) }", "func FillString(message string, size int) string {\n\tmissingPositions := size - len(message)\n\treturn message + strings.Repeat(PADDING_CHARACTER, missingPositions)\n}", "func (ls *LightString) SetColor(position int, color uint8) error {\n\tif position > ls.Last {\n\t\tlog.Errorf(\"SetColor: last valid pixel is #%v\", ls.Last)\n\t\treturn fmt.Errorf(\"SetColor: last valid pixel is #%v\", ls.Last)\n\t}\n\tlog.Debugf(\"Coloring pixel %v %v\", position, color)\n\tls.Pixels[position].Color = color\n\treturn nil\n}", "func fillString(t *testing.T, testname string, b *Builder, s string, n int, fus string) string {\n\tcheckRead(t, testname+\" (fill 1)\", b, s)\n\tfor ; n > 0; n-- {\n\t\tm, err := b.WriteString(fus)\n\t\tif m != len(fus) {\n\t\t\tt.Errorf(testname+\" (fill 2): m == %d, expected %d\", m, len(fus))\n\t\t}\n\t\tif err != nil {\n\t\t\tt.Errorf(testname+\" (fill 3): err should always be nil, found err == %s\", err)\n\t\t}\n\t\ts += fus\n\t\tcheckRead(t, testname+\" (fill 4)\", b, s)\n\t}\n\treturn s\n}", "func (me *TColorType) Set(s string) { (*xsdt.String)(me).Set(s) }", "func (s 
statement) colorString() string {\n\tout := make([]string, 0, len(s)+2)\n\tfor _, t := range s {\n\t\tout = append(out, t.formatColor())\n\t}\n\treturn strings.Join(out, \"\")\n}", "func paintFill(image [][]string, c string, y, x int) [][]string {\n\t//range check\n\tif y > len(image)-1 {\n\t\treturn image\n\t}\n\tif x > len(image[y])-1 {\n\t\treturn image\n\t}\n\t//dupe color check\n\tif image[y][x] == c {\n\t\treturn image\n\t}\n\t//identify origin color\n\torig := image[y][x]\n\t//color origin\n\tmImage := image\n\tmImage[y][x] = c\n\t//check for valid left\n\tif x > 0 && mImage[y][x-1] == orig {\n\t\tmImage = paintFill(mImage, c, y, x-1)\n\t}\n\t//check for valid right\n\tif x < len(mImage[y])-1 && mImage[y][x+1] == orig {\n\t\tmImage = paintFill(mImage, c, y, x+1)\n\t}\n\t//check for valid up\n\tif y > 0 && mImage[y-1][x] == orig {\n\t\tmImage = paintFill(mImage, c, y-1, x)\n\t}\n\t//check for valid down\n\tif y < len(mImage)-1 && mImage[y+1][x] == orig {\n\t\tmImage = paintFill(mImage, c, y+1, x)\n\t}\n\treturn mImage\n}", "func GreenString(format string, a ...interface{}) string { return colorString(format, FgGreen, a...) }", "func (c *Color) String() string {\n\treturn fmt.Sprintf(\"#%02x%02x%02x,\", c.R, c.G, c.B)\n}", "func (win *Window) Fill(c color.Color) {\n\tC.sfRenderWindow_clear(win.win, sfmlColor(c))\n}", "func PaintText(color string, text string) string {\n\treturn color + text + ANSIReset\n}", "func C(str, color string) string {\n\tif runtime.GOOS == \"windows\" {\n\t\treturn str\n\t}\n\treturn ansi.Color(fmt.Sprint(str), color)\n}", "func hexstyle(s string) string {\n\tif len(s) == 9 {\n\t\to, err := hex.DecodeString(s[7:9])\n\t\tif err == nil {\n\t\t\top := float64(o[0]) / 255.0\n\t\t\treturn fmt.Sprintf(hexfillfmt, s[0:7], op)\n\t\t}\n\t}\n\treturn \"fill:\" + s\n}", "func HiRedString(format string, a ...interface{}) string { return colorString(format, FgHiRed, a...) }", "func (dw *DrawingWand) SetFillPatternURL(fillUrl string) {\n\tcstr := C.CString(fillUrl)\n\tdefer C.free(unsafe.Pointer(cstr))\n\tC.MagickDrawSetFillPatternURL(dw.dw, cstr)\n}", "func YellowString(format string, a ...interface{}) string { return colorString(format, FgYellow, a...) 
}", "func regionFill(x, y, w, h openvg.VGfloat, color string) {\n\topenvg.FillColor(color)\n\topenvg.Rect(x, y, w, h)\n\topenvg.FillColor(textcolor)\n}", "func PrintString(x, y int, fg, bg termbox.Attribute, msg string) {\n\tfor _, c := range msg {\n\t\ttermbox.SetCell(x, y, c, termbox.ColorWhite, bg)\n\t\tx++\n\t}\n}", "func Paint(sb *strings.Builder, text string, fg FgColor, bg BgColor) {\n\tsb.WriteString(string(fg))\n\tsb.WriteString(string(bg))\n\ttrimmed := strings.TrimSuffix(text, \"\\n\")\n\tsb.WriteString(trimmed)\n\tsb.WriteString(string(BgDefault))\n\tsb.WriteString(string(FgDefault))\n\tif trimmed != text {\n\t\tsb.WriteByte('\\n')\n\t}\n}", "func (m RGBImage) Fill(vl RGB) {\n\tl := m.Area()\n\tfor i := 0; i < l; i++ {\n\t\tm.Pixels[i] = vl\n\t}\n}", "func (d *Device) FillScreen(c color.RGBA) {\n\td.startWrite()\n\td.fillScreen(c)\n\td.endWrite()\n}", "func (c *Color) String() string {\n\tif c == nil {\n\t\treturn \"nil\"\n\t}\n\treturn fmt.Sprintf(\"R: %v G: %v B: %v A: %v\", c.R, c.G, c.B, c.A)\n}", "func (m GrayImage) Fill(vl byte) {\n\tl := m.Area()\n\tfor i := 0; i < l; i++ {\n\t\tm.Pixels[i] = vl\n\t}\n}", "func PaintStrBg(text string, bg BgColor) string {\n\treturn fmt.Sprintf(\"%s%s%s\", bg, text, BgDefault)\n}", "func randcolor() string {\n\trgb := []byte{0, 0, 0} // read error returns black\n\trand.Read(rgb)\n\treturn fmt.Sprintf(\"fill:rgb(%d,%d,%d)\", rgb[0], rgb[1], rgb[2])\n}", "func (me TxsdPresentationAttributesColorColorInterpolation) String() string {\n\treturn xsdt.String(me).String()\n}", "func (obj *Device) ColorFill(\n\tsurface *Surface,\n\trect *RECT,\n\tcolor COLOR,\n) Error {\n\tret, _, _ := syscall.Syscall6(\n\t\tobj.vtbl.ColorFill,\n\t\t4,\n\t\tuintptr(unsafe.Pointer(obj)),\n\t\tuintptr(unsafe.Pointer(surface)),\n\t\tuintptr(unsafe.Pointer(rect)),\n\t\tuintptr(color),\n\t\t0,\n\t\t0,\n\t)\n\treturn toErr(ret)\n}", "func (Screen *ScreenManager) Color(str string, color int) string {\n\treturn applyScreenTransform(str, func(idx int, line string) string {\n\t\treturn fmt.Sprintf(\"%s%s%s\", getScreenColor(color), line, RESET)\n\t})\n}", "func decorate(s string, color string) string {\n\tswitch color {\n\tcase \"green\":\n\t\ts = \"\\x1b[0;32m\" + s\n\tcase \"red\":\n\t\ts = \"\\x1b[0;31m\" + s\n\tdefault:\n\t\treturn s\n\t}\n\treturn s + \"\\x1b[0m\"\n}", "func (color Color) ansiString(foreground bool) string {\n\tvalue := color.colorValue()\n\n\tfgBgMarker := \"3\"\n\tif !foreground {\n\t\tfgBgMarker = \"4\"\n\t}\n\n\tif color.colorType() == colorType16 {\n\t\tif value < 8 {\n\t\t\treturn fmt.Sprint(\"\\x1b[\", fgBgMarker, value, \"m\")\n\t\t} else if value <= 15 {\n\t\t\tfgBgMarker := \"9\"\n\t\t\tif !foreground {\n\t\t\t\tfgBgMarker = \"10\"\n\t\t\t}\n\t\t\treturn fmt.Sprint(\"\\x1b[\", fgBgMarker, value-8, \"m\")\n\t\t}\n\t}\n\n\tif color.colorType() == colorType256 {\n\t\tif value <= 255 {\n\t\t\treturn fmt.Sprint(\"\\x1b[\", fgBgMarker, \"8;5;\", value, \"m\")\n\t\t}\n\t}\n\n\tif color.colorType() == colorType24bit {\n\t\tred := (value & 0xff0000) >> 16\n\t\tgreen := (value & 0xff00) >> 8\n\t\tblue := value & 0xff\n\n\t\treturn fmt.Sprint(\"\\x1b[\", fgBgMarker, \"8;2;\", red, \";\", green, \";\", blue, \"m\")\n\t}\n\n\tif color.colorType() == colorTypeDefault {\n\t\treturn fmt.Sprint(\"\\x1b[\", fgBgMarker, \"9m\")\n\t}\n\n\tpanic(fmt.Errorf(\"unhandled color type=%d value=%#x\", color.colorType(), value))\n}", "func (c *Color) String() string {\n\treturn \"c(\" + floatToString(c.r, 8) + \",\" + floatToString(c.g, 8) + \",\" + floatToString(c.b, 8) 
+ \")\"\n}", "func (c *Canvas) SetColor(row, col int, color utils.Color) { //uint8\n\t//sadly a lot of terminals don't support true colors yet..\n\t//in the future this would be a better alternative:\n\t//c.formatting[row][col] = fmt.Sprintf(\"38;2;%d;%d;%d;1\", r, g, b) //+= ?\n\t//for now.. we use the basic(only) alternative\n\tnumber := 16 + 36*color.R + 6*color.G + color.B\n\tc.SetAnsi(row, col, fmt.Sprintf(\"38;5;%d\", number))\n}", "func (c RGB) String() string {\n\treturn fmt.Sprintf(\"#%02X%02X%02X\", c.R, c.G, c.B)\n}", "func PaintStrFg(text string, fg FgColor) string {\n\treturn fmt.Sprintf(\"%s%s%s\", fg, text, FgDefault)\n}", "func ColorPrint(s string, i int) {\n\tkernel32 := syscall.NewLazyDLL(\"kernel32.dll\")\n\tproc := kernel32.NewProc(\"SetConsoleTextAttribute\")\n\thandle, _, _ := proc.Call(uintptr(syscall.Stdout), uintptr(i))\n\tfmt.Print(s)\n\thandle, _, _ = proc.Call(uintptr(syscall.Stdout), uintptr(7))\n\tCloseHandle := kernel32.NewProc(\"CloseHandle\")\n\tCloseHandle.Call(handle)\n}", "func (canvas *Canvas) Fill(p *Path) {\n\tio.Copy(canvas.contents, &p.buf)\n\twriteCommand(canvas.contents, \"f\")\n}", "func (c *Color) HexString() string {\n\tif c == nil {\n\t\treturn \"nil\"\n\t}\n\treturn fmt.Sprintf(\"#%02X%02X%02X%02X\", c.R, c.G, c.B, c.A)\n}", "func Fill(imageBytes []byte, width, height int, maxBytes int, anchor imaging.Anchor) ([]byte, string, error) {\n\treturn process(imageBytes, maxBytes, func(image image.Image) image.Image {\n\t\treturn imaging.Fill(image, width, height, anchor, imaging.MitchellNetravali)\n\t})\n}", "func WhiteString(format string, a ...interface{}) (s string) {\n\ts = colorString(FgWhite, format, a...)\n\treturn\n}", "func BlueString(format string, a ...interface{}) string { return colorString(format, FgBlue, a...) 
}", "func (t *TextGame) drawStr(x int, y int, str string, textColor tcell.Style) {\n\tsizeX, sizeY := t.screen.Size()\n\tif (x < 0) || (y < 0) || (y > sizeY) {\n\t\treturn\n\t}\n\tfor row := 0; row < len(str); row++ {\n\t\tscreenX := x + row\n\t\tif screenX > sizeX {\n\t\t\tbreak\n\t\t}\n\t\tt.screen.SetContent(screenX, y, rune(str[row]), nil, textColor)\n\t}\n}", "func (c Colour) String() string {\n\treturn fmt.Sprintf(\"R: %d G: %d B: %d L: %d (#%02X%02X%02X%02X)\", c.R, c.G, c.B, c.L, c.R, c.G, c.B, c.L)\n}", "func (self *TraitPixbuf) Fill(pixel uint32) {\n\tC.gdk_pixbuf_fill(self.CPointer, C.guint32(pixel))\n\treturn\n}", "func (image *Image) Fill(dimensions *Dimensions, dest string) {\n\tfmt.Println(\"[Fill] \", image.Filename, dimensions)\n\n\tcmd := exec.Command(\"convert\", image.Filename, \"-auto-orient\", \"-resize\", dimensions.String+\"^\", \"-gravity\", \"center\", \"-extent\", dimensions.String, \"-quality\", \"80\", \"-strip\", \"-depth\", \"8\", dest)\n\tfmt.Println(\"[Fill] execute:\", cmd.Args)\n\tout, err := cmd.CombinedOutput()\n\n\tif err != nil {\n\t\tfmt.Println(\"[Fill] error: \", string(out[:]), err)\n\t}\n}", "func (vr *vectorRenderer) Fill() {\n\tvr.drawPath(vr.s.SVGFill())\n}", "func fillString(retunString string, toLength int) string {\n\tfor {\n\t\tlengtString := len(retunString)\n\t\tif lengtString < toLength {\n\t\t\tretunString = retunString + \":\"\n\t\t\tcontinue\n\t\t}\n\t\tbreak\n\t}\n\treturn retunString\n}", "func fillString(retunString string, toLength int) string {\n\tfor {\n\t\tlengtString := len(retunString)\n\t\tif lengtString < toLength {\n\t\t\tretunString = retunString + \":\"\n\t\t\tcontinue\n\t\t}\n\t\tbreak\n\t}\n\treturn retunString\n}", "func fillString(retunString string, toLength int) string {\n\tfor {\n\t\tlengtString := len(retunString)\n\t\tif lengtString < toLength {\n\t\t\tretunString = retunString + \":\"\n\t\t\tcontinue\n\t\t}\n\t\tbreak\n\t}\n\treturn retunString\n}", "func (c *Color) Set(value string) error {\n\n\tif !strings.HasPrefix(value, \"#\") {\n\t\treturn errors.New(\"Colours must start with a #\")\n\t}\n\n\tchars := strings.Split(value, \"\")\n\n\tif len(chars) != 7 {\n\t\treturn errors.New(\"Colour string is not the right length\")\n\t}\n\n\trString := strings.Join(chars[1:3], \"\")\n\tgString := strings.Join(chars[3:5], \"\")\n\tbString := strings.Join(chars[5:7], \"\")\n\n\tr, err := strconv.ParseUint(rString, 16, 32)\n\tif err != nil {\n\t\treturn errors.New(\"Invalid hexadecimal number (red)\")\n\t}\n\n\tg, err := strconv.ParseUint(gString, 16, 32)\n\tif err != nil {\n\t\treturn errors.New(\"Invalid hexadecimal number (green)\")\n\t}\n\n\tb, err := strconv.ParseUint(bString, 16, 32)\n\tif err != nil {\n\t\treturn errors.New(\"Invalid hexadecimal number (blue)\")\n\t}\n\n\tc.R = uint8(r)\n\tc.G = uint8(g)\n\tc.B = uint8(b)\n\tc.A = 255\n\n\treturn nil\n}", "func (me TSVGColorType) String() string { return xsdt.String(me).String() }", "func PushColorText(col color.Color) {\n\timgui.PushStyleColor(imgui.StyleColorText, ToVec4Color(col))\n}", "func (c *Color) wrap(s string) string {\n\tif c.isNoColorSet() {\n\t\treturn s\n\t}\n\n\treturn c.format() + s + c.unformat()\n}", "func (c *Color) wrap(s string) string {\n\tif c.isNoColorSet() {\n\t\treturn s\n\t}\n\n\treturn c.format() + s + c.unformat()\n}", "func (p *P1D) Fill(x, y, w float64) {\n\tp.bng.fill(x, y, w)\n}", "func print_color(chunks []Chunk) {\n\tc := 1\n\tfor i := range chunks {\n\t\tpayload := chunks[i].payload\n\t\ttag := chunks[i].tag\n\t\tvar x 
int\n\t\tif tag == \"\" {\n\t\t\tx = 7 // white\n\t\t} else {\n\t\t\tx = c\n\t\t\tc++\n\t\t\tif c > 6 {\n\t\t\t\tc = 1\n\t\t\t}\n\t\t}\n\t\tcolor := \"\\x1b[3\" + strconv.Itoa(x) + \"m\"\n\t\tfmt.Printf(\"%s%s\", color, payload)\n\t}\n\tfmt.Println()\n}", "func fill(b *Board, t TileColor) {\n\tfor y := 0; y < BOARD_HEIGHT; y++ {\n\t\tfor x := 0; x < BOARD_WIDTH; x++ {\n\t\t\tb.SetTile(t, x, y)\n\t\t}\n\t}\n}", "func grey(str string) string {\n\treturn \"\\x1B[90m\" + str + \"\\033[0m\"\n}", "func applyColor(f, s string) string {\n\treturn f + s + log.ColorNone\n}", "func (me TxsdPresentationAttributesColorColorRendering) String() string {\n\treturn xsdt.String(me).String()\n}", "func color(s string, color string) string {\n\treturn \"\\033[\" + color + \"m\" + s + \"\\033[0m\"\n}", "func (pal *CGBPalette) String() string {\n\tout := \"\"\n\tfor i := 0; i < len(pal.palette); i += 2 {\n\t\tout += fmt.Sprintf(\"%02x%02x \", pal.palette[i+1], pal.palette[i])\n\t\tif (i + 2) % 8 == 0 {\n\t\t\tout += \"\\n\"\n\t\t}\n\t}\n\treturn out\n}", "func (me *TcolorType) Set(s string) { (*xsdt.HexBinary)(me).Set(s) }", "func (c *canvasRenderer) FillText(text string, position sprec.Vec2, typography Typography) {\n\tcurrentLayer := c.currentLayer\n\ttransformMatrix := currentLayer.Transform\n\tclipMatrix := currentLayer.ClipTransform\n\n\tfont := typography.Font\n\tfontSize := typography.Size\n\tcolor := uiColorToVec(typography.Color)\n\n\tvertexOffset := c.textMesh.Offset()\n\toffset := position\n\tlastGlyph := (*fontGlyph)(nil)\n\n\tfor _, ch := range text {\n\t\tlineHeight := font.lineHeight * fontSize\n\t\tlineAscent := font.lineAscent * fontSize\n\t\tif ch == '\\r' {\n\t\t\toffset.X = position.X\n\t\t\tlastGlyph = nil\n\t\t\tcontinue\n\t\t}\n\t\tif ch == '\\n' {\n\t\t\toffset.X = position.X\n\t\t\toffset.Y += lineHeight\n\t\t\tlastGlyph = nil\n\t\t\tcontinue\n\t\t}\n\n\t\tif glyph, ok := font.glyphs[ch]; ok {\n\t\t\tadvance := glyph.advance * fontSize\n\t\t\tleftBearing := glyph.leftBearing * fontSize\n\t\t\trightBearing := glyph.rightBearing * fontSize\n\t\t\tascent := glyph.ascent * fontSize\n\t\t\tdescent := glyph.descent * fontSize\n\n\t\t\tvertTopLeft := textVertex{\n\t\t\t\tposition: sprec.Vec2Sum(\n\t\t\t\t\tsprec.NewVec2(\n\t\t\t\t\t\tleftBearing,\n\t\t\t\t\t\tlineAscent-ascent,\n\t\t\t\t\t),\n\t\t\t\t\toffset,\n\t\t\t\t),\n\t\t\t\ttexCoord: sprec.NewVec2(glyph.leftU, glyph.topV),\n\t\t\t}\n\t\t\tvertTopRight := textVertex{\n\t\t\t\tposition: sprec.Vec2Sum(\n\t\t\t\t\tsprec.NewVec2(\n\t\t\t\t\t\tadvance-rightBearing,\n\t\t\t\t\t\tlineAscent-ascent,\n\t\t\t\t\t),\n\t\t\t\t\toffset,\n\t\t\t\t),\n\t\t\t\ttexCoord: sprec.NewVec2(glyph.rightU, glyph.topV),\n\t\t\t}\n\t\t\tvertBottomLeft := textVertex{\n\t\t\t\tposition: sprec.Vec2Sum(\n\t\t\t\t\tsprec.NewVec2(\n\t\t\t\t\t\tleftBearing,\n\t\t\t\t\t\tlineAscent+descent,\n\t\t\t\t\t),\n\t\t\t\t\toffset,\n\t\t\t\t),\n\t\t\t\ttexCoord: sprec.NewVec2(glyph.leftU, glyph.bottomV),\n\t\t\t}\n\t\t\tvertBottomRight := textVertex{\n\t\t\t\tposition: sprec.Vec2Sum(\n\t\t\t\t\tsprec.NewVec2(\n\t\t\t\t\t\tadvance-rightBearing,\n\t\t\t\t\t\tlineAscent+descent,\n\t\t\t\t\t),\n\t\t\t\t\toffset,\n\t\t\t\t),\n\t\t\t\ttexCoord: sprec.NewVec2(glyph.rightU, glyph.bottomV),\n\t\t\t}\n\n\t\t\tc.textMesh.Append(vertTopLeft)\n\t\t\tc.textMesh.Append(vertBottomLeft)\n\t\t\tc.textMesh.Append(vertBottomRight)\n\n\t\t\tc.textMesh.Append(vertTopLeft)\n\t\t\tc.textMesh.Append(vertBottomRight)\n\t\t\tc.textMesh.Append(vertTopRight)\n\n\t\t\toffset.X += advance\n\t\t\tif lastGlyph != 
nil {\n\t\t\t\toffset.X += lastGlyph.kerns[ch] * fontSize\n\t\t\t}\n\t\t\tlastGlyph = glyph\n\t\t}\n\t}\n\tvertexCount := c.textMesh.Offset() - vertexOffset\n\n\tif vertexCount == 0 {\n\t\treturn\n\t}\n\n\tc.commandQueue.BindPipeline(c.textPipeline)\n\tc.commandQueue.Uniform4f(c.textMaterial.colorLocation, color.Array())\n\tc.commandQueue.UniformMatrix4f(c.textMaterial.projectionMatrixLocation, c.projectionMatrix.ColumnMajorArray())\n\tc.commandQueue.UniformMatrix4f(c.textMaterial.transformMatrixLocation, transformMatrix.ColumnMajorArray())\n\tc.commandQueue.UniformMatrix4f(c.textMaterial.clipMatrixLocation, clipMatrix.ColumnMajorArray())\n\tc.commandQueue.TextureUnit(0, font.texture)\n\tc.commandQueue.Uniform1i(c.textMaterial.textureLocation, 0)\n\tc.commandQueue.Draw(vertexOffset, vertexCount, 1)\n}", "func RedString(format string, a ...interface{}) (s string) {\n\ts = colorString(FgRed, format, a...)\n\treturn\n}", "func White(format string, a ...interface{}) { colorPrint(FgWhite, format, a...) }", "func colorize(s interface{}, c int) string {\n\treturn fmt.Sprintf(\"\\x1b[%dm%v\\x1b[0m\", c, s)\n}", "func colorize(s interface{}, c int) string {\n\treturn fmt.Sprintf(\"\\x1b[%dm%v\\x1b[0m\", c, s)\n}", "func (dst *Image) StringBg(pt image.Point, src *Image, sp image.Point, f *Font, s string, bg *Image, bgp image.Point) image.Point {\n\tdst.Display.mu.Lock()\n\tdefer dst.Display.mu.Unlock()\n\treturn _string(dst, pt, src, sp, f, s, nil, nil, dst.Clipr, bg, bgp, SoverD)\n}", "func (c Color) String() string {\n\treturn string(c)\n}", "func (e *encoder) encodeString(s string, color []byte) {\n\tif color != nil {\n\t\tsetColor(e.w, color)\n\t}\n\te.w.WriteByte('\"')\n\tstart := 0\n\tfor i := 0; i < len(s); {\n\t\tif b := s[i]; b < utf8.RuneSelf {\n\t\t\tif ' ' <= b && b <= '~' && b != '\"' && b != '\\\\' {\n\t\t\t\ti++\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif start < i {\n\t\t\t\te.w.WriteString(s[start:i])\n\t\t\t}\n\t\t\tswitch b {\n\t\t\tcase '\"':\n\t\t\t\te.w.WriteString(`\\\"`)\n\t\t\tcase '\\\\':\n\t\t\t\te.w.WriteString(`\\\\`)\n\t\t\tcase '\\b':\n\t\t\t\te.w.WriteString(`\\b`)\n\t\t\tcase '\\f':\n\t\t\t\te.w.WriteString(`\\f`)\n\t\t\tcase '\\n':\n\t\t\t\te.w.WriteString(`\\n`)\n\t\t\tcase '\\r':\n\t\t\t\te.w.WriteString(`\\r`)\n\t\t\tcase '\\t':\n\t\t\t\te.w.WriteString(`\\t`)\n\t\t\tdefault:\n\t\t\t\tconst hex = \"0123456789abcdef\"\n\t\t\t\te.w.WriteString(`\\u00`)\n\t\t\t\te.w.WriteByte(hex[b>>4])\n\t\t\t\te.w.WriteByte(hex[b&0xF])\n\t\t\t}\n\t\t\ti++\n\t\t\tstart = i\n\t\t\tcontinue\n\t\t}\n\t\tc, size := utf8.DecodeRuneInString(s[i:])\n\t\tif c == utf8.RuneError && size == 1 {\n\t\t\tif start < i {\n\t\t\t\te.w.WriteString(s[start:i])\n\t\t\t}\n\t\t\te.w.WriteString(`\\ufffd`)\n\t\t\ti += size\n\t\t\tstart = i\n\t\t\tcontinue\n\t\t}\n\t\ti += size\n\t}\n\tif start < len(s) {\n\t\te.w.WriteString(s[start:])\n\t}\n\te.w.WriteByte('\"')\n\tif color != nil {\n\t\tsetColor(e.w, resetColor)\n\t}\n}", "func (pw *PixelWand) GetColorAsString() string {\n\tp := C.PixelGetColorAsString(pw.pw)\n\truntime.KeepAlive(pw)\n\tdefer relinquishMemory(unsafe.Pointer(p))\n\treturn C.GoString(p)\n}", "func (me TColorType) String() string { return xsdt.String(me).String() }", "func LogRed(str string) {\n\tfmt.Printf(\"\\x1b[31;2m%s\\x1b[0m\\n\", str)\n}", "func HiWhite(format string, a ...interface{}) { colorPrint(FgHiWhite, format, a...) 
}", "func Render(s string) string {\n\treturn searchColors(s, -1, false)\n}", "func (c *ColorFlag) String() string {\n\treturn fmt.Sprintf(\"0x%x\", *c)\n}", "func printColored(boundaries []colored_data, data []byte) []string {\n\tgrouping := 2\n\tper_line := 16\n\t// 16 bytes per line, grouped by 2 bytes\n\tnlines := len(data) / 16\n\tif len(data) % 16 > 0 {\n\t\tnlines++\n\t}\n\t\n\tout := make([]string, nlines)\n\n\tkolo := \"\\x1B[0m\"\n\t\n\tfor line := 0; line < nlines; line++ {\n\t\ts := \"\\t0x\"\n\t\txy := make([]byte, 2)\n\t\tline_offset := line * per_line\n\t\txy[0] = byte(line_offset >> 8)\n\t\txy[1] = byte(line_offset & 0xff)\n\t\ts = s + hex.EncodeToString(xy) + \":\\t\" + kolo\n\n\n\t\tline_length := per_line\n\t\tif line == nlines - 1 && len(data) % 16 > 0 {\n\t\t\tline_length = len(data) % 16\n\t\t}\n\n\t\tfor b := 0; b < line_length; b++ {\n\t\t\ttotal_offset := line * per_line + b\n\n\t\t\t// inserting coulourings\n\t\t\tfor x := 0; x < len(boundaries); x++ {\n\t\t\t\t//fmt.Println(\"!\")\n\t\t\t\tif(boundaries[x].offset == uint16(total_offset)) {\n\t\t\t\t\ts = s + boundaries[x].color\n\t\t\t\t\tkolo = boundaries[x].color\n\t\t\t\t}\n\t\t\t}\n\t\n\t\t\t// add byte from total_offset\n\t\t\txxx := make([]byte, 1)\n\t\t\txxx[0] = data[total_offset]\n\t\t\ts = s + hex.EncodeToString(xxx)\n\t\t\t\n\t\t\t// if b > 0 && b % grouping == 0, insert space\n\t\t\t\n\t\t\tif b > 0 && (b-1) % grouping == 0 {\n\t\t\t\ts = s + \" \"\n\t\t\t}\n\t\t\t\n\t\t}\n\t\t\n\t\tout[line] = s + COLOR_NORMAL\n\t}\n\t\n\treturn out\n}" ]
[ "0.63875264", "0.63263696", "0.6272358", "0.6254065", "0.60946006", "0.59574157", "0.5893883", "0.5891934", "0.58215666", "0.56947726", "0.5689455", "0.5674843", "0.56719476", "0.560667", "0.5572262", "0.5509258", "0.54924154", "0.5467268", "0.5448704", "0.5425666", "0.5407817", "0.5405666", "0.5401476", "0.53982097", "0.5384061", "0.53599954", "0.53578055", "0.5353276", "0.53410614", "0.5337911", "0.5334418", "0.5278036", "0.527168", "0.52605", "0.52409166", "0.5238847", "0.5238072", "0.5231165", "0.5223687", "0.5222994", "0.5221422", "0.51952904", "0.51923", "0.5185672", "0.51814854", "0.51813835", "0.5133353", "0.5121238", "0.5084179", "0.508171", "0.5066777", "0.50604206", "0.5054012", "0.50468117", "0.5044329", "0.5017148", "0.50060236", "0.5004818", "0.5002914", "0.49962002", "0.49885634", "0.49857092", "0.4975638", "0.49688384", "0.49429893", "0.49260798", "0.49040616", "0.48939565", "0.48896417", "0.48896417", "0.48896417", "0.48884264", "0.48767436", "0.48653367", "0.48592007", "0.48592007", "0.48494002", "0.48455638", "0.4837392", "0.4833235", "0.48329595", "0.48287413", "0.48271835", "0.48246652", "0.4824662", "0.48218578", "0.48144242", "0.48065603", "0.479697", "0.479697", "0.47952816", "0.4791414", "0.4789519", "0.47855768", "0.47795403", "0.47790504", "0.47692624", "0.47664773", "0.4761327", "0.4757505" ]
0.806296
0
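The FillString record above and the SetColor record below share the same LightString receiver. A minimal, self-contained sketch of how the pair might be driven together; the Pixel and LightString shapes are assumptions inferred from the two snippets (their real definitions are not shown here), and the stdlib log package stands in for the leveled logger the originals use.

package main

import (
	"fmt"
	"log"
)

// Assumed shapes: only the fields the two snippets actually touch.
type Pixel struct{ Color uint8 }

type LightString struct {
	Pixels []Pixel
	Last   int
}

// FillString mirrors the document above, minus the debug logging:
// it writes color into every pixel up to (but not including) Last.
func (ls *LightString) FillString(color uint8) {
	for n := 0; n < ls.Last; n++ {
		ls.Pixels[n].Color = color
	}
}

// SetColor mirrors the document below: bounds-check (position == Last
// passes the check, as in the original), then write one pixel.
func (ls *LightString) SetColor(position int, color uint8) error {
	if position > ls.Last {
		return fmt.Errorf("SetColor: last valid pixel is #%v", ls.Last)
	}
	ls.Pixels[position].Color = color
	return nil
}

func main() {
	ls := &LightString{Pixels: make([]Pixel, 8), Last: 8}
	ls.FillString(1) // paint the whole string
	if err := ls.SetColor(3, 2); err != nil { // recolor a single pixel
		log.Fatal(err)
	}
	fmt.Println(ls.Pixels) // [{1} {1} {1} {2} {1} {1} {1} {1}]
}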
SetColor writes a color to a specific pixel
func (ls *LightString) SetColor(position int, color uint8) error {
	if position > ls.Last {
		log.Errorf("SetColor: last valid pixel is #%v", ls.Last)
		return fmt.Errorf("SetColor: last valid pixel is #%v", ls.Last)
	}
	log.Debugf("Coloring pixel %v %v", position, color)
	ls.Pixels[position].Color = color
	return nil
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func setPixel(x, y int, c color, pixels []byte) {\n\tindex := (y*windowWidth + x) * 4\n\n\tif index < len(pixels)-4 && index >= 0 {\n\t\tpixels[index] = c.r\n\t\tpixels[index+1] = c.g\n\t\tpixels[index+1] = c.b\n\t}\n}", "func (writer *Writer) WriteSetColor(color uint16) {\n\twriter.write16(uint16(CmdSetColor))\n\twriter.write16(color)\n}", "func setPixel(x, y int, c color, pixels []byte) error {\n\tindex := (y*int(winWidth) + x) * 4\n\n\tif index > len(pixels) || index < 0 {\n\t\t// simple string-based error\n\t\treturn fmt.Errorf(\"the pixel index is not valid, index: %d\", index)\n\t}\n\n\tif index < len(pixels) && index >= 0 {\n\t\tpixels[index] = c.r\n\t\tpixels[index+1] = c.g\n\t\tpixels[index+2] = c.b\n\t}\n\n\treturn nil\n}", "func setPixel(x, y int, c color, pixels []byte) error {\n\tindex := (y*int(winWidth) + x) * 4\n\n\tif index > len(pixels) || index < 0 {\n\t\t// simple string-based error\n\t\treturn fmt.Errorf(\"the pixel index is not valid, index: %d\", index)\n\t}\n\n\tif index < len(pixels) && index >= 0 {\n\t\tpixels[index] = c.r\n\t\tpixels[index+1] = c.g\n\t\tpixels[index+2] = c.b\n\t}\n\n\treturn nil\n}", "func SetPixel(n, r, g, b uint8) {\n\tbuffer[n*3] = g\n\tbuffer[n*3+1] = r\n\tbuffer[n*3+2] = b\n}", "func (s *Screen) SetPixel(x int, y int, bitindex int, row int, ox int, oy int) {\n\n\tfmt.Printf(\"Inside draw (setpixel) X: %d Y: %d bitindex: %d, row: %d || original x: %d y: %d\\n\", x, y, bitindex, row, ox, oy)\n\tif !s.validPixelIndex(x, y) {\n\t\treturn\n\t}\n\n\ts.Pixels[y][(x*4)+0] = PIXEL_ON\n\ts.Pixels[y][(x*4)+1] = PIXEL_ON\n\ts.Pixels[y][(x*4)+2] = PIXEL_ON\n\ts.Pixels[y][(x*4)+3] = PIXEL_ON\n}", "func (t *Texture) Set(x uint16, y uint16, value rgb565.Rgb565Color) {\n\tt.pixels[y*t.width+x] = value\n}", "func (pw *PixelWand) SetColor(color string) bool {\n\tcscolor := C.CString(color)\n\tdefer C.free(unsafe.Pointer(cscolor))\n\tret := 1 == int(C.PixelSetColor(pw.pw, cscolor))\n\truntime.KeepAlive(pw)\n\treturn ret\n}", "func (fb *FrameBuffer) SetColorAt(x int, y int, c color.Color) {\n\tif x >= 0 && y >= 0 && x < fb.width && y < fb.height {\n\t\tfb.img.Set(x, y, c) // = c\n\t}\n}", "func (d *Dev) SetPixel(x, y int, active bool) {\n\toffset := (y / 8 * Width) + x\n\tpageAddress := y % 8\n\td.pixels[offset] &= ^(1 << byte(pageAddress))\n\td.pixels[offset] |= bTob(active) & 1 << byte(pageAddress)\n}", "func (d *Device) SetPixel(x int16, y int16, c color.RGBA) {\n\tif x < 0 || y < 0 ||\n\t\t(((d.rotation == drivers.Rotation0 || d.rotation == drivers.Rotation180) && (x >= d.width || y >= d.height)) ||\n\t\t\t((d.rotation == drivers.Rotation90 || d.rotation == drivers.Rotation270) && (x >= d.height || y >= d.width))) {\n\t\treturn\n\t}\n\td.FillRectangle(x, y, 1, 1, c)\n}", "func putPixel(screen []byte, color color, x int, y int) {\n\tscreenX := (windowWidth / 2) + x\n\tscreenY := (windowHeight / 2) - y - 1\n\tbase := (screenY*windowWidth + screenX) * 4\n\tscreen[base] = color.r\n\tscreen[base+1] = color.g\n\tscreen[base+2] = color.b\n\tscreen[base+3] = 0xFF\n\tscreen[0] = 0xFF\n}", "func (p *ProjectionPlane) SetPixel(row, col int, rawColor color.Color) {\n\tmappedColor := rawColor\n\n\tif p.ClampOutOfGamut {\n\t\tmappedColor.ClampToColor(p.ClampColor)\n\t} else {\n\t\tmappedColor.MaxToOne()\n\t}\n\n\tif p.Gamma != 1.0 {\n\t\tmappedColor = *mappedColor.Pow(1 / p.Gamma)\n\t}\n\n\tp.Image.Set(row, col, mappedColor.ToRGBA())\n}", "func (p *RGBAf) Set(x, y int, c color.Color) {\n\tif !(image.Point{x, y}.In(p.Rect)) {\n\t\treturn\n\t}\n\ti := p.PixOffset(x, 
y)\n\tc1 := color.RGBAModel.Convert(c).(color.RGBA)\n\ts := p.Pix[i : i+4 : i+4] // Small cap improves performance, see https://golang.org/issue/27857\n\ts[0] = float32(c1.R)\n\ts[1] = float32(c1.G)\n\ts[2] = float32(c1.B)\n\ts[3] = float32(c1.A)\n}", "func (uni *Uniform1fv) SetColor(pos int, color *math32.Color) {\n\n\tuni.v[pos] = color.R\n\tuni.v[pos+1] = color.G\n\tuni.v[pos+2] = color.B\n}", "func SetPixel(x, y int, c [4]byte, pixels *[]byte) {\n\tindex := (y* int(cfg.COLS*cfg.CELL_SIZE) + x) * 4\n\n\tif index < len(*pixels)-4 && index >= 0 {\n\t\t(*pixels)[index] = c[0]\n\t\t(*pixels)[index+1] = c[1]\n\t\t(*pixels)[index+2] = c[2]\t\n\t\t(*pixels)[index+3] = c[3]\t\n\t}\n}", "func (c *Canvas) SetColor(row, col int, color utils.Color) { //uint8\n\t//sadly a lot of terminals don't support true colors yet..\n\t//in the future this would be a better alternative:\n\t//c.formatting[row][col] = fmt.Sprintf(\"38;2;%d;%d;%d;1\", r, g, b) //+= ?\n\t//for now.. we use the basic(only) alternative\n\tnumber := 16 + 36*color.R + 6*color.G + color.B\n\tc.SetAnsi(row, col, fmt.Sprintf(\"38;5;%d\", number))\n}", "func (uni *Uniform3fv) SetColor(idx int, color *math32.Color) {\n\n\tpos := idx * 3\n\tuni.v[pos] = color.R\n\tuni.v[pos+1] = color.G\n\tuni.v[pos+2] = color.B\n}", "func (g *gfx) SetPixel(x, y int) {\n\tg[x][y] = COLOR_WHITE\n}", "func (rgbw *Rgbw) SetColor(r uint8, g uint8, b uint8, w uint8) {\n\trgbw[0] = r\n\trgbw[1] = g\n\trgbw[2] = b\n\trgbw[3] = w\n}", "func TestWritePixel(t *testing.T) {\n\t// Given\n\tc := canvas.New(10, 20)\n\tred := color.New(1.0, 0.0, 0.0)\n\n\t// When\n\tc.SetPixel(2, 3, red)\n\n\t// Then\n\tassert.True(t, c.Pixel(2, 3).Equal(red))\n}", "func (win *window) SetColor(p sparta.Property, c color.RGBA) {\n\tif (p != sparta.Background) && (p != sparta.Foreground) {\n\t\treturn\n\t}\n\ts := xwin.DefaultScreen()\n\tcode := getColorCode(c)\n\tpx, ok := pixelMap[code]\n\tif !ok {\n\t\tr, g, b, _ := c.RGBA()\n\t\tcl, _ := xwin.AllocColor(s.DefaultColormap, uint16(r), uint16(g), uint16(b))\n\t\tpx = cl.Pixel\n\t\tpixelMap[code] = px\n\t}\n\tif p == sparta.Foreground {\n\t\txwin.ChangeGC(win.gc, xgb.GCForeground, []uint32{px})\n\t} else {\n\t\txwin.ChangeGC(win.gc, xgb.GCBackground, []uint32{px})\n\t}\n}", "func (b *Bitmap) SetPixel(x, y int, pixel byte) error {\n\ti := b.GetByteIndex(x, y)\n\tif i > len(b.Data)-1 {\n\t\treturn ErrIndexOutOfRange\n\t}\n\to := b.GetBitOffset(x)\n\n\tshift := uint(7 - o)\n\tsrc := b.Data[i]\n\n\tresult := src | (pixel & 0x01 << shift)\n\tb.Data[i] = result\n\n\treturn nil\n}", "func (t Texture3D) SetPixel(p Point3D, d []byte, genMipmap bool) error {\n\treturn t.SetPixelArea(p.X, p.Y, p.Z, 1, 1, 1, d, genMipmap)\n}", "func (canvas *Canvas) SetColor(r, g, b float32) {\n\twriteCommand(canvas.contents, \"rg\", r, g, b)\n}", "func (w *Wrapper) Set(c Color) error {\n\treturn w.SetMask(c, 0xFF)\n}", "func (c *Canvas) WritePixel(x, y int, color color.Color) error {\n\terr := c.ValidateInCanvasBounds(x, y)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tc.Pixels[y][x] = color\n\treturn nil\n}", "func (p *PixelLine) Set(pos int, c color.Color) {\n\tc = p.model.Convert(c)\n\tparts := getParts(c)\n\tp.SetParts(pos, parts)\n}", "func (x *ImgSpanner) SetColor(c interface{}) {\n\tswitch c := c.(type) {\n\tcase color.Color:\n\t\tx.colorFunc = nil\n\t\tr, g, b, a := c.RGBA()\n\t\tif x.xpixel == true { // apparently r and b values swap in xgraphics.Image\n\t\t\tr, b = b, r\n\t\t}\n\t\tx.fgColor = color.RGBA{\n\t\t\tR: uint8(r >> 8),\n\t\t\tG: uint8(g >> 
8),\n\t\t\tB: uint8(b >> 8),\n\t\t\tA: uint8(a >> 8)}\n\tcase rasterx.ColorFunc:\n\t\tx.colorFunc = c\n\t}\n}", "func (lp *Point) SetColor(color *math32.Color) {\n\n\tlp.color = *color\n\tlp.udata.color = lp.color\n\tlp.udata.color.MultiplyScalar(lp.intensity)\n}", "func (image *Image2D) SetRGB(x, y int, r, g, b uint8) {\n\tidx := image.getIdx(x, y)\n\timage.data[idx] = r\n\timage.data[idx+1] = g\n\timage.data[idx+2] = b\n}", "func (u *User) SetColor(asset, hexcode string) error {\n\tpath := fmt.Sprintf(\"users/%d/colors/%s\", u.ID, asset)\n\tif hexcode[0] == '#' {\n\t\thexcode = hexcode[1:]\n\t}\n\n\tresp, err := put(u.client, path, params{\"hexcode\": {hexcode}})\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn resp.Body.Close()\n}", "func (pw *PixelWand) SetRed(red float64) {\n\tC.PixelSetRed(pw.pw, C.double(red))\n\truntime.KeepAlive(pw)\n}", "func (f Flag) SetColor(color Color) error {\n\t_, err := f.oe.Write([]byte{0x00, 0x00})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = f.oe.Write([]byte{0x00, byte(color)})\n\treturn err\n}", "func (c *Canvas) Set(cr *Cursor) error {\n\tif cr.X >= canvasWidth || cr.Y >= canvasHeight {\n\t\treturn fmt.Errorf(`(%d, %d) is out of the Canvas size`, cr.X, cr.Y)\n\t}\n\n\t(*c)[cr.Y][cr.X] = cr.Color\n\n\treturn nil\n}", "func (y *Yeelight) SetRGB(value, effect, duration string) string {\n\tcmd := `{\"id\":3,\"method\":\"set_rgb\",\"params\":[` + value + `,\"` + effect + `\",` + duration + `]}`\n\treturn y.request(cmd)\n}", "func (pw *PixelWand) SetYellow(yellow float64) {\n\tC.PixelSetYellow(pw.pw, C.double(yellow))\n\truntime.KeepAlive(pw)\n}", "func (p *RGBAf) SetRGB(x, y int, c *Vector3) {\n\tif !(image.Point{x, y}.In(p.Rect)) {\n\t\treturn\n\t}\n\ti := p.PixOffset(x, y)\n\ts := p.Pix[i : i+4 : i+4] // Small cap improves performance, see https://golang.org/issue/27857\n\ts[0] = Clamp(c.e[0]*255.999, 0.0, 255.0)\n\ts[1] = Clamp(c.e[1]*255.999, 0.0, 255.0)\n\ts[2] = Clamp(c.e[2]*255.999, 0.0, 255.0)\n\ts[3] = 255.0\n}", "func (pw *PixelWand) SetColorFromWand(pixelWand *PixelWand) {\n\tC.PixelSetColorFromWand(pw.pw, pixelWand.pw)\n\truntime.KeepAlive(pw)\n}", "func (v *Bitmap256) Set(pos uint8) {\n\tv[pos>>6] |= 1 << (pos & 63)\n}", "func (b Buffer) Set(x, y int, c Cell) {\n\tb.CellMap[image.Pt(x, y)] = c\n}", "func (pix *Pixel) Set(keyName string, val int) Pixel {\n\tswitch keyName {\n\tcase \"R\":\n\t\tpix.R = val\n\tcase \"G\":\n\t\tpix.G = val\n\tcase \"B\":\n\t\tpix.B = val\n\tcase \"A\":\n\t\tpix.A = val\n\t}\n\treturn *pix\n}", "func (s *Sprite) SetColor(c sdl.Color) {\n\ts.Tex.SetColorMod(c.R, c.G, c.B)\n}", "func (dw *DrawingWand) Color(x, y float64, pm PaintMethod) {\n\tC.MagickDrawColor(dw.dw, C.double(x), C.double(y), C.PaintMethod(pm))\n}", "func (c *ColorFlag) Set(v string) error {\n\tcol, err := strconv.ParseInt(v, 0, 64)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif col < ColorLimit {\n\t\t*c = ColorFlag(col)\n\t\treturn nil\n\t}\n\n\treturn errors.New(\"Not a color in RGB palette\")\n}", "func (pw *PixelWand) SetMagickColor(color *MagickPixelPacket) {\n\tC.PixelSetMagickColor(pw.pw, color.mpp)\n}", "func (spriteBatch *SpriteBatch) SetColor(vals ...float32) {\n\tspriteBatch.color = vals\n}", "func (c *Color) Set() *Color {\n\tif c.isNoColorSet() {\n\t\treturn c\n\t}\n\tfmt.Fprintf(Output, c.format())\n\n\treturn c\n}", "func SetColor(mode bool) {\n\tColorize = mode\n}", "func (pw *PixelWand) SetBlue(blue float64) {\n\tC.PixelSetBlue(pw.pw, C.double(blue))\n\truntime.KeepAlive(pw)\n}", "func SetColorTemp(temp int) {\n\t// An 
attempt to fix https://github.com/d4l3k/go-sct/issues/9\n\tif runtime.GOOS == \"windows\" {\n\t\tsetColorTemp(temp + 1)\n\t}\n\tsetColorTemp(temp)\n}", "func (p *Ball) SetColor(color *objects.Vector) { p.color = color }", "func (m *Mat2f) Set(row, col int, v float32) {\n\tm[col*2+row] = v\n}", "func (c *Color) SetInt(r, g, b, a int) {\n\tc.SetUInt32(uint32(r), uint32(g), uint32(b), uint32(a))\n}", "func (b *Buffer) DrawPixel(x, y int, c color.Color) {\n\tb.context.SetColor(c)\n\tb.context.SetPixel(x, y)\n}", "func (pw *PixelWand) SetMagenta(magenta float64) {\n\tC.PixelSetMagenta(pw.pw, C.double(magenta))\n\truntime.KeepAlive(pw)\n}", "func (pic *Picture) SetColor(color color.Color) {\n\tpic.currentColor = color\n}", "func (pw *PixelWand) SetColorCount(count uint) {\n\tC.PixelSetColorCount(pw.pw, C.size_t(count))\n\truntime.KeepAlive(pw)\n}", "func (me *TcolorType) Set(s string) { (*xsdt.HexBinary)(me).Set(s) }", "func (g *Gopher) SetColor(c Color) {\n\tg.mu.Lock()\n\tdefer g.mu.Unlock()\n\tg.color = c\n}", "func (c *Color) Set(value string) error {\n\n\tif !strings.HasPrefix(value, \"#\") {\n\t\treturn errors.New(\"Colours must start with a #\")\n\t}\n\n\tchars := strings.Split(value, \"\")\n\n\tif len(chars) != 7 {\n\t\treturn errors.New(\"Colour string is not the right length\")\n\t}\n\n\trString := strings.Join(chars[1:3], \"\")\n\tgString := strings.Join(chars[3:5], \"\")\n\tbString := strings.Join(chars[5:7], \"\")\n\n\tr, err := strconv.ParseUint(rString, 16, 32)\n\tif err != nil {\n\t\treturn errors.New(\"Invalid hexadecimal number (red)\")\n\t}\n\n\tg, err := strconv.ParseUint(gString, 16, 32)\n\tif err != nil {\n\t\treturn errors.New(\"Invalid hexadecimal number (green)\")\n\t}\n\n\tb, err := strconv.ParseUint(bString, 16, 32)\n\tif err != nil {\n\t\treturn errors.New(\"Invalid hexadecimal number (blue)\")\n\t}\n\n\tc.R = uint8(r)\n\tc.G = uint8(g)\n\tc.B = uint8(b)\n\tc.A = 255\n\n\treturn nil\n}", "func Set(p ...Attribute) (c *Color) {\n\tc = New(p...)\n\tc.Set()\n\n\treturn\n}", "func (pw *PixelWand) SetGreen(green float64) {\n\tC.PixelSetGreen(pw.pw, C.double(green))\n\truntime.KeepAlive(pw)\n}", "func (pw *PixelWand) SetQuantumColor(color *PixelPacket) {\n\tC.PixelSetQuantumColor(pw.pw, color.pp)\n\truntime.KeepAlive(pw)\n}", "func (b *Board) Set(p Position, val byte) {\n\tb[p.row][p.col] = val\n}", "func (im FancyImage) SetDrawFun(pixelDraw func(int, int) RGBA) {\r\n\tim.PixelDraw = pixelDraw\r\n\t//im.Draw()\r\n\t/*\r\n\tfor y:=0; y < im.Y; y++ {\r\n\t\tfor x:= 0; x < im.X; x++ {\r\n\t\t\tv := pixelDraw(x, y)\t\t\t\t\t//pixel value: RGBA\r\n\t\t\ti := y*im.Stride + 4*x \t\t\t\t//pixel cell offset.\r\n\t\t\tim.Pixels[i] = v.R\r\n\t\t\tim.Pixels[i+1] = v.G\r\n\t\t\tim.Pixels[i+2] = v.B\r\n\t\t\tim.Pixels[i+3] = v.A\r\n\t\t}\r\n\t}\r\n\t*/\r\n}", "func (pw *PixelWand) SetBlack(black float64) {\n\tC.PixelSetBlack(pw.pw, C.double(black))\n\truntime.KeepAlive(pw)\n}", "func (self *Graphics) SetTintA(member int) {\n self.Object.Set(\"tint\", member)\n}", "func (ctl *Controller) SetColour(position int, colour Colour) {\n\tif position >= ctl.count || position < 0 {\n\t\t// Do nothing - out of bounds\n\t\treturn\n\t}\n\n\tctl.ledColours[position] = colour\n\n\tctl.updateBuffer(position, colour)\n}", "func (s UserSet) SetColor(value int64) {\n\ts.RecordCollection.Set(models.NewFieldName(\"Color\", \"color\"), value)\n}", "func (m *WorkbookRangeBorder) SetColor(value *string)() {\n err := m.GetBackingStore().Set(\"color\", value)\n if err != nil {\n panic(err)\n }\n}", "func (l 
*listPlot) Set(x, y int, c color.Color) {\n\tl.pts = append(l.pts, image.Pt(x, y))\n}", "func (pal *CGBPalette) write(value byte) {\n\tpal.palette[pal.index] = value\n\tif pal.inc {\n\t\tpal.index = (pal.index + 1) & 0x3F\n\t}\n}", "func (c *Canvas) SetPixels(pixels []uint8) {\n\tc.gf.Dirty()\n\n\tmainthread.Call(func() {\n\t\ttex := c.Texture()\n\t\ttex.Begin()\n\t\ttex.SetPixels(0, 0, tex.Width(), tex.Height(), pixels)\n\t\ttex.End()\n\t})\n}", "func (me *TxsdPresentationAttributesColorColorRendering) Set(s string) { (*xsdt.String)(me).Set(s) }", "func (win *window) Pixel(pt image.Point) {\n\txwin.PolyPoint(xgb.CoordModeOrigin, win.id, win.gc, []xgb.Point{xgb.Point{int16(pt.X), int16(pt.Y)}})\n}", "func (me *TxsdPresentationAttributesColorColorInterpolation) Set(s string) { (*xsdt.String)(me).Set(s) }", "func (f *CytoGrid) Set(x, y int, b bool) {\n\tf.S[y][x] = b\n}", "func (image *Image2D) SetA(x, y int, a uint8) {\n\tidx := image.getIdx(x, y)\n\timage.data[idx+3] = a\n}", "func (m *Milight) Color(color byte) error {\n\tcmd := []byte{0x31, 0x00, 0x00, 0x00, 0x01, color, color, color, color}\n\treturn m.sendCommand(cmd)\n}", "func (f *FenceComponent) SetColor(color api.IPalette) {\n\tgr := f.bottom.(*custom.LineNode)\n\tgr.SetColor(color)\n\tgr = f.right.(*custom.LineNode)\n\tgr.SetColor(color)\n\tgr = f.top.(*custom.LineNode)\n\tgr.SetColor(color)\n\tgr = f.left.(*custom.LineNode)\n\tgr.SetColor(color)\n}", "func (t *TextRenderer) SetColor(red float32, green float32, blue float32, alpha float32) {\n\tt.shader.Use().SetVec4f(\"textColor\", mgl32.Vec4{red, green, blue, alpha})\n}", "func (f *Font) SetColor(red float32, green float32, blue float32, alpha float32) {\n\tf.color.r = red\n\tf.color.g = green\n\tf.color.b = blue\n\tf.color.a = alpha\n}", "func (hw *HighlightedWriter) SetColor(fg, bg prompt.Color, bold bool) {\n\t// We use unreasonable color settings to \"flag\" when prompt is about to write input text\n\tif fg == prompt.Red && bg == prompt.Red {\n\t\thw.writingInput = true\n\t\thw.delegate.SetColor(prompt.DefaultColor, prompt.DefaultColor, bold)\n\t} else {\n\t\thw.writingInput = false\n\t\thw.delegate.SetColor(fg, bg, bold)\n\t}\n}", "func (s *Surface) SetPixelData(data []color.RGBA, area geo.Rect) {\n\tx, y, w, h := math.Floor(area.X), math.Floor(area.Y), math.Floor(area.W), math.Floor(area.H)\n\timgData := s.Ctx.Call(\"getImageData\", x, y, w, h)\n\tpxData := imgData.Get(\"data\")\n\tfor i := 0; i < len(data); i++ {\n\t\tpxData.SetIndex(i*4, data[i].R)\n\t\tpxData.SetIndex(i*4+1, data[i].G)\n\t\tpxData.SetIndex(i*4+2, data[i].B)\n\t\tpxData.SetIndex(i*4+3, data[i].A)\n\t}\n\ts.Ctx.Call(\"putImageData\", imgData, x, y)\n}", "func (g *gfx) ClearPixel(x, y int) {\n\tg[x][y] = COLOR_BLACK\n}", "func (se *SimpleElement) SetColor(value color.Color) {\n\tse.value = formatColor(value)\n}", "func (me *TColorType) Set(s string) { (*xsdt.String)(me).Set(s) }", "func (w *Window) Setxy(x, y int, r, g, b byte) {\n\tif x < 0 || x > w.width || y < 0 || y > w.height {\n\t\treturn\n\t}\n\ti := y*3*w.width + x*3\n\tw.tex[i] = r\n\tw.tex[i+1] = g\n\tw.tex[i+2] = b\n}", "func (m *BitPrecMat) Set(x, y int, to uint32) {\n\tbitOfs := (x + y*m.w) * m.bits\n\tdataI, maskI := bitOfs/32, bitOfs%32\n\tto64 := uint64(to) << (64 - uint64(m.bits) - uint64(maskI))\n\tmask64 := bitMasks[m.bits][maskI]\n\toldData64 := uint64(m.data[dataI])<<32 | uint64(m.data[dataI+1])\n\tnewData64 := (oldData64 & ^mask64) | to64\n\tm.data[dataI] = uint32(newData64 & 0xFFFFFFFF00000000 >> 32)\n\tm.data[dataI+1] = 
uint32(newData64 & 0xFFFFFFFF)\n}", "func (t Texture3D) SetPixelArea(x, y, z, w, h, depth int32, d []byte, genMipmap bool) error {\n\tif x < 0 || y < 0 || z < 0 || x >= t.width || y >= t.height || z >= t.depth {\n\t\treturn fmt.Errorf(\"SetPixelArea(%v %v %v %v %v %v): %w\", x, y, z, w, h, depth, ErrCoordOutOfRange)\n\t}\n\tgl.PixelStorei(gl.UNPACK_ALIGNMENT, t.alignment)\n\tgl.TextureSubImage3D(t.id, 0, x, y, z, w, h, depth, t.format, gl.UNSIGNED_BYTE, unsafe.Pointer(&d[0]))\n\tif genMipmap {\n\t\tt.Bind()\n\t\tgl.GenerateMipmap(gl.TEXTURE_3D)\n\t\tt.Unbind()\n\t}\n\treturn nil\n}", "func (w *VT100Writer) SetColor(fg, bg Color, bold bool) {\n\tif bold {\n\t\tw.SetDisplayAttributes(fg, bg, DisplayBold)\n\t} else {\n\t\t// If using `DisplayDefualt`, it will be broken in some environment.\n\t\t// Details are https://github.com/c-bata/go-prompt/pull/85\n\t\tw.SetDisplayAttributes(fg, bg, DisplayReset)\n\t}\n}", "func (pw *PixelWand) SetOpacity(opacity float64) {\n\tC.PixelSetOpacity(pw.pw, C.double(opacity))\n\truntime.KeepAlive(pw)\n}", "func (fb FrameBuffer) ColorAt(x int, y int) color.Color {\n\tc := fb.img.At(x, y)\n\treturn c\n}", "func (obj *Device) SetPixelShader(shader *PixelShader) Error {\n\tret, _, _ := syscall.Syscall(\n\t\tobj.vtbl.SetPixelShader,\n\t\t2,\n\t\tuintptr(unsafe.Pointer(obj)),\n\t\tuintptr(unsafe.Pointer(shader)),\n\t\t0,\n\t)\n\treturn toErr(ret)\n}", "func (rgbw *Rgbw) SetRed(r uint8) {\n\trgbw[0] = r\n}", "func (c *Canvas) SetColorMask(col color.Color) {\n\trgba := pixel.Alpha(1)\n\tif col != nil {\n\t\trgba = pixel.ToRGBA(col)\n\t}\n\tc.col = mgl32.Vec4{\n\t\tfloat32(rgba.R),\n\t\tfloat32(rgba.G),\n\t\tfloat32(rgba.B),\n\t\tfloat32(rgba.A),\n\t}\n}", "func (a *PixelSubArray) set(x, y int) {\n\txByte := x/8 - a.xStartByte\n\txBit := uint(x % 8)\n\tyRow := y - a.yStart\n\n\tif yRow > len(a.bytes) {\n\t\tfmt.Println(\"Y OOB:\", len(a.bytes), yRow)\n\t}\n\n\tif xByte > len(a.bytes[0]) {\n\t\tfmt.Println(\"X OOB:\", len(a.bytes[0]), xByte)\n\t}\n\n\ta.bytes[yRow][xByte] |= (1 << xBit)\n}", "func (frame *Frame) SetColor(color string) {\n\tif color == \"\" {\n\t\treturn\n\t}\n\tframe.color = color\n\tprefix := frame.ctx.Prefix() + Fmt(\"{{. | \"+color+\"}} \", \"┃\")\n\tframe.nestedCtx.Logger = log.New(frame.ctx.Writer(), prefix, 0)\n}", "func (c *Chunk) Set(x, y, z uint8) {\n\tc.Solid[x][y] |= 1 << z\n}" ]
[ "0.73720556", "0.72960764", "0.7205484", "0.7205484", "0.71994334", "0.70884395", "0.6987065", "0.69633406", "0.6919301", "0.6915945", "0.68813854", "0.6852106", "0.68471354", "0.6827616", "0.67762893", "0.6771381", "0.6748354", "0.67284906", "0.6727163", "0.66611207", "0.66167384", "0.6595591", "0.65286773", "0.6526936", "0.6473515", "0.64620584", "0.6335969", "0.6313319", "0.6303317", "0.62799466", "0.627932", "0.61997485", "0.61726", "0.6157475", "0.6099866", "0.6082694", "0.6069188", "0.604937", "0.60175", "0.600043", "0.59939927", "0.5992341", "0.5990861", "0.5956493", "0.59480137", "0.5903049", "0.58884627", "0.5835899", "0.5813126", "0.5795813", "0.578147", "0.57799894", "0.5779302", "0.577023", "0.5769785", "0.5768613", "0.5765059", "0.5762007", "0.5760854", "0.57473737", "0.57406586", "0.56745607", "0.56550324", "0.5625263", "0.5621199", "0.561767", "0.5612824", "0.56098294", "0.5604483", "0.56024766", "0.56021965", "0.55797493", "0.5574533", "0.55623966", "0.5540532", "0.5530291", "0.5522748", "0.55066484", "0.55017734", "0.544698", "0.5441602", "0.5434755", "0.54233575", "0.5414673", "0.5407814", "0.5405916", "0.5404772", "0.5395175", "0.5392094", "0.53868836", "0.5373914", "0.53717345", "0.5362709", "0.53611124", "0.5343993", "0.5327785", "0.5323134", "0.5313699", "0.53134286", "0.53072083" ]
0.6281695
29
Render writes the string to hardware
func (ls *LightString) Render() {
	log.Debug("Rendering string")
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (e *rawData) Render(w io.Writer) (int, error) { return w.Write([]byte(e.content)) }", "func (t *textRender) Render(w io.Writer) (err error) {\n\tif len(t.Values) > 0 {\n\t\t_, err = fmt.Fprintf(w, t.Format, t.Values...)\n\t} else {\n\t\t_, err = io.WriteString(w, t.Format)\n\t}\n\treturn\n}", "func (g *Game) Render() string {\n\tascii := \"\"\n\n\tm := g.generateScreen()\n\tfor _, row := range m.cells {\n\t\tascii += strings.Join(row, \"\") + \"\\n\"\n\t}\n\n\treturn ascii\n}", "func (d *Device) Render() error {\n\tbuf := new(bytes.Buffer)\n\n\tfor _, chain := range d.LEDs {\n\t\tfor _, col := range chain {\n\t\t\tbuf.Write([]byte{col.R, col.G, col.B})\n\t\t}\n\t}\n\n\t_, err := Conn.WriteToUDP(buf.Bytes(), d.Addr)\n\treturn err\n}", "func (ctx *Context) Render(bytes []byte) {\n\t//debug\n\t//fmt.Println(\"response msg = \", string(bytes))\n\tctx.Writer.WriteHeader(200)\n\t_, err := ctx.Writer.Write(bytes)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}", "func (rf RendererFunc) Render(w io.Writer, v interface{}) error { return rf(w, v) }", "func (c *Ctx) RenderString(code int, s string) {\n\tc.W.WriteHeader(code)\n\tc.W.Write([]byte(s))\n}", "func (s *Source) Render() ([]byte, error) {\n\tvar ret bytes.Buffer\n\tfmt.Fprintf(&ret, \"<source>\")\n\tfmt.Fprintf(&ret, \"\\n @type forward\")\n\tfmt.Fprintf(&ret, \"\\n port %d\", s.port)\n\tfmt.Fprintf(&ret, \"\\n bind 0.0.0.0\")\n\tfmt.Fprintf(&ret, \"\\n</source>\")\n\n\treturn ret.Bytes(), nil\n}", "func (p *pattern) RenderString(d map[string]interface{}) string {\n\t// TODO strings.Builder\n\tbuf := p.bufPool.Get().(*bytes.Buffer)\n\tdefer func() {\n\t\tbuf.Reset()\n\t\tp.bufPool.Put(buf)\n\t}()\n\tfor _, f := range p.funcs {\n\t\tbuf.WriteString(f(d))\n\t}\n\n\treturn buf.String()\n}", "func (c *Controller) RenderString() (string, error) {\n\tb, e := c.RenderBytes()\n\tif e != nil {\n\t\treturn \"\", e\n\t}\n\treturn string(b), e\n}", "func (v Binary) Render(i, width int, baseStyle lipgloss.Style) string {\n\tw := dataWidth(width)\n\t_, err := v.Seek(int64(i*w), io.SeekStart)\n\tif err != nil {\n\t\treturn baseStyle.Blink(true).Render(err.Error())\n\t}\n\tif len(v.b) != w {\n\t\tv.b = make([]byte, w)\n\t}\n\tn, err := v.Read(v.b)\n\tif err != nil && !errors.Is(err, io.EOF) {\n\t\treturn baseStyle.Blink(true).Render(err.Error())\n\t}\n\ts := fmt.Sprintf(\"% X%s \", v.b[0:n], strings.Repeat(\" \", w-n))\n\tvar x strings.Builder\n\tfor i := 0; i < n; i++ {\n\t\tif unicode.IsPrint(rune(v.b[i])) {\n\t\t\tx.WriteRune(rune(v.b[i]))\n\t\t} else {\n\t\t\tx.WriteRune('.')\n\t\t}\n\t}\n\treturn baseStyle.Render(s + x.String())\n}", "func (this *AsciiRender) Render(asciiStr string) (string, error) {\n\treturn this.render(asciiStr, NewRenderOptions())\n}", "func (msg *Message) Render() string {\n\treturn msg.RenderBuffer().String()\n}", "func (c *Ctx) Render(code int, b []byte) {\n\tc.W.WriteHeader(code)\n\tc.W.Write(b)\n}", "func render(fset *token.FileSet, x interface{}) string {\n\tvar buf bytes.Buffer\n\tif err := printer.Fprint(&buf, fset, x); err != nil {\n\t\tpanic(err)\n\t}\n\treturn buf.String()\n}", "func render(fset *token.FileSet, x interface{}) string {\n\tvar buf bytes.Buffer\n\tif err := printer.Fprint(&buf, fset, x); err != nil {\n\t\tpanic(err)\n\t}\n\treturn buf.String()\n}", "func Render(rawBytes []byte, urlPrefix string, metas map[string]string, isWiki bool) []byte {\n\thtmlWriter := org.NewHTMLWriter()\n\n\trenderer := &Renderer{\n\t\tHTMLWriter: htmlWriter,\n\t\tURLPrefix: urlPrefix,\n\t\tIsWiki: 
isWiki,\n\t}\n\n\thtmlWriter.ExtendingWriter = renderer\n\n\tres, err := org.New().Silent().Parse(bytes.NewReader(rawBytes), \"\").Write(renderer)\n\tif err != nil {\n\t\tlog.Error(\"Panic in orgmode.Render: %v Just returning the rawBytes\", err)\n\t\treturn rawBytes\n\t}\n\treturn []byte(res)\n}", "func Render(colorCode int, fontSize int, content string) string {\n\treturn \"\\033[\" + strconv.Itoa(fontSize) + \";\" + strconv.Itoa(colorCode) + \"m\" + content + reset\n}", "func RenderString(s string) {\n\tq, err := qrcode.New(s, qrcode.Medium)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tfmt.Println(q.ToSmallString(false))\n}", "func (r *render) RenderString(e *entry) string {\n\tr.mu.Lock()\n\tr.buf = r.buf[0:0]\n\trenderEntry(&r.buf, e)\n\tstr := string(r.buf)\n\tr.mu.Unlock()\n\treturn str\n}", "func (c *Controller) Render() error {\n\tif !c.EnableRender {\n\t\treturn nil\n\t}\n\trb, err := c.RenderBytes()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif c.Ctx.ResponseWriter.Header().Get(\"Content-Type\") == \"\" {\n\t\tc.Ctx.Output.Header(\"Content-Type\", \"text/html; charset=utf-8\")\n\t}\n\n\treturn c.Ctx.Output.Body(rb)\n}", "func Render(w io.Writer, template string, data interface{}) error {\n\tif err := renderer.HTML(w, 0, template, data); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (e *Engine) String(s string) Renderer {\n\treturn String(s)\n}", "func (t *Table) Render() string {\n\tfmt.Fprintln(t.w, \"-\")\n\tt.w.Flush()\n\n\treturn t.buf.String()\n}", "func ExampleRender() {\n\tconst s = `\n\tFirst Line\n\tSecond Line\n\tThird Line\n\tHello\n\tThis is go-music`\n\n\tfmt.Println(RenderText(s, Spring))\n\tfmt.Println(RenderText(s, Autumn))\n\tfmt.Println(RenderText(s, Winter))\n\tfmt.Println(RenderText(s, Rose))\n\tfmt.Println(RenderText(s, Valentine))\n}", "func (t *tScreen) writeString(s string) {\r\n\tif t.buffering {\r\n\t\tio.WriteString(&t.buf, s)\r\n\t} else {\r\n\t\tio.WriteString(t.c, s)\r\n\t}\r\n}", "func (r ErrorRenderer) Render(_ io.Writer, _ interface{}) error { return r.Error }", "func String(s string) Renderer {\n\treturn stringRenderer{\n\t\tEngine: New(Options{}),\n\t\tbody: s,\n\t}\n}", "func (s *Server) Render(w io.Writer, name string, data interface{}, c echo.Context) error {\n\treturn s.Templates.ExecuteTemplate(w, name, data)\n}", "func (s *Server) Render(w ResponseWriter, r *http.Request) {\n\tif w.Written() {\n\t\treturn\n\t}\n\tif err := s.renderTemplate(w, r); err != nil {\n\t\tw.Header().Set(\"Content-Type\", \"text/plain; charset=utf-8\")\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tfmt.Fprintf(w, \"Error executing template: %s\\n\", err)\n\t}\n}", "func RenderString(code string, str string) string {\n\tif len(code) == 0 || str == \"\" {\n\t\treturn str\n\t}\n\n\t// disabled OR not support color\n\tif !Enable || !isSupportColor {\n\t\treturn ClearCode(str)\n\t}\n\n\treturn fmt.Sprintf(FullColorTpl, code, str)\n}", "func (tc *TermColor) RenderTo(w io.Writer, str string) {\n\tw.Write(types.UnsafeBytes(tc.Render(str)))\n}", "func (c *Car) Render(out chan<- string) {\n defer close(out) // Always close the channel!\n\n var vs string\n if vs = os.Getenv(\"BULLETTRAIN_CAR_VIRTUALENV_SYMBOL_ICON\"); vs == \"\" {\n vs = virtualenvSymbolIcon\n }\n\n var vsp string\n if vsp = os.Getenv(\"BULLETTRAIN_CAR_VIRTUALENV_SYMBOL_PAINT\"); vsp == \"\" {\n vsp = virtualenvSymbolPaint\n }\n\n var s string\n if s = os.Getenv(\"BULLETTRAIN_CAR_VIRTUALENV_TEMPLATE\"); s == \"\" {\n s = carTemplate\n }\n\n funcMap := template.FuncMap{\n // Pipeline 
functions for colouring.\n \"c\": func(t string) string { return ansi.Color(t, c.GetPaint()) },\n \"cs\": func(t string) string { return ansi.Color(t, vsp) },\n }\n\n tpl := template.Must(template.New(\"python\").Funcs(funcMap).Parse(s))\n data := struct {\n VenvIcon string\n Venv string\n }{\n VenvIcon: virtualenvSymbolIcon,\n Venv: path.Base(os.Getenv(\"VIRTUAL_ENV\")),\n }\n fromTpl := new(bytes.Buffer)\n err := tpl.Execute(fromTpl, data)\n if err != nil {\n log.Fatalf(\"Can't generate the python template: %s\", err.Error())\n }\n\n out <- fromTpl.String()\n}", "func renderToTerminal(output string) {\n\tswitch render_interface {\n\tcase unix:\n\t\tfmt.Println(\"\\033[2J\")\n\t\tfmt.Println(output)\n\tcase playground:\n\t\tfmt.Printf(\"\\x0c %s\", output)\n\t}\n\ttime.Sleep(delay_time * time.Millisecond)\n}", "func RenderString(rawContent string, urlPrefix string, metas map[string]string, isWiki bool) string {\n\treturn string(Render([]byte(rawContent), urlPrefix, metas, isWiki))\n}", "func (d *Display) Write(data string) error {\n\t_, err := d.port.Write([]byte(data))\n\treturn err\n}", "func Render(str string) string {\n\t// not contains emoji name.\n\tif strings.IndexByte(str, ':') == -1 {\n\t\treturn str\n\t}\n\n\treturn codeMatch.ReplaceAllStringFunc(str, func(name string) string {\n\t\treturn GetByName(name) // + \" \"\n\t})\n}", "func (m *Manager) Render(name string, w io.Writer, data interface{}) error {\n\tif w == nil {\n\t\treturn ErrWriterRequired\n\t}\n\tm.m.RLock()\n\tv, ok := m.views[name]\n\tm.m.RUnlock()\n\tif !ok {\n\t\treturn fmt.Errorf(\"view: View \\\"%s\\\" doesn't exist\", name)\n\t}\n\tb := m.buffers.Get()\n\t// trade-off:\n\t// when Render causes a panic, the buffer will not be reused\n\t// but the runtime overhead of defer is avoided\n\terr := v.Execute(b, data)\n\tif err != nil {\n\t\tm.buffers.Put(b)\n\t\treturn err\n\t}\n\t_, err = b.WriteTo(w)\n\tm.buffers.Put(b)\n\treturn err\n}", "func (Empty) Render(width, height int) *term.Buffer {\n\treturn term.NewBufferBuilder(width).Buffer()\n}", "func (e *Element) Render() string {\n\treturn strings.Join(e.renderConnections(), \"\\n\")\n}", "func (c *ConnectionSPI) Render(pixels []RGBPixel) {\r\n\tlogFields := log.Fields{\"package\": logPkg, \"conn\": \"SPI\", \"func\": \"RenderLEDs\"}\r\n\tlog.WithFields(logFields).Infof(\"Render %d LEDs\", len(pixels))\r\n\t// Fix for Raspberry Pi 3 Model B+ (5.15.84-v7+)\r\n\t// Data signal seems to be splitted sending less than 11 LEDS\r\n\tif len(pixels) < 11 {\r\n\t\tpixels = append(pixels, []RGBPixel{{}, {}, {}, {}, {}, {}, {}, {}, {}, {}}...)\r\n\t}\r\n\tvar translatedRGBs []uint8\r\n\tfor _, pixel := range pixels {\r\n\r\n\t\tcolorData := GetColorData(pixel, c.FixSPI)\r\n\t\tlog.WithFields(logFields).Tracef(\"%08b\", pixel)\r\n\r\n\t\tfor _, c := range colorData {\r\n\t\t\ttranslatedRGBs = append(translatedRGBs, c)\r\n\t\t}\r\n\t}\r\n\r\n\tc.transfer(translatedRGBs)\r\n}", "func (s Style) Render(raw string) string {\n\tt := NewAnstring(raw)\n\tfor i := len(s) - 1; i >= 0; i-- {\n\t\tt.meld(s[i])\n\t}\n\treturn string(t)\n}", "func (r *Ris) Render() {\n\tr.render()\n}", "func Renderer() func(string) {\n\tvar prev string\n\n\treturn func(curr string) {\n\t\t// clear lines\n\t\tif prev != \"\" {\n\t\t\tfor range lines(prev) {\n\t\t\t\tMoveUp(1)\n\t\t\t\tClearLine()\n\t\t\t}\n\t\t}\n\n\t\t// print lines\n\t\tif curr != \"\" {\n\t\t\tfor _, s := range lines(curr) {\n\t\t\t\tfmt.Printf(\"%s\\n\", s)\n\t\t\t}\n\t\t}\n\n\t\tprev = curr\n\t}\n}", "func Render(s api.Session, renderAs 
RenderName, value dgo.Value, out io.Writer) {\n\t// Convert value to rich data format without references\n\tdedupStream := func(value dgo.Value, consumer streamer.Consumer) {\n\t\topts := streamer.DefaultOptions()\n\t\topts.DedupLevel = streamer.NoDedup\n\t\tser := streamer.New(s.AliasMap(), opts)\n\t\tser.Stream(value, consumer)\n\t}\n\n\tswitch renderAs {\n\tcase JSON:\n\t\tif value.Equals(vf.Nil) {\n\t\t\tutil.WriteString(out, \"null\\n\")\n\t\t} else {\n\t\t\tdedupStream(value, streamer.JSON(out))\n\t\t\tutil.WriteByte(out, '\\n')\n\t\t}\n\n\tcase YAML:\n\t\tif value.Equals(vf.Nil) {\n\t\t\tutil.WriteString(out, \"\\n\")\n\t\t} else {\n\t\t\tdc := streamer.DataCollector()\n\t\t\tdedupStream(value, dc)\n\t\t\tbs, err := yaml.Marshal(dc.Value())\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t\tutil.WriteString(out, string(bs))\n\t\t}\n\tcase Binary:\n\t\tbi := vf.New(typ.Binary, value).(dgo.Binary)\n\t\t_, err := out.Write(bi.GoBytes())\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\tcase Text:\n\t\tutil.Fprintln(out, value)\n\tdefault:\n\t\tpanic(fmt.Errorf(`unknown rendering '%s'`, renderAs))\n\t}\n}", "func (f Frame) Render() string {\n\tsb := strings.Builder{}\n\tfor _, row := range f {\n\t\tfor _, shade := range row {\n\t\t\tsb.WriteString(fmt.Sprintf(\"%d\", shade))\n\t\t}\n\t\tsb.WriteString(\"\\n\")\n\t}\n\tsb.WriteString(\"==============================\\n\")\n\n\treturn sb.String()\n}", "func (c *Context) Render(content string) {\n\t// Write response\n\tc.Response.Write([]byte(content))\n}", "func (s Resource) Render(geometry Geometry) {\n\tcgeometry := geometry.c()\n\tdefer C.free(unsafe.Pointer(cgeometry))\n\tC.wlc_surface_render(C.wlc_resource(s), cgeometry)\n}", "func (t *TemplateRenderable) Render(_ ...string) string {\n\treturn string(t.cache.Bytes())\n}", "func (t *Renderer) Render(\n\tw io.Writer,\n\tname string,\n\tdata interface{},\n\tc echo.Context,\n) error {\n\tif t.debug {\n\t\tt.ReloadTemplates()\n\t}\n\n\treturn t.template.ExecuteTemplate(w, name, data)\n}", "func (r *renderer) write(s string, unescaped bool) {\n\tif r.indentNext {\n\t\tr.indentNext = false\n\t\tr.w.WriteString(r.indent)\n\t}\n\tif !unescaped {\n\t\ts = html.EscapeString(s)\n\t}\n\tr.w.WriteString(s)\n}", "func RenderString(str string, w io.Writer, ctx ...interface{}) error {\n\tt, err := ParseString(str)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn t.Render(w, ctx...)\n}", "func (vhs *VHS) Render() error {\n\t// Apply Loop Offset by modifying frame sequence\n\tif err := vhs.ApplyLoopOffset(); err != nil {\n\t\treturn err\n\t}\n\n\t// Generate the video(s) with the frames.\n\tvar cmds []*exec.Cmd\n\tcmds = append(cmds, MakeGIF(vhs.Options.Video))\n\tcmds = append(cmds, MakeMP4(vhs.Options.Video))\n\tcmds = append(cmds, MakeWebM(vhs.Options.Video))\n\tcmds = append(cmds, MakeScreenshots(vhs.Options.Screenshot)...)\n\n\tfor _, cmd := range cmds {\n\t\tif cmd == nil {\n\t\t\tcontinue\n\t\t}\n\t\tout, err := cmd.CombinedOutput()\n\t\tif err != nil {\n\t\t\tlog.Println(string(out))\n\t\t}\n\t}\n\n\treturn nil\n}", "func (s *SegmentRoot) Render() []byte {\n\tvar b bytes.Buffer\n\tb.Write(SetBackground(SegmentRootBackground))\n\tfmt.Fprint(&b, \" \")\n\tb.Write(s.Data)\n\tfmt.Fprint(&b, \" \")\n\treturn b.Bytes()\n}", "func render(m diag.Message, colorize bool) string {\n\treturn fmt.Sprintf(\"%s%v%s [%v]%s %s\",\n\t\tcolorPrefix(m, colorize), m.Type.Level(), colorSuffix(colorize),\n\t\tm.Type.Code(), m.Origin(), fmt.Sprintf(m.Type.Template(), m.Parameters...),\n\t)\n}", "func (Parser) 
Render(rawBytes []byte, urlPrefix string, metas map[string]string, isWiki bool) []byte {\n\treturn Render(rawBytes, urlPrefix, metas, isWiki)\n}", "func (c *cartridge) Render() {\n\tc.Cls()\n\tc.PrintAt(fmt.Sprintf(\"counter:%d\", c.counter), c.x, c.y)\n}", "func (msg *Message) RenderBuffer() *bytes.Buffer {\n\tbuffer := bufpool.New()\n\n\tif msg.Sender != EMPTY {\n\t\tbuffer.WriteString(COLON)\n\t\tbuffer.WriteString(msg.Sender)\n\t\tbuffer.WriteString(SPACE)\n\t}\n\n\tif msg.Code > 0 {\n\t\tbuffer.WriteString(fmt.Sprintf(PADNUM, msg.Code))\n\t} else if msg.Command != EMPTY {\n\t\tbuffer.WriteString(msg.Command)\n\t}\n\n\tif len(msg.Params) > 0 {\n\t\tif len(msg.Params) > 14 {\n\t\t\tmsg.Params = msg.Params[0:15]\n\t\t}\n\n\t\tbuffer.WriteString(SPACE)\n\t\tbuffer.WriteString(strings.Join(msg.Params, SPACE))\n\t}\n\n\tif msg.Text != EMPTY {\n\t\tbuffer.WriteString(SPACE)\n\t\tbuffer.WriteString(COLON)\n\t\tbuffer.WriteString(msg.Text)\n\t}\n\n\tbuffer.WriteString(CRLF)\n\n\treturn buffer\n}", "func (d *Display) Write(txt string) *Display {\n\tfor _, char := range txt {\n\t\tswitch {\n\t\tcase char >= 32 && char <= 126:\n\t\t\td.sendData(uint8(char)) // code 32 to 126 from font table matchs exactly ASCII\n\t\tcase char == 216:\n\t\t\td.sendData(174) // Ø\n\t\tcase char == 224:\n\t\t\td.sendData(133) // à\n\t\tcase char == 226:\n\t\t\td.sendData(131) // â\n\t\tcase char == 228:\n\t\t\td.sendData(132) // ä\n\t\tcase char == 232:\n\t\t\td.sendData(138) // è\n\t\tcase char == 233:\n\t\t\td.sendData(130) // é\n\t\tcase char == 234:\n\t\t\td.sendData(136) // ê\n\t\tcase char == 235:\n\t\t\td.sendData(137) // ë\n\t\tcase char == 238:\n\t\t\td.sendData(140) // î\n\t\tcase char == 239:\n\t\t\td.sendData(139) // ï\n\t\tcase char == 241:\n\t\t\td.sendData(155) // ñ\n\t\tcase char == 244:\n\t\t\td.sendData(148) // ô\n\t\tcase char == 246:\n\t\t\td.sendData(149) // ö\n\t\tcase char == 248:\n\t\t\td.sendData(175) // ø\n\t\tcase char == 249:\n\t\t\td.sendData(151) // ù\n\t\tcase char == 251:\n\t\t\td.sendData(150) // û\n\t\tcase char == 252:\n\t\t\td.sendData(129) // ü\n\t\tcase char == 255:\n\t\t\td.sendData(152) // ÿ\n\t\tcase char == 231:\n\t\t\td.sendData(135) // ç\n\t\tdefault:\n\t\t\td.sendData(159) // ¿\n\t\t}\n\t}\n\treturn d\n}", "func renderContent(w http.ResponseWriter, r *http.Request, path string, content []byte) {\n guessContent(w, path)\n w.Write(content)\n}", "func RenderString(text string, codec StringerCodec) string {\n\tRender(text, codec)\n\treturn codec.String()\n}", "func (f *binaryRender) Render(w io.Writer) error {\n\tif f.Reader != nil {\n\t\tdefer ess.CloseQuietly(f.Reader)\n\t\t_, err := io.Copy(w, f.Reader)\n\t\treturn err\n\t}\n\n\tfile, err := os.Open(f.Path)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer ess.CloseQuietly(file)\n\n\tfi, err := file.Stat()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif fi.IsDir() {\n\t\treturn fmt.Errorf(\"'%s' is a directory\", f.Path)\n\t}\n\n\t_, err = io.Copy(w, file)\n\treturn err\n}", "func Render(s string) string {\n\treturn searchColors(s, -1, false)\n}", "func renderHTMLPage(w http.ResponseWriter, r *http.Request, content []byte) {\n w.Header().Set(\"Content-Type\", \"text/html\")\n w.Write(content)\n}", "func (t *TemplateRenderable) String() string {\n\treturn t.Render()\n}", "func Render(w http.ResponseWriter, r *http.Request, v Renderer) error {\n\tif err := renderer(w, r, v); err != nil {\n\t\treturn err\n\t}\n\tRespond(w, r, v)\n\treturn nil\n}", "func (c *Context) Render(status int, name string, data interface{}) (err error) 
{\n\tif c.router.Renderer == nil {\n\t\treturn errors.New(\"renderer not registered\")\n\t}\n\n\tvar buf = new(bytes.Buffer)\n\tif err = c.router.Renderer.Render(buf, name, data, c); err != nil {\n\t\treturn err\n\t}\n\n\tc.HTML(status, buf.Bytes())\n\treturn\n}", "func Render(source string, context *Context) (string, error) {\n\n\twriter := &strings.Builder{}\n\n\tparsed, err := template.New(\"\").Funcs(context.Functions()).Parse(source)\n\tif err != nil {\n\t\treturn \"\", errors.Wrap(err, \"error parsing template\")\n\t}\n\n\terr = parsed.Execute(writer, nil)\n\tif err != nil {\n\t\treturn \"\", errors.Wrap(err, \"error executing template\")\n\t}\n\n\treturn writer.String(), nil\n}", "func (s funcRenderer) Render(w io.Writer, data Data) error {\n\treturn s.renderFunc(w, data)\n}", "func render(template_content string, data TemplateData) string {\n temp, err := template.New(\"html\").Parse(template_content)\n if err!=nil {\n os.Stderr.WriteString(err.Error() + \"\\n\")\n } else {\n buf := new(strings.Builder)\n err = temp.Execute(buf, data)\n if err!=nil {\n os.Stderr.WriteString(err.Error() + \"\\n\")\n }\n output := buf.String()\n return output\n }\n return \"\"\n}", "func (gm *GraphicsManager) Render(compsToSend *common.Vector) {\n\thandlerIndex := 0\n\tdefer gm.handleClosedGraphicsHandler(handlerIndex)\n\n\t//common.LogInfo.Println(compsToSend)\n\tfor handlerIndex = range gm.graphicsHandlersLink {\n\t\tif gm.graphicsHandlersLink[handlerIndex] == nil {\n\t\t\tcontinue\n\t\t}\n\t\tgm.graphicsHandlersLink[handlerIndex] <- compsToSend\n\t}\n}", "func (renderer *SimpleMatrixRenderer) renderCharacter(character string) {\n\trenderer.renderCharacterWithColor(character, black)\n}", "func (e *Engine) Render(arg string) (string, error) {\n\n\tif len(e.fmap) == 0 {\n\t\te.initFuncMap()\n\t}\n\n\t_, err := e.template.Parse(arg)\n\tif err != nil {\n\t\treturn \"\", errors.Wrapf(err, renderErr)\n\t}\n\n\tvar tpl bytes.Buffer\n\terr = e.template.Execute(&tpl, \"\")\n\tif err != nil {\n\t\treturn \"\", errors.Wrapf(err, renderErr)\n\t}\n\n\treturn tpl.String(), nil\n}", "func (x *xmlRender) Render(w io.Writer) error {\n\tif _, err := w.Write(xmlHeaderBytes); err != nil {\n\t\treturn err\n\t}\n\treturn xml.NewEncoder(w).Encode(x.Data)\n}", "func (t *Table) Render() string {\n\tt.t.Render()\n\treturn t.output.String()\n}", "func (h *HAProxyManager) render(ports []uint16) ([]byte, error) {\n\n\t// prepare the context\n\td := make([]templateContext, len(ports))\n\tfor i, port := range ports {\n\t\tif i == len(h.serviceAddrs) {\n\t\t\th.logger.Warnf(\"got port index %d, but only have %d service addrs. 
ports=%v serviceAddrs=%v\", i, len(h.serviceAddrs), ports, h.serviceAddrs)\n\t\t\tcontinue\n\t\t}\n\t\td[i] = templateContext{Port: port, Source: h.listenAddr, Dest: h.serviceAddrs[i]}\n\t}\n\n\t// render the template\n\tbuf := &bytes.Buffer{}\n\tif err := h.template.Execute(buf, d); err != nil {\n\t\treturn nil, err\n\t}\n\treturn buf.Bytes(), nil\n}", "func (r *Renderer) Render(path string) error {\n\toutPath := outPath(path, r.OutDir, r.BaseDir)\n\n\tif err := os.MkdirAll(filepath.Dir(outPath), os.ModeDir); err != nil {\n\t\treturn errors.Wrapf(err, \"failed to create %s\", filepath.Dir(outPath))\n\t}\n\n\tdata, err := ioutil.ReadFile(path)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"failed to read %s\", path)\n\t}\n\n\tmarkdowned := blackfriday.MarkdownCommon(data)\n\n\t// we need document reader to modify markdowned html text, for example,\n\t// syntax highlight.\n\tdoc, err := goquery.NewDocumentFromReader(bytes.NewReader(markdowned))\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"failed to parse markdown contents of %s\", path)\n\t}\n\tr.highlightCode(doc)\n\tr.handleImage(doc, filepath.Dir(path))\n\n\tcontent, _ := doc.Html()\n\tcontent = strings.Replace(content, \"<html><head></head><body>\", \"\", 1)\n\tcontent = strings.Replace(content, \"</body></html>\", \"\", 1)\n\n\toutput := r.Template\n\toutput = strings.Replace(output, \"{{{style}}}\", r.Style, -1)\n\toutput = strings.Replace(output, \"{{{content}}}\", content, -1)\n\n\terr = ioutil.WriteFile(outPath, []byte(output), os.ModeAppend)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"failed to write %s\", outPath)\n\t}\n\n\treturn nil\n}", "func Render(w http.ResponseWriter, actions ...*Action) error {\n\tbuf := new(bytes.Buffer)\n\tfor _, a := range actions {\n\t\tif err := a.appendTo(buf); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif a != nil {\n\t\t\tbuf.WriteByte('\\n')\n\t\t}\n\t}\n\tw.Header().Set(\"Content-Type\", ContentType+\"; charset=utf-8\")\n\tw.Header().Set(\"Content-Length\", strconv.Itoa(buf.Len()))\n\tio.Copy(w, buf) // ignore errors, since we already wrote\n\treturn nil\n}", "func (s *kabuta) writeToFrontend(str string) {\n\tn, err := os.Stdout.WriteString(str)\n\tif err != nil {\n\t\ts.log(\"Error sending %s: %v\\n\", str, err)\n\t\tpanic(err)\n\t}\n\ts.log(\"SENT>%s\", str)\n}", "func Renderer() func(string) {\n\tvar prev string\n\treturn func(curr string) {\n\t\tlines := linesChanged(curr, prev)\n\t\tfor _, line := range lines {\n\t\t\tMoveDown(1)\n\t\t\tif line != \"\" {\n\t\t\t\tClearLineEnd()\n\t\t\t\tfmt.Printf(\"%s\", line)\n\t\t\t}\n\t\t}\n\t\tMoveUp(len(lines))\n\t\tprev = curr\n\t}\n}", "func (r renderer) Entity(out *bytes.Buffer, entity []byte) {\n\tout.Write(entity)\n}", "func (t *Text) Render() *Text {\n\tt.RenderComponent.Drawable = common.Text{\n\t\tFont: t.Font,\n\t\tText: t.Text,\n\t}\n\n\tt.SpaceComponent = common.SpaceComponent{\n\t\tPosition: engo.Point{X: t.X, Y: t.Y},\n\t\tWidth: float32(t.Font.Size) * float32(len(t.Text)),\n\t\tHeight: float32(t.Font.Size),\n\t}\n\n\treturn t\n}", "func (c *stackComponent) Render(out io.Writer) (numLines int, err error) {\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\n\treturn renderComponents(out, c.resources)\n}", "func (r *R) Render(w http.ResponseWriter) {\n\tWriteJSON(w, r.Code, r)\n}", "func (m Msg) Render(t *Theme) string {\n\t// TODO: Render based on theme\n\t// TODO: Cache based on theme\n\treturn m.String()\n}", "func RenderText(w http.ResponseWriter, text string, code int) error {\n\tw.Header().Add(\"Content-Type\", 
\"text/plain\")\n\tw.WriteHeader(code)\n\tw.Write([]byte(text))\n\treturn nil\n}", "func TestSparkling_Render(t *testing.T) {\n\tvar buf bytes.Buffer\n\tsp := New(&buf)\n\tsp.AddSeries([]float64{0, 30, 55, 80, 33, 150}, \"Awesome\")\n\tsp.Render()\n\n\twant := \"Awesome ▁▂▃▄▂█\\n\"\n\n\ts := buf.String()\n\tgot := s[len(s)-len(want):]\n\n\tif got != want {\n\t\tt.Errorf(\"sparkling.Render() = %s, want: %s\", got, want)\n\t}\n}", "func (s RenderingEngine) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (h Hand) Render() string {\n\tbuffer := bytes.Buffer{}\n\tlast := len(h.Cards) - 1\n\tfor i, card := range h.Cards {\n\t\tbuffer.WriteString(card.Render())\n\t\tif i != last {\n\t\t\tbuffer.WriteString(\", \")\n\t\t}\n\t}\n\tbuffer.WriteString(fmt.Sprintf(\n\t\t\" (%s)\", scoresRenderer{h.Scores()}.Render(),\n\t))\n\treturn strings.Trim(buffer.String(), \" \")\n}", "func (l *LogItem) Render(c int, showTime bool) []byte {\n\tbb := make([]byte, 0, 200)\n\tif showTime {\n\t\tt := l.Timestamp\n\t\tfor i := len(t); i < 30; i++ {\n\t\t\tt += \" \"\n\t\t}\n\t\tbb = append(bb, color.ANSIColorize(t, 106)...)\n\t\tbb = append(bb, ' ')\n\t}\n\n\tif l.Pod != \"\" {\n\t\tbb = append(bb, color.ANSIColorize(l.Pod, c)...)\n\t\tbb = append(bb, ':')\n\t}\n\tif !l.SingleContainer && l.Container != \"\" {\n\t\tbb = append(bb, color.ANSIColorize(l.Container, c)...)\n\t\tbb = append(bb, ' ')\n\t}\n\n\treturn append(bb, escPattern.ReplaceAll(l.Bytes, matcher)...)\n}", "func (s *Spinner) Render() error {\n\tif len(s.frames) == 0 {\n\t\treturn errors.New(\"no frames available to to render\")\n\t}\n\n\ts.step = s.step % len(s.frames)\n\tpreviousLen := len(s.previousFrame)\n\ts.previousFrame = fmt.Sprintf(\"%s%s\", s.separator, s.frames[s.step])\n\tnewLen := len(s.previousFrame)\n\n\t// We need to clean the previous message\n\tif previousLen > newLen {\n\t\tr := previousLen - newLen\n\t\tsuffix := strings.Repeat(\" \", r)\n\t\ts.previousFrame = s.previousFrame + suffix\n\t}\n\n\tfmt.Fprint(s.Writer, s.previousFrame)\n\ts.step++\n\treturn nil\n}", "func (r *Resp3) RenderString() string {\n\treturn r.renderString(\"\")\n}", "func (ctx *Context) Render(name string, data interface{}) {\n\tctx.zion.config.TemplateEngine.Render(name, data, ctx.writer)\n}", "func (sh *ShaderStd) PostRender() error { return nil }", "func (renderer *SimpleMatrixRenderer) Render() {\n\trenderer.renderCharacter(\"\\n\")\n\n\tfor row := 0; row < renderer.Matrix.Height; row++ {\n\t\tfor col := 0; col < renderer.Matrix.Width; col++ {\n\t\t\tif !renderer.Matrix.IsFieldOccupied(row, col) {\n\t\t\t\trenderer.renderUnoccupiedField()\n\t\t\t} else {\n\t\t\t\trenderer.renderOccupiedFieldAtCurrentCursorPos(row, col)\n\t\t\t}\n\t\t}\n\n\t\trenderer.renderCharacter(\"\\n\")\n\t}\n\n\trenderer.renderCharacter(\"\\n\")\n}", "func (r Renderer) renderUnitLine() {\n\tfmt.Print(unitLine)\n}", "func Render(c Compo) {\n\tdriver.CallOnUIGoroutine(func() {\n\t\tdriver.Render(c)\n\t})\n}", "func (script Script) RenderHTML() {\n\tsymbols := []string{}\n\ttemplateHTML := `\n<!DOCTYPE html>\n<html>\n <head>\n <title>Writing System</title>\n <style type=\"text/css\">\n body, html { font-size: 28px; }\n div.container { display: flex; flex-wrap: wrap; width: 1600px; margin: 1rem auto; }\n div.cell { width: 100px; height: 100px; margin: 1rem; text-align: center; font-weight: 700; }\n div.cell > img { display: block; }\n </style>\n </head>\n <body>\n\t\t<div class=\"container\">\n\t\t\t{{range $index, $element := .}}\n <div class=\"cell\">\n <img src=\"{{ $element 
}}.png\">\n <p>{{ $element }}</p>\n </div>\n {{end}}\n </div>\n </body>\n</html>\n`\n\n\twriter, err := os.Create(\"./output/index.html\")\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn\n\t}\n\n\tt, err := template.New(\"htmlIndex\").Parse(templateHTML)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn\n\t}\n\n\tfor _, g := range script.Glyphs {\n\t\tsymbols = append(symbols, g.Representation)\n\t}\n\n\terr = t.Execute(writer, symbols)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn\n\t}\n\n\tdefer writer.Close()\n}", "func (t tag) Render() string {\n return t.render()\n}", "func drawASCII(text string) {\n\tio.Copy(os.Stdout, strings.NewReader(text))\n}" ]
[ "0.6693423", "0.6568186", "0.6555938", "0.65232486", "0.64735943", "0.64108616", "0.630587", "0.6246599", "0.62406623", "0.6227451", "0.6213972", "0.6186643", "0.61188525", "0.60934913", "0.6093434", "0.6093434", "0.60800576", "0.6035777", "0.60135376", "0.59639907", "0.594344", "0.59230626", "0.59126574", "0.5892823", "0.5886948", "0.5871706", "0.5871549", "0.5858069", "0.58326966", "0.5821848", "0.5784359", "0.5758371", "0.57485455", "0.57466924", "0.57434684", "0.5736127", "0.5716315", "0.57130957", "0.5700571", "0.5696914", "0.5691903", "0.5688303", "0.5653744", "0.5652929", "0.56474733", "0.5631945", "0.5630876", "0.5628251", "0.56183505", "0.5615405", "0.5605704", "0.5602482", "0.5596257", "0.55932313", "0.5593113", "0.55880934", "0.5583287", "0.55811566", "0.55705094", "0.5563811", "0.55511653", "0.5549953", "0.55460674", "0.5541668", "0.5536788", "0.5536362", "0.5535174", "0.5529004", "0.55253786", "0.5520211", "0.5500864", "0.54905146", "0.54851204", "0.5481983", "0.5474647", "0.5472251", "0.54668325", "0.5465142", "0.546494", "0.5454822", "0.54540235", "0.5442321", "0.5442027", "0.5441063", "0.5440113", "0.54298496", "0.5424584", "0.54228956", "0.54160225", "0.54120094", "0.5408285", "0.5406042", "0.54056144", "0.54037565", "0.5398557", "0.5397377", "0.53897464", "0.53863853", "0.5386261", "0.5385907" ]
0.67597234
0
NewDirectRequestSpec initializes a new DirectRequestSpec from a job.DirectRequestSpec
func NewDirectRequestSpec(spec *job.DirectRequestSpec) *DirectRequestSpec {
	return &DirectRequestSpec{
		ContractAddress:             spec.ContractAddress,
		MinIncomingConfirmations:    spec.MinIncomingConfirmations,
		MinIncomingConfirmationsEnv: spec.MinIncomingConfirmationsEnv,
		MinContractPayment:          spec.MinContractPayment,
		Requesters:                  spec.Requesters,
		// This is hardcoded to runlog. When we support other initiators, we need
		// to change this
		Initiator:  "runlog",
		CreatedAt:  spec.CreatedAt,
		UpdatedAt:  spec.UpdatedAt,
		EVMChainID: spec.EVMChainID,
	}
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func NewDirectRequestSpec(spec *job.DirectRequestSpec) *DirectRequestSpec {\n\treturn &DirectRequestSpec{\n\t\tContractAddress: spec.ContractAddress,\n\t\tOnChainJobSpecID: spec.OnChainJobSpecID.String(),\n\t\t// This is hardcoded to runlog. When we support other intiators, we need\n\t\t// to change this\n\t\tInitiator: \"runlog\",\n\t\tCreatedAt: spec.CreatedAt,\n\t\tUpdatedAt: spec.UpdatedAt,\n\t}\n}", "func (c *InputService17ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *InputService11ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *InputService14ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *InputService18ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *InputService16ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *InputService8ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *InputService8ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *InputService9ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *InputService9ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *InputService4ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *InputService4ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *InputService2ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *InputService2ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *InputService22ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *InputService1ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *InputService1ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *InputService12ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *InputService11ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request 
{\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *InputService9ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *InputService10ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *InputService8ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *InputService20ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *InputService19ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *InputService1ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (s Benchmark_send_Params) NewReq() (Message, error) {\n\tss, err := NewMessage(s.Struct.Segment())\n\tif err != nil {\n\t\treturn Message{}, err\n\t}\n\terr = s.Struct.SetPtr(0, ss.Struct.ToPtr())\n\treturn ss, err\n}", "func (c *InputService2ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *InputService13ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *InputService10ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *InputService21ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *InputService4ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *InputService7ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *InputService7ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *InputService15ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *InputService7ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *InputService3ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *InputService3ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *InputService6ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *InputService6ProtocolTest) newRequest(op *request.Operation, params, data interface{}) 
*request.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *InputService6ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *InputService3ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *OutputService8ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *OutputService9ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *InputService5ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *InputService5ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *OutputService14ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *OutputService11ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func NewSpec(details *SpecDetails) *Spec {\n\treturn &Spec{\n\t\tDetails: details,\n\t\tServices: NewServiceList(),\n\t\tStatus: SpecWaiting,\n\t}\n}", "func (c *OutputService1ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *OutputService1ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *OutputService2ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *OutputService2ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *OutputService4ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *OutputService4ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *OutputService10ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *InputService5ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func NewRequestSchedule()(*RequestSchedule) {\n m := &RequestSchedule{\n }\n m.backingStore = ie8677ce2c7e1b4c22e9c3827ecd078d41185424dd9eeb92b7d971ed2d49a392e.BackingStoreFactoryInstance();\n m.SetAdditionalData(make(map[string]any))\n return m\n}", "func newReq(t *testing.T, method, path string, payload interface{}) *http.Request {\n\tt.Helper()\n\n\tvar body io.Reader\n\n\tif payload != nil {\n\t\traw, err := json.Marshal(payload)\n\t\trequire.NoError(t, err)\n\n\t\tbody = bytes.NewReader(raw)\n\t}\n\n\treturn httptest.NewRequest(method, path, body)\n}", "func 
(c *OutputService7ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *OutputService7ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func NewDeviceManagementRequestBuilder(rawUrl string, requestAdapter i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.RequestAdapter)(*DeviceManagementRequestBuilder) {\n urlParams := make(map[string]string)\n urlParams[\"request-raw-url\"] = rawUrl\n return NewDeviceManagementRequestBuilderInternal(urlParams, requestAdapter)\n}", "func (p *DiscoveryProtocol) copyNewDiscoveryRequest(request *api.DiscoveryRequest) *api.DiscoveryRequest {\n\treq := &api.DiscoveryRequest{DiscoveryMsgData: NewDiscoveryMsgData(uuid.Must(uuid.NewV4(), nil).String(), true, p.p2pHost),\n\t\tMessage: api.DiscoveryMessage_DiscoveryReq}\n\treq.DiscoveryMsgData.InitNodeID = request.DiscoveryMsgData.InitNodeID\n\treq.DiscoveryMsgData.TTL = request.DiscoveryMsgData.TTL\n\treq.DiscoveryMsgData.Expiry = request.DiscoveryMsgData.Expiry\n\treq.DiscoveryMsgData.InitHash = request.DiscoveryMsgData.InitHash\n\tlog.Println(\"COPYING: \", req.DiscoveryMsgData.InitHash)\n\n\tkey := p.p2pHost.Peerstore().PrivKey(p.p2pHost.ID())\n\treq.DiscoveryMsgData.MessageData.Sign = signProtoMsg(req, key)\n\treturn req\n}", "func (c *OutputService12ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (w *AWS) newRequest(r *aws.Request) *Request {\n\tctx := arn.Ctx{\n\t\tPartition: region.Partition(r.Config.Region),\n\t\tRegion: r.Config.Region,\n\t\tAccount: w.Ctx.Account,\n\t}\n\tcr, err := r.Config.Credentials.Retrieve()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tif cr.SessionToken != \"\" {\n\t\tctx.Account = arn.ARN(cr.SessionToken).Account()\n\t}\n\tq := &Request{r, w, ctx}\n\tif q.Ctx.Partition != w.Ctx.Partition {\n\t\tpanic(fmt.Sprintf(\"mock: %s called in partition %q (should be %q)\",\n\t\t\tq.Name(), ctx.Partition, w.Ctx.Partition))\n\t}\n\tif region.Subset(ctx.Partition, q.Metadata.ServiceName) == nil {\n\t\tpanic(fmt.Sprintf(\"mock: %q partition does not support %q api\",\n\t\t\tctx.Partition, q.Metadata.ServiceName))\n\t}\n\treturn q\n}", "func NewDeviceRequestBuilder(rawUrl string, requestAdapter i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.RequestAdapter)(*DeviceRequestBuilder) {\n urlParams := make(map[string]string)\n urlParams[\"request-raw-url\"] = rawUrl\n return NewDeviceRequestBuilderInternal(urlParams, requestAdapter)\n}", "func (c *OutputService15ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *OutputService13ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func NewReq(required []string) *Request {\n\treturn &Request{\n\t\targuments: make(map[string]string),\n\t\trequired: required,\n\t}\n}", "func (c *OutputService3ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *OutputService3ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (s 
*AuthnReqListsService) newRequest(op *request.Operation, params, data interface{}) *request.Request {\n\treq := s.NewRequest(op, params, data)\n\n\treturn req\n}", "func NewRequest(params interface{}, atta map[string]interface{}) *DubboRequest {\n\tif atta == nil {\n\t\tatta = make(map[string]interface{})\n\t}\n\treturn &DubboRequest{\n\t\tParams: params,\n\t\tAttachments: atta,\n\t}\n}", "func (c *OutputService6ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *OutputService6ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *IoT) newRequest(op *request.Operation, params, data interface{}) *request.Request {\n\treq := c.NewRequest(op, params, data)\n\n\t// Run custom request initialization if present\n\tif initRequest != nil {\n\t\tinitRequest(req)\n\t}\n\n\treturn req\n}", "func (c *client) newRequest(ctx context.Context, method string, svc service, input Marshaler) (*http.Request, error) {\n\tparamsMap, err := input.MarshalMap()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar svcPath string\n\tswitch svc {\n\tcase servicePayByPrime:\n\t\tsvcPath = payByPrimePath\n\tcase serviceRecord:\n\t\tsvcPath = recordPath\n\tcase serviceRefund:\n\t\tsvcPath = refundPath\n\t}\n\n\tu, _ := url.Parse(svcPath)\n\tbase, _ := url.Parse(c.url)\n\tpath := base.ResolveReference(u).String()\n\n\tparamsMap[\"partner_key\"] = c.partnerKey\n\tbody, _ := json.Marshal(paramsMap)\n\treq, err := http.NewRequestWithContext(ctx, method, path, bytes.NewBuffer(body))\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"cannot create a TapPay request: %v\", err)\n\t}\n\treq.Header.Add(\"x-api-key\", c.partnerKey)\n\treq.Header.Add(\"Content-Type\", \"application/json\")\n\treturn req, nil\n}", "func (c *Client) newRequest(t RequestType) *Request {\n\tc.seqID++\n\treturn &Request{\n\t\tAPIVersion: \"v1\",\n\t\tRequestType: t,\n\t\tTracerTime: time.Now().Unix(),\n\t\tRuntimeID: globalconfig.RuntimeID(),\n\t\tSeqID: c.seqID,\n\t\tDebug: c.debug,\n\t\tApplication: Application{\n\t\t\tServiceName: c.Service,\n\t\t\tEnv: c.Env,\n\t\t\tServiceVersion: c.Version,\n\t\t\tTracerVersion: version.Tag,\n\t\t\tLanguageName: \"go\",\n\t\t\tLanguageVersion: runtime.Version(),\n\t\t},\n\t\tHost: Host{\n\t\t\tHostname: hostname,\n\t\t\tContainerID: internal.ContainerID(),\n\t\t\tOS: getOSName(),\n\t\t\tOSVersion: getOSVersion(),\n\t\t},\n\t}\n}", "func (c *APIGateway) newRequest(op *request.Operation, params, data interface{}) *request.Request {\n\treq := c.NewRequest(op, params, data)\n\n\t// Run custom request initialization if present\n\tif initRequest != nil {\n\t\tinitRequest(req)\n\t}\n\n\treturn req\n}", "func NewThreatAssessmentRequest()(*ThreatAssessmentRequest) {\n m := &ThreatAssessmentRequest{\n Entity: *NewEntity(),\n }\n return m\n}", "func NewThreatAssessmentRequest()(*ThreatAssessmentRequest) {\n m := &ThreatAssessmentRequest{\n Entity: *NewEntity(),\n }\n return m\n}", "func NewMyRequestBuilder(rawUrl string, requestAdapter i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.RequestAdapter)(*MyRequestBuilder) {\n urlParams := make(map[string]string)\n urlParams[\"request-raw-url\"] = rawUrl\n return NewMyRequestBuilderInternal(urlParams, requestAdapter)\n}", "func newRequest(ctx context.Context, msg interface{}) *request {\n\treturn &request{\n\t\tctx: ctx,\n\t\tmsg: msg,\n\t\tfailure: make(chan 
error, 1),\n\t\tresponse: make(chan *Delivery, 1),\n\t}\n}", "func NewRequest(robot *Robot, message *Message, query []string) *Request {\n\treturn &Request{\n\t\tMessage: message,\n\t\tQuery: query,\n\t\trobot: robot,\n\t}\n}", "func (s *Splitter) newChunkRequest(dr DownloadRange) (*http.Request, error) {\n\trequest, err := http.NewRequestWithContext(\n\t\ts.Ctx,\n\t\t\"GET\",\n\t\ts.PI.Source.Path.String(),\n\t\tnil,\n\t)\n\tif err != nil {\n\t\treturn nil, &splitterError{context: \"cannot prepare request\", err: err}\n\t}\n\n\trequest.Header.Add(\"Range\", dr.BuildRangeHeader())\n\n\treturn request, nil\n}", "func NewDeltasReq(code string, table string, scope string, payer string) *DeltasReq {\n\treturn ndr(code, table, scope, payer, \"\", \"\", 0, 0)\n}", "func (m *manager) newRequest(ctx context.Context, selector ipld.Node, isPull bool, voucher datatransfer.Voucher, baseCid cid.Cid, to peer.ID) (datatransfer.Request, error) {\n\t// Generate a new transfer ID for the request\n\ttid := datatransfer.TransferID(m.transferIDGen.next())\n\treturn message.NewRequest(tid, false, isPull, voucher.Type(), voucher, baseCid, selector)\n}", "func (s *SitesService) newRequest(op *request.Operation, params, data interface{}) *request.Request {\n\treq := s.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *Client) newCertificateRequest(req *api.CSRInfo) *csr.CertificateRequest {\n\tcr := csr.CertificateRequest{}\n\tif req != nil && req.Names != nil {\n\t\tcr.Names = req.Names\n\t}\n\tif req != nil && req.Hosts != nil {\n\t\tcr.Hosts = req.Hosts\n\t} else {\n\t\thostname, _ := os.Hostname()\n\t\tif hostname != \"\" {\n\t\t\tcr.Hosts = make([]string, 1)\n\t\t\tcr.Hosts[0] = hostname\n\t\t}\n\t}\n\tif req != nil && req.KeyRequest != nil {\n\t\tcr.KeyRequest = newCfsslBasicKeyRequest(req.KeyRequest)\n\t}\n\tif req != nil {\n\t\tcr.CA = req.CA\n\t\tcr.SerialNumber = req.SerialNumber\n\t}\n\treturn &cr\n}", "func newFrontendRequest(k *kabuta, command string) *frontendRequest {\n\treturn &frontendRequest{kabuta: k, t0: time.Now(), rawCmd: command}\n}", "func (s *OidcService) newRequest(op *request.Operation, params, data interface{}) *request.Request {\n\treq := s.NewRequest(op, params, data)\n\n\treturn req\n}", "func (in *DownloadRequestSpec) DeepCopy() *DownloadRequestSpec {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(DownloadRequestSpec)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (s *Stein) newRequest(method string, path string, body io.Reader) (*http.Request, error) {\n\treq, err := http.NewRequest(method, path, body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treq.Header.Set(\"Content-Type\", \"application/json\")\n\n\treturn req, nil\n}", "func NewDeviceManagementRequestBuilderInternal(pathParameters map[string]string, requestAdapter i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.RequestAdapter)(*DeviceManagementRequestBuilder) {\n m := &DeviceManagementRequestBuilder{\n }\n m.urlTemplate = \"{+baseurl}/deviceManagement{?%24select,%24expand}\";\n urlTplParams := make(map[string]string)\n for idx, item := range pathParameters {\n urlTplParams[idx] = item\n }\n m.pathParameters = urlTplParams;\n m.requestAdapter = requestAdapter;\n return m\n}", "func NewRequest(src, dest string) *Request {\n\treturn &Request{src: src, dest: dest}\n}", "func NewPageRequest(page, perPage int64, spec *Spec) *PageRequest {\n\tif page < 1 {\n\t\tpage = 1\n\t}\n\n\t// set limit to per page var\n\tif perPage <= 0 || perPage > 100 {\n\t\tperPage = 100\n\t}\n\n\treturn 
&PageRequest{\n\t\tPage: page,\n\t\tPerPage: perPage,\n\t\tSpec: spec,\n\t}\n}", "func (m *MapDisk) NewWriteRequest(fname string) Request {\n\treturn Request{\n\t\treqType: reqWrite,\n\t\tinChan: m.inChan,\n\t\tfname: fname,\n\t\tresChan: make(chan reply),\n\t}\n}", "func (s *HighAvailabilityService) newRequest(op *request.Operation, params, data interface{}) *request.Request {\n\treq := s.NewRequest(op, params, data)\n\n\treturn req\n}", "func NewRequest(command string) (qr quantum.Request, err error) {\n\tr := Request{\n\t\tCommand: command,\n\t}\n\td, err := json.Marshal(r)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tqr = NewRequestJSON(d)\n\treturn\n}", "func newCreateRequest(path, name string) (*Request, error) {\n\t// XXX Better to delay reading the file content until it is needed\n\t// inside sendRequests() loop, and skip the overhead from copying data.\n\t// ==> Replace Request.Data by the file descriptor, then use\n\t// splice(2) (or other) for zero-copying (use\n\t// rpc.NewClientWithCodec() instead of rpc.NewClient())\n\tcontent, err := ioutil.ReadFile(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Request{Type: requestCreate, Path: name, Data: content}, nil\n}", "func (c *OutputService5ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}" ]
[ "0.81985295", "0.61109", "0.610836", "0.6104865", "0.60955787", "0.60782826", "0.60756934", "0.60756934", "0.6053734", "0.6053734", "0.6046308", "0.6046308", "0.60427433", "0.60427433", "0.6041208", "0.6038668", "0.6038668", "0.6030468", "0.60247654", "0.60017973", "0.599973", "0.59888804", "0.5979851", "0.59786034", "0.59693706", "0.59625775", "0.5948042", "0.5946102", "0.59350514", "0.5929593", "0.5928567", "0.59124225", "0.59124225", "0.5901101", "0.58739734", "0.5857695", "0.5857695", "0.5810967", "0.5810967", "0.5742379", "0.571622", "0.569867", "0.56913805", "0.5664389", "0.5664389", "0.5635689", "0.5630794", "0.5630724", "0.56269115", "0.56269115", "0.56186163", "0.56186163", "0.56173927", "0.56173927", "0.55941075", "0.55929667", "0.555424", "0.5553132", "0.55476946", "0.55476946", "0.55169386", "0.549746", "0.5490731", "0.5476982", "0.5457708", "0.54474103", "0.54453343", "0.5444826", "0.5442899", "0.5442899", "0.54412216", "0.5426623", "0.53890276", "0.53890276", "0.53865576", "0.5374778", "0.5370076", "0.5311548", "0.5308121", "0.5308121", "0.5299877", "0.52746403", "0.52635765", "0.52634615", "0.5261502", "0.526081", "0.5256083", "0.52538663", "0.5250325", "0.5243646", "0.524185", "0.5237047", "0.5236157", "0.5229996", "0.5224522", "0.5224188", "0.52203614", "0.52163357", "0.5215828", "0.52134496" ]
0.8256631
0
NewFluxMonitorSpec initializes a new FluxMonitorSpec from a job.FluxMonitorSpec
func NewFluxMonitorSpec(spec *job.FluxMonitorSpec) *FluxMonitorSpec { var drumbeatSchedulePtr *string if spec.DrumbeatEnabled { drumbeatSchedulePtr = &spec.DrumbeatSchedule } var drumbeatRandomDelayPtr *string if spec.DrumbeatRandomDelay > 0 { drumbeatRandomDelay := spec.DrumbeatRandomDelay.String() drumbeatRandomDelayPtr = &drumbeatRandomDelay } return &FluxMonitorSpec{ ContractAddress: spec.ContractAddress, Threshold: float32(spec.Threshold), AbsoluteThreshold: float32(spec.AbsoluteThreshold), PollTimerPeriod: spec.PollTimerPeriod.String(), PollTimerDisabled: spec.PollTimerDisabled, IdleTimerPeriod: spec.IdleTimerPeriod.String(), IdleTimerDisabled: spec.IdleTimerDisabled, DrumbeatEnabled: spec.DrumbeatEnabled, DrumbeatSchedule: drumbeatSchedulePtr, DrumbeatRandomDelay: drumbeatRandomDelayPtr, MinPayment: spec.MinPayment, CreatedAt: spec.CreatedAt, UpdatedAt: spec.UpdatedAt, EVMChainID: spec.EVMChainID, } }
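A minimal usage sketch for the document above (editorial addition, not part of the dataset record): it assumes the function lives in a presenters package, as in Chainlink's web layer, and that the job package is importable — the import paths and the spec values below are assumptions, not facts from this record. It sets only fields the presenter itself reads, so the pointer guards around the drumbeat fields are visible in the output.

package main

import (
	"fmt"

	// Assumed import paths; they vary across Chainlink major versions.
	"github.com/smartcontractkit/chainlink/core/services/job"
	"github.com/smartcontractkit/chainlink/core/web/presenters"
)

func main() {
	// Hypothetical spec; only fields the presenter reads are populated.
	spec := &job.FluxMonitorSpec{
		DrumbeatEnabled:  true,
		DrumbeatSchedule: "@every 1m",
	}
	p := presenters.NewFluxMonitorSpec(spec)

	// DrumbeatSchedule is surfaced as a *string only because DrumbeatEnabled is true.
	fmt.Println(*p.DrumbeatSchedule) // @every 1m

	// DrumbeatRandomDelay stays nil: the source value was zero, so the
	// `> 0` guard in the presenter never assigns the pointer.
	fmt.Println(p.DrumbeatRandomDelay == nil) // true
}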
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func NewFluxMonitorSpec(spec *job.FluxMonitorSpec) *FluxMonitorSpec {\n\treturn &FluxMonitorSpec{\n\t\tContractAddress: spec.ContractAddress,\n\t\tPrecision: spec.Precision,\n\t\tThreshold: spec.Threshold,\n\t\tAbsoluteThreshold: spec.AbsoluteThreshold,\n\t\tPollTimerPeriod: spec.PollTimerPeriod.String(),\n\t\tPollTimerDisabled: spec.PollTimerDisabled,\n\t\tIdleTimerPeriod: spec.IdleTimerPeriod.String(),\n\t\tIdleTimerDisabled: spec.IdleTimerDisabled,\n\t\tMinPayment: spec.MinPayment,\n\t\tCreatedAt: spec.CreatedAt,\n\t\tUpdatedAt: spec.UpdatedAt,\n\t}\n}", "func NewMonitor(inputChan chan *TripsOfSec) *Monitor {\n\tm := &Monitor{\n\t\tInputChan: inputChan,\n\t\tprocessingStats: map[int64]*ProcessingStat{},\n\t\tResultChan: make(chan Stat, 1024),\n\t}\n\tgo m.consume()\n\treturn m\n}", "func NewMonitor(c context.Context) *Monitor {\n\treturn &Monitor{StatPB: make(chan pb.Stat, 1),\n\t\tticker: time.NewTicker(time.Duration(config.CfgWorker.LoadReport.LoadReportInterval) * time.Second),\n\t\tadapterIdx: -1,\n\t\tCtx: c,\n\t\tStat: stat.NewStat()}\n}", "func newMonitor(ui cli.Ui, client *api.Client, length int) *monitor {\n\tmon := &monitor{\n\t\tui: &cli.PrefixedUi{\n\t\t\tInfoPrefix: \"==> \",\n\t\t\tOutputPrefix: \" \",\n\t\t\tErrorPrefix: \"==> \",\n\t\t\tUi: ui,\n\t\t},\n\t\tclient: client,\n\t\tstate: newEvalState(),\n\t\tlength: length,\n\t}\n\treturn mon\n}", "func NewSpec(details *SpecDetails) *Spec {\n\treturn &Spec{\n\t\tDetails: details,\n\t\tServices: NewServiceList(),\n\t\tStatus: SpecWaiting,\n\t}\n}", "func NewMonitorReconciler(mgr ctrl.Manager) (*MonitorReconciler, error) {\n\tr := &MonitorReconciler{\n\t\tClient: mgr.GetClient(),\n\t\tLogger: ctrl.Log.WithName(\"controllers\").WithName(\"Monitor\"),\n\t\tstopCh: make(chan struct{}),\n\t\tperiodicReconcile: 1 * time.Minute,\n\t\tmongoURI: os.Getenv(database.MongoURI),\n\t}\n\tif r.mongoURI == \"\" {\n\t\treturn nil, fmt.Errorf(\"mongo uri is empty\")\n\t}\n\tr.initNamespaceFuncs()\n\terr := r.preApply()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tr.NvidiaGpu, err = gpu.GetNodeGpuModel(mgr.GetClient())\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to get node gpu model: %v\", err)\n\t}\n\tr.Logger.Info(\"get gpu model\", \"gpu model\", r.NvidiaGpu)\n\tr.startPeriodicReconcile()\n\treturn r, nil\n}", "func NewMonitor(\n\tname string,\n\tres Resource,\n\tcurCount *metric.Gauge,\n\tmaxHist *metric.Histogram,\n\tincrement int64,\n\tnoteworthy int64,\n) *BytesMonitor {\n\treturn NewMonitorWithLimit(\n\t\tname, res, math.MaxInt64, curCount, maxHist, increment, noteworthy)\n}", "func newFakeReconciler(initObjects ...runtime.Object) *ReconcileMachineRemediation {\n\tfakeClient := fake.NewFakeClient(initObjects...)\n\tremediator := &FakeRemedatior{}\n\treturn &ReconcileMachineRemediation{\n\t\tclient: fakeClient,\n\t\tremediator: remediator,\n\t\tnamespace: consts.NamespaceOpenshiftMachineAPI,\n\t}\n}", "func NewReactor(cfg Config) *Reactor {\n\tr := &Reactor{\n\t\tComponent: NewComponent(cfg),\n\t\tCoreTemp: cfg.BaseTempOrDefault(),\n\t\tContainmentTemp: cfg.BaseTempOrDefault(),\n\t\tControlRods: []*ControlRod{\n\t\t\tNewControlRod(cfg, 0),\n\t\t\tNewControlRod(cfg, 1),\n\t\t\tNewControlRod(cfg, 2),\n\t\t\tNewControlRod(cfg, 3),\n\t\t\tNewControlRod(cfg, 4),\n\t\t\tNewControlRod(cfg, 5),\n\t\t},\n\t\tFuelRods: []*FuelRod{\n\t\t\tNewFuelRod(cfg, 0),\n\t\t\tNewFuelRod(cfg, 1),\n\t\t\tNewFuelRod(cfg, 2),\n\t\t\tNewFuelRod(cfg, 3),\n\t\t\tNewFuelRod(cfg, 4),\n\t\t\tNewFuelRod(cfg, 5),\n\t\t\tNewFuelRod(cfg, 
6),\n\t\t\tNewFuelRod(cfg, 7),\n\t\t\tNewFuelRod(cfg, 8),\n\t\t\tNewFuelRod(cfg, 9),\n\t\t\tNewFuelRod(cfg, 10),\n\t\t\tNewFuelRod(cfg, 11),\n\t\t},\n\t\tCoolant: NewCoolant(),\n\t\tPrimary: NewPump(\"primary\", cfg),\n\t\tSecondary: NewPump(\"secondary\", cfg),\n\t\tTurbine: NewTurbine(cfg),\n\t}\n\n\tr.Primary.Inlet = r.Coolant\n\tr.Primary.Outlet = r.Turbine.Coolant\n\tr.Secondary.Inlet = r.Turbine.Coolant\n\tr.Secondary.Outlet = r.Coolant\n\n\tr.ContainmentTempAlarm = NewThresholdAlarm(\n\t\t\"Containment Temp\",\n\t\tfunc() float64 { return r.ContainmentTemp },\n\t\tThresholds(ContainmentTempFatal, ContainmentTempCritical, ContainmentTempWarning),\n\t)\n\tr.CoreTempAlarm = NewThresholdAlarm(\n\t\t\"Core Temp\",\n\t\tfunc() float64 { return r.CoreTemp },\n\t\tThresholds(CoreTempFatal, CoreTempCritical, CoreTempWarning),\n\t)\n\treturn r\n}", "func New(threads int) *MonitorImpl {\n\treturn &MonitorImpl{\n\t\tflushingStats: make([]record, 0),\n\t\tmaxLength: 100 * threads,\n\t\tlambda: 1,\n\t}\n}", "func NewMonitor() *Monitor {\n\treturn &Monitor{\n\t\tValidators: make([]string, 0),\n\t\treceiveChannel: make(chan *connection.Packet, maxChannelSize),\n\t\taccAlgorithm: accountability.NewAccountability(),\n\t}\n}", "func NewMonitor(\n\tcctx *Context,\n\tiface string,\n\tdialer *system.Dialer,\n\twatchC <-chan netstate.Change,\n\tverbose bool,\n) *Monitor {\n\treturn &Monitor{\n\t\tcctx: cctx,\n\t\tiface: iface,\n\t\tverbose: verbose,\n\t\tdialer: dialer,\n\t\twatchC: watchC,\n\t\treadyC: make(chan struct{}),\n\n\t\t// By default use real time.\n\t\tnow: time.Now,\n\t}\n}", "func NewMonitor(p *Probe) (*Monitor, error) {\n\tvar err error\n\tm := &Monitor{\n\t\tprobe: p,\n\t}\n\n\t// instantiate a new load controller\n\tm.loadController, err = NewLoadController(p)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// instantiate a new event statistics monitor\n\tm.perfBufferMonitor, err = NewPerfBufferMonitor(p)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"couldn't create the events statistics monitor: %w\", err)\n\t}\n\n\tif p.config.ActivityDumpEnabled {\n\t\tm.activityDumpManager, err = NewActivityDumpManager(p)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"couldn't create the activity dump manager: %w\", err)\n\t\t}\n\t}\n\n\tif p.config.RuntimeMonitor {\n\t\tm.runtimeMonitor = NewRuntimeMonitor(p.statsdClient)\n\t}\n\n\tm.discarderMonitor, err = NewDiscarderMonitor(p)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"couldn't create the discarder monitor: %w\", err)\n\t}\n\n\treturn m, nil\n}", "func newStageMonitor(g reflect.Value, v specsp) (*stageMonitor, error) {\n\ttarget := v[\"target\"].(string)\n\tval := v[\"val\"].(string)\n\teval := buildMonitorEval(g, target, val)\n\tif eval == nil {\n\t\treturn nil, syscall.ENOENT\n\t}\n\tmode := int(v[\"mode\"].(float64))\n\tcolor := int(v[\"color\"].(float64))\n\tlabel := v[\"label\"].(string)\n\tx := v[\"x\"].(float64)\n\ty := v[\"y\"].(float64)\n\tvisible := v[\"visible\"].(bool)\n\treturn &stageMonitor{\n\t\ttarget: target, val: val, eval: eval,\n\t\tvisible: visible, mode: mode, color: color, x: x, y: y, label: label,\n\t}, nil\n}", "func (in *MariaDBMonitorSpec) DeepCopy() *MariaDBMonitorSpec {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(MariaDBMonitorSpec)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\tsnapClientset, err := snapclientset.NewForConfig(mgr.GetConfig())\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn &ReconcileVolumeBackup{\n\t\tclient: 
mgr.GetClient(),\n\t\tsnapClientset: snapClientset,\n\t\tconfig: mgr.GetConfig(),\n\t\tscheme: mgr.GetScheme(),\n\t\texecutor: executor.CreateNewRemotePodExecutor(mgr.GetConfig()),\n\t}\n}", "func New(pythonPkg string) *MonitorCore {\n\tctx, cancel := context.WithCancel(context.Background())\n\n\treturn &MonitorCore{\n\t\tlogger: log.StandardLogger(),\n\t\tctx: ctx,\n\t\tcancel: cancel,\n\t\tpythonPkg: pythonPkg,\n\t\tconfigCond: sync.Cond{L: &sync.Mutex{}},\n\t}\n\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &ReconcileWavefrontProxy{client: mgr.GetClient(), scheme: mgr.GetScheme()}\n}", "func NewMonitor(rootDir string, rules []*Rule, recursive bool, checkFrequencySeconds int) (*Monitor, error) {\n\tif rootDir == \"\" {\n\t\treturn nil, errors.New(\"invalid root directory\")\n\t}\n\n\tif checkFrequencySeconds == 0 {\n\t\treturn nil, errors.New(\"check frequency cannot be 0\")\n\t}\n\n\tdir, err := homedir.Expand(rootDir)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &Monitor{\n\t\tRootDirectory: dir,\n\t\tRules: rules,\n\t\tRecursive: recursive,\n\t\tCheckFrequencySeconds: checkFrequencySeconds,\n\t\tlog: logrus.WithField(\"component\", \"monitor\"),\n\t}, nil\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\n\treconcilerBase := util.NewReconcilerBase(mgr.GetClient(), mgr.GetScheme(), mgr.GetConfig(), mgr.GetEventRecorderFor(controllerName))\n\n\tdiscoveryClient, err := reconcilerBase.GetDiscoveryClient()\n\n\tif err != nil {\n\t\tlog.Error(err, \"failed to initialize discovery client\")\n\t\treturn nil\n\t}\n\n\tresources, resourcesErr := discoveryClient.ServerResourcesForGroupVersion(podMonitorAPIVersion)\n\n\tif resourcesErr != nil {\n\t\tlog.Error(err, \"failed to discover resources\")\n\t\treturn nil\n\t}\n\n\tfor _, apiResource := range resources.APIResources {\n\t\tif apiResource.Kind == podMonitorKind {\n\t\t\tsupportsPodMonitors = \"true\"\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn &ReconcileKeepalivedGroup{\n\t\tReconcilerBase: reconcilerBase,\n\t}\n}", "func New(c Config) (Monitor, error) {\n\tm := &monitor{\n\t\tblankThreshold: blankThreshold,\n\t\ttip: []string{\"\"},\n\t\tpath: c.Path,\n\t\tscanner: c.Scanner,\n\t\tsorter: c.Sorter,\n\t}\n\n\tif err := m.sync(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn m, nil\n}", "func createMinimatchForTest(t *testing.T, evalTc *rpcTesting.TestContext) *rpcTesting.TestContext {\n\tvar closer func()\n\tcfg := viper.New()\n\n\t// TODO: Use insecure for now since minimatch and mmf only works with the same secure mode\n\t// Server a minimatch for testing using random port at tc.grpcAddress & tc.proxyAddress\n\ttc := rpcTesting.MustServeInsecure(t, func(p *rpc.ServerParams) {\n\t\tcloser = statestoreTesting.New(t, cfg)\n\t\tcfg.Set(\"storage.page.size\", 10)\n\t\t// Set up the attributes that a ticket will be indexed for.\n\t\tcfg.Set(\"ticketIndices\", Indices)\n\t\tassert.Nil(t, minimatch.BindService(p, cfg))\n\t})\n\t// TODO: Revisit the Minimatch test setup in future milestone to simplify passing config\n\t// values between components. The backend needs to connect to to the synchronizer but when\n\t// it is initialized, does not know what port the synchronizer is on. 
To work around this,\n\t// the backend sets up a connection to the synchronizer at runtime and hence can access these\n\t// config values to establish the connection.\n\tcfg.Set(\"api.synchronizer.hostname\", tc.GetHostname())\n\tcfg.Set(\"api.synchronizer.grpcport\", tc.GetGRPCPort())\n\tcfg.Set(\"api.synchronizer.httpport\", tc.GetHTTPPort())\n\tcfg.Set(\"synchronizer.registrationIntervalMs\", \"200ms\")\n\tcfg.Set(\"synchronizer.proposalCollectionIntervalMs\", \"200ms\")\n\tcfg.Set(\"api.evaluator.hostname\", evalTc.GetHostname())\n\tcfg.Set(\"api.evaluator.grpcport\", evalTc.GetGRPCPort())\n\tcfg.Set(\"api.evaluator.httpport\", evalTc.GetHTTPPort())\n\tcfg.Set(\"synchronizer.enabled\", true)\n\n\t// TODO: This is very ugly. Need a better story around closing resources.\n\ttc.AddCloseFunc(closer)\n\treturn tc\n}", "func New(mgr manager.Manager) *Reconciler {\n\treturn &Reconciler{\n\t\tclient: mgr.GetClient(),\n\t\tscheme: mgr.GetScheme(),\n\t\trecorder: mgr.GetEventRecorderFor(\"controller.fluentbit\"),\n\t}\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &ReconcileBlackboxTarget{client: mgr.GetClient(), scheme: mgr.GetScheme()}\n}", "func NewPerfMonitor(name string) (perfmon *PerfMonitor) {\n\tperfmon = &PerfMonitor{\n\t\tName:name,\n\t\tObservations:make(map[ResourceTag]*JobFlow),\n\t\tMutex:&sync.Mutex{},\n\t}\n\treturn\n}", "func (m *MagneticFlux) From(u Uniter) error {\n\tif !DimensionsMatch(u, Weber) {\n\t\t*m = MagneticFlux(math.NaN())\n\t\treturn errors.New(\"Dimension mismatch\")\n\t}\n\t*m = MagneticFlux(u.Unit().Value())\n\treturn nil\n}", "func (*FluxMonitorConfig) Descriptor() ([]byte, []int) {\n\treturn file_pkg_noderpc_proto_feeds_manager_proto_rawDescGZIP(), []int{2}\n}", "func newFrameworkWatcher(ctx context.Context, addr string, prot Protocol, mkClient func() dcos.Client) (*FrameworkState, error) {\n\toperator := NewOperator(addr, mkClient)\n\treader, err := operator.Subscribe(ctx, prot)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"init framework watcher: %s\", err)\n\t}\n\n\t// This buffered channel is critical, if it were unbuffered then we can\n\t// potentially lose signals.\n\trelay := make(chan struct{}, 1)\n\n\tstate := &FrameworkState{\n\t\tsnap: mesos_v1.FrameworkSnapshot{\n\t\t\tFrameworks: make(map[string]*mesos_v1.FrameworkInfo),\n\t\t\tTasks: make(map[string]*mesos_v1.Task),\n\t\t\tAgents: make(map[string]*mesos_v1.AgentInfo),\n\t\t},\n\t\tfwTaskCount: make(map[string]int),\n\t\treadrelay: relay,\n\t\twriterelay: relay,\n\t\top: operator,\n\t\topsub: reader,\n\t\tprot: prot,\n\t}\n\treturn state, nil\n}", "func newReconciler(mgr manager.Manager, channelDescriptor *utils.ChannelDescriptor, logger logr.Logger) reconcile.Reconciler {\n\treturn &ReconcileDeployable{\n\t\tKubeClient: mgr.GetClient(),\n\t\tChannelDescriptor: channelDescriptor,\n\t\tLog: logger,\n\t}\n}", "func newReconciler(mgr manager.Manager) (reconcile.Reconciler, *reconcileGitTrackOpts) {\n\t// Create a restMapper (used by informer to look up resource kinds)\n\trestMapper, err := utils.NewRestMapper(mgr.GetConfig())\n\tif err != nil {\n\t\tpanic(fmt.Errorf(\"unable to create rest mapper: %v\", err))\n\t}\n\n\tgvrs, err := farosflags.ParseIgnoredResources()\n\tif err != nil {\n\t\tpanic(fmt.Errorf(\"unable to parse ignored resources: %v\", err))\n\t}\n\n\tapplier, err := farosclient.NewApplier(mgr.GetConfig(), farosclient.Options{})\n\tif err != nil {\n\t\tpanic(fmt.Errorf(\"unable to create applier: %v\", err))\n\t}\n\n\trec := 
&ReconcileGitTrack{\n\t\tClient: mgr.GetClient(),\n\t\tscheme: mgr.GetScheme(),\n\t\tstore: gitstore.NewRepoStore(farosflags.RepositoryDir),\n\t\trestMapper: restMapper,\n\t\trecorder: mgr.GetEventRecorderFor(\"gittrack-controller\"),\n\t\tignoredGVRs: gvrs,\n\t\tlastUpdateTimes: make(map[string]time.Time),\n\t\tmutex: &sync.RWMutex{},\n\t\tapplier: applier,\n\t\tlog: rlogr.Log.WithName(\"gittrack-controller\"),\n\t\tgitTrackMode: farosflags.GitTrack,\n\t\tnamespace: farosflags.Namespace,\n\t\tclusterGitTrackMode: farosflags.ClusterGitTrack,\n\t}\n\topts := &reconcileGitTrackOpts{\n\t\tgitTrackMode: farosflags.GitTrack,\n\t\tclusterGitTrackMode: farosflags.ClusterGitTrack,\n\t}\n\treturn rec, opts\n}", "func NewMonitor(config *MonitorConfig) *Monitor {\n\tmonitor := &Monitor{\n\t\tstatuses: config.Statuses,\n\t\tusername: config.Username,\n\t\tpassword: config.Password,\n\t\tupdate: make(chan StatusUpdate),\n\t}\n\n\tconfig.Router.Handle(\"/status\", monitor.basicAuth(monitor.getStatusHandler(), true)).Methods(http.MethodGet)\n\tconfig.Router.Handle(\"/login\", monitor.basicAuth(login(), false)).Methods(http.MethodGet)\n\t// This is not very REST friendly but will make updating less complicated\n\tconfig.Router.Handle(\"/update/{id}/{status}\", monitor.basicAuth(monitor.updateStatusHandler(), true)).Methods(http.MethodGet)\n\n\tmonitor.display = NewDisplay(config.Name)\n\tconfig.Router.Handle(\"/live\", monitor.basicAuth(monitor.display.LiveStatus(), true))\n\tmonitor.display.RouteStatic(config.Router)\n\n\tgo monitor.updateListener()\n\n\treturn monitor\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &JenkinsReconciler{client: mgr.GetClient(), scheme: mgr.GetScheme()}\n}", "func New(cfg *Config) (*Monitor, error) {\n\tc, err := client.NewClient(cfg.Host, \"1.24\", nil, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tctx, cancel := context.WithCancel(context.Background())\n\tm := &Monitor{\n\t\tevents: cfg.Events,\n\t\tclient: c,\n\t\tlog: logrus.WithField(\"context\", \"docker\"),\n\t\tstop: cancel,\n\t\tstopCh: make(chan bool),\n\t}\n\tgo m.run(ctx)\n\treturn m, nil\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &ReconcileJenkinsInstance{\n\t\tClient: mgr.GetClient(),\n\t\tEventRecorder: mgr.GetRecorder(\"JenkinsInstanceController\"),\n\t\tscheme: mgr.GetScheme(),\n\t}\n}", "func newReconciler(\n\tmgr manager.Manager,\n\tmutationSystem *mutation.System,\n\ttracker *readiness.Tracker,\n\tgetPod func(context.Context) (*corev1.Pod, error),\n\tkind string,\n\tnewMutationObj func() client.Object,\n\tmutatorFor func(client.Object) (types.Mutator, error),\n\tevents chan event.GenericEvent,\n) *Reconciler {\n\tr := &Reconciler{\n\t\tsystem: mutationSystem,\n\t\tClient: mgr.GetClient(),\n\t\ttracker: tracker,\n\t\tgetPod: getPod,\n\t\tscheme: mgr.GetScheme(),\n\t\treporter: ctrlmutators.NewStatsReporter(),\n\t\tcache: ctrlmutators.NewMutationCache(),\n\t\tgvk: mutationsv1.GroupVersion.WithKind(kind),\n\t\tnewMutationObj: newMutationObj,\n\t\tmutatorFor: mutatorFor,\n\t\tlog: logf.Log.WithName(\"controller\").WithValues(logging.Process, fmt.Sprintf(\"%s_controller\", strings.ToLower(kind))),\n\t\tevents: events,\n\t}\n\tif getPod == nil {\n\t\tr.getPod = r.defaultGetPod\n\t}\n\treturn r\n}", "func NewCfosPowerBrainFromConfig(other map[string]interface{}) (api.Meter, error) {\n\tcc := modbus.TcpSettings{\n\t\tID: 1,\n\t}\n\n\tif err := util.DecodeOther(other, &cc); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn 
NewCfosPowerBrain(cc.URI, cc.ID)\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &ReconcileDirectVolumeMigrationProgress{Client: mgr.GetClient(), scheme: mgr.GetScheme()}\n}", "func NewNotificationFromMonitorString(ms string, tf TimeFormatter) (*Notification, error) {\n\tbody := false\n\tsummary := false\n\n\tn := &Notification{\n\t\tTime: tf(time.Now()),\n\t}\n\n\ts := bufio.NewScanner(strings.NewReader(ms))\n\tfor s.Scan() {\n\t\tswitch {\n\t\tcase body:\n\t\t\tn.Body = extractString(s.Text())\n\t\t\tbody = false\n\t\tcase summary:\n\t\t\tn.Summary = extractString(s.Text())\n\t\t\tsummary = false\n\t\tcase strings.Contains(s.Text(), `string \"x-nemo-preview-body\"`):\n\t\t\tbody = true\n\t\tcase strings.Contains(s.Text(), `string \"x-nemo-owner\"`):\n\t\t\tif len(n.Summary) == 0 {\n\t\t\t\tsummary = true\n\t\t\t}\n\t\tcase strings.Contains(s.Text(), `string \"x-nemo-preview-summary\"`):\n\t\t\tsummary = true\n\t\t}\n\t}\n\tif err := s.Err(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn n, nil\n}", "func NewMockMonitorListener(queueSize int) *MockMonitorListener {\n\treturn &MockMonitorListener{\n\t\tqueue: make(chan *payload.Payload, queueSize),\n\t}\n}", "func New(client api.Client, callbacks api.SyncerCallbacks, node string, watchAllNodes bool) api.Syncer {\r\n\t// Create the set of ResourceTypes required for Felix. Since the update processors\r\n\t// also cache state, we need to create individual ones per syncer rather than create\r\n\t// a common global set.\r\n\t// For BGP we always only care about affinity blocks assigned to our own particuar node.\r\n\t// However, depending on whether we are in full-mesh mode or not changes whether we want\r\n\t// to watch all Node resources or just our own.\r\n\tnodeToWatch := node\r\n\tif watchAllNodes {\r\n\t\tnodeToWatch = \"\"\r\n\t}\r\n\tresourceTypes := []watchersyncer.ResourceType{\r\n\t\t{\r\n\t\t\tListInterface: model.ResourceListOptions{Kind: apiv3.KindIPPool},\r\n\t\t\tUpdateProcessor: updateprocessors.NewIPPoolUpdateProcessor(),\r\n\t\t},\r\n\t\t{\r\n\t\t\tListInterface: model.ResourceListOptions{Kind: apiv3.KindBGPConfiguration},\r\n\t\t\tUpdateProcessor: updateprocessors.NewBGPConfigUpdateProcessor(),\r\n\t\t},\r\n\t\t{\r\n\t\t\tListInterface: model.ResourceListOptions{\r\n\t\t\t\tKind: apiv3.KindNode,\r\n\t\t\t\tName: nodeToWatch,\r\n\t\t\t},\r\n\t\t\tUpdateProcessor: updateprocessors.NewBGPNodeUpdateProcessor(),\r\n\t\t},\r\n\t\t{\r\n\t\t\tListInterface: model.ResourceListOptions{Kind: apiv3.KindBGPPeer},\r\n\t\t\tUpdateProcessor: updateprocessors.NewBGPPeerUpdateProcessor(),\r\n\t\t},\r\n\t}\r\n\t// When this syncer is used (via confd) in calico/node, it needs to know the affinity blocks\r\n\t// for that node so that it can set up the blackhole routes; in that case,\r\n\t// node is non-empty. 
When this syncer is used (also via confd) in\r\n\t// calico/routereflector, it has no need for affinity block information, so we skip that\r\n\t// here; in that case, node is empty.\r\n\tif node != \"\" {\r\n\t\tresourceTypes = append(resourceTypes, watchersyncer.ResourceType{\r\n\t\t\tListInterface: model.BlockAffinityListOptions{Host: node},\r\n\t\t})\r\n\t}\r\n\r\n\treturn watchersyncer.New(\r\n\t\tclient,\r\n\t\tresourceTypes,\r\n\t\tcallbacks,\r\n\t)\r\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &ReconcileApplicationMonitoring{\n\t\tclient: mgr.GetClient(),\n\t\tscheme: mgr.GetScheme(),\n\t\thelper: NewKubeHelper(),\n\t\textraParams: make(map[string]string),\n\t}\n}", "func NewMonitor(port int) *Monitor {\n\tmon := &Monitor{\n\t\tconnectionCountChannels: map[int]chan int{},\n\t\tconnectionCounts: map[int]int{},\n\t\taddChannel: make(chan int, 10),\n\t\tcountMutex: sync.Mutex{},\n\t\tchannelMutex: sync.Mutex{},\n\t}\n\thttp.HandleFunc(\"/\", func(w http.ResponseWriter, r *http.Request) {\n\t\tmon.countMutex.Lock()\n\t\tbytes, err := json.MarshalIndent(mon.connectionCounts, \"\", \" \")\n\t\tmon.countMutex.Unlock()\n\t\tif err != nil {\n\t\t\tw.WriteHeader(500)\n\t\t\tw.Write([]byte(fmt.Sprintf(\"ERROR OCCURED: %v\", err)))\n\t\t\treturn\n\t\t}\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\tw.Write(bytes)\n\t})\n\tgo func() {\n\t\tif err := http.ListenAndServe(fmt.Sprintf(\":%v\", port), nil); err != nil {\n\t\t\tfmt.Printf(\"Failed to Start Monitor Service: %v\", err)\n\t\t\tos.Exit(5)\n\t\t}\n\t}()\n\tgo mon.addSubRoutine()\n\treturn mon\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &ReconcileDfJob{client: mgr.GetClient(), scheme: mgr.GetScheme()}\n}", "func newReconciler(mgr manager.Manager) *ReconcileChaosBlade {\n\tcbClient := mgr.GetClient().(*channel.Client)\n\treturn &ReconcileChaosBlade{\n\t\tclient: cbClient,\n\t\tscheme: mgr.GetScheme(),\n\t\tExecutor: exec.NewDispatcherExecutor(cbClient),\n\t}\n}", "func NewMonitor(api model.API) {\n\tjob := interval.NewJob(requestGET(api), int(api.IntervalTime))\n\tapiJobMap[api.ID] = job\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &ReconcileInfluxdb{client: mgr.GetClient(), scheme: mgr.GetScheme()}\n}", "func init() {\n\tmonitors.Register(monitorType, func() interface{} {\n\t\treturn &Monitor{\n\t\t\t*collectd.NewMonitorCore(CollectdTemplate),\n\t\t}\n\t}, &Config{})\n}", "func newLightFetcher(h *clientHandler) *lightFetcher {\n\tf := &lightFetcher{\n\t\thandler: h,\n\t\tchain: h.backend.blockchain,\n\t\tpeers: make(map[*peer]*fetcherPeerInfo),\n\t\tdeliverChn: make(chan fetchResponse, 100),\n\t\trequested: make(map[uint64]fetchRequest),\n\t\ttimeoutChn: make(chan uint64),\n\t\trequestTrigger: make(chan struct{}, 1),\n\t\tsyncDone: make(chan *peer),\n\t\tcloseCh: make(chan struct{}),\n\t\tmaxConfirmedTd: big.NewInt(0),\n\t}\n\th.backend.peers.notify(f)\n\n\tf.wg.Add(1)\n\tgo f.syncLoop()\n\treturn f\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &ReconcileBareMetalAsset{client: mgr.GetClient(), scheme: mgr.GetScheme()}\n}", "func New(workernum int) *disp {\n\treturn &disp{\n\t\t//Pipelines: make([]*worker2.Pipeline, pipelinenum),\n\t\tPipelineChan: make(worker2.PipelineChannel),\n\t\tPipelineQueue: make(worker2.PipelineQueue),\n\t\tWorkers: make([]*worker2.Worker, workernum),\n\t\tJobChan: make(worker2.JobChannel),\n\t\tQueue: make(worker2.JobQueue),\n\t}\n}", "func newReconciler(mgr 
manager.Manager) reconcile.Reconciler {\n\n\tserverVersion, err := getServerVersion()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tlog.Info(fmt.Sprintf(\"Kubernetes Version: %s\", serverVersion))\n\n\treturn &ReconcileIBMBlockCSI{\n\t\tclient: mgr.GetClient(),\n\t\tscheme: mgr.GetScheme(),\n\t\trecorder: mgr.GetEventRecorderFor(\"controller_ibmblockcsi\"),\n\t\tserverVersion: serverVersion,\n\t}\n}", "func NewMonitor(targets []string) *Monitor {\n\tbroker := NewBroker()\n\tgo broker.Start()\n\t// Create observers\n\tobservers := make([]Observer, len(targets))\n\tfor i, target := range targets {\n\t\to, oErr := NewPingObserver(target, broker)\n\t\tif oErr != nil {\n\t\t\tlog.Printf(\"Ignoring bad target: %s\", target)\n\t\t\tcontinue\n\t\t}\n\t\tgo o.Start()\n\t\tobservers[i] = o\n\t}\n\treturn &Monitor{\n\t\tBroker: *broker,\n\t\tTargets: targets,\n\t\tObservers: observers,\n\t}\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\teventBroadcaster := record.NewBroadcaster()\n\teventBroadcaster.StartLogging(klog.Infof)\n\tclient, err :=clientset.NewForConfig(mgr.GetConfig())\n\tif err != nil {\n\t\tklog.Errorln(err)\n\t}\n\tkclient, err := kubeclient.NewForConfig(mgr.GetConfig())\n\tif err != nil {\n\t\tklog.Errorln(err)\n\t}\n\tdc := &ReconcileDeployment{\n\t\tclient: mgr.GetClient(),\n\t\tscheme: mgr.GetScheme(),\n\t\tKetiClient: client,\n\t\tKubeClient: kclient,\n\t\tqueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), \"deployment\"),\n\t}\n\tdc.rsControl = util.RealRSControl{\n\t\tKubeClient: kclient,\n\t}\n\tdc.syncHandler = dc.Reconcile\n\tdc.enqueueDeployment = dc.enqueue\n\n\treturn dc\n}", "func New(smName, namespace string, labels map[string]string) *Builder {\n\treturn &Builder{sm: newServiceMonitor(smName, namespace, labels)}\n}", "func New(housekeepingInterval time.Duration, config *InfluxConfig) (Manager, error) {\n\n\t// Initialize influxdb\n\thostname, err := os.Hostname() // Agent's host name\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tinfluxdbStorage, err := influxdb.New(hostname,\n\t\tconfig.Table,\n\t\tconfig.Database,\n\t\tconfig.Username,\n\t\tconfig.Password,\n\t\tconfig.Host,\n\t\tconfig.BufferDuration,\n\t\tconfig.FilterPrefix)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t//log.Printf(\"[Info] Connected to influxdb on: %q\", config.Host)\n\n\tsysfs, err := sysfs.NewRealSysFs()\n\tif err != nil {\n\t\tlog.Printf(\"[Error] Failed to create a system interface: %s\", err)\n\t\treturn nil, err\n\t}\n\t//log.Printf(\"[Info] Created a system interface)\n\n\t// Detect the container we are running on.\n\tselfContainer, err := cgroups.GetThisCgroupDir(\"cpu\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t//log.Printf(\"[Info] Running in container: %q\", selfContainer)\n\n\tdockerInfo, err := docker.DockerInfo()\n\tif err != nil {\n\t\tlog.Printf(\"[Error] Unable to connect to Docker: %v\", err)\n\t}\n\n\tcontext := fs.Context{DockerRoot: docker.RootDir(), DockerInfo: dockerInfo}\n\tfsInfo, err := fs.NewFsInfo(context)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// If started with host's rootfs mounted, assume that its running\n\t// in its own namespaces.\n\tinHostNamespace := false\n\tif _, err := os.Stat(\"/rootfs/proc\"); os.IsNotExist(err) {\n\t\tinHostNamespace = true\n\t}\n\n\tnewManager := &manager{\n\t\tcontainers: make(map[namespacedContainerName]*containerData),\n\t\tbackendStorage: influxdbStorage,\n\t\tquitChannels: make([]chan error, 0, 2),\n\t\tfsInfo: fsInfo,\n\t\tselfContainer: 
selfContainer,\n\t\tinHostNamespace: inHostNamespace,\n\t\tstartupTime: time.Now(),\n\t\thousekeepingInterval: housekeepingInterval,\n\t}\n\n\tmachineInfo, err := getMachineInfo(sysfs, fsInfo)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tnewManager.machineInfo = *machineInfo\n\t//log.Printf(\"[Info] Machine: %+v\", newManager.machineInfo)\n\n\tversionInfo, err := getVersionInfo()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tnewManager.versionInfo = *versionInfo\n\t//log.Printf(\"[Info] Version: %+v\", newManager.versionInfo)\n\n\tnewManager.eventHandler = events.NewEventManager(events.DefaultStoragePolicy())\n\treturn newManager, nil\n}", "func New(b *beat.Beat, cfg *common.Config) (beat.Beater, error) {\n\tc := config.DefaultConfig\n\tif err := cfg.Unpack(&c); err != nil {\n\t\treturn nil, fmt.Errorf(\"Error reading config file: %v\", err)\n\t}\n\n\tbt := &Zmqbeat{\n\t\tdone: make(chan struct{}),\n\t\tconfig: c,\n\t\tclosing: false,\n\t\treceivers: make([]*zmq.Socket, len(c.Pull)),\n\t}\n\treturn bt, nil\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &ReconcileDeploymentConfig{Client: mgr.GetClient(), scheme: mgr.GetScheme()}\n}", "func (r *DatadogMonitorReconciler) SetupWithManager(mgr ctrl.Manager) error {\n\tinternal, err := datadogmonitor.NewReconciler(r.Client, r.DDClient, r.VersionInfo, r.Scheme, r.Log, r.Recorder)\n\tif err != nil {\n\t\treturn err\n\t}\n\tr.internal = internal\n\n\tbuilder := ctrl.NewControllerManagedBy(mgr).\n\t\tFor(&datadoghqv1alpha1.DatadogMonitor{})\n\n\terr = builder.Complete(r)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\tapiConfig := swagger.NewConfiguration()\n\t//TODO Make it configurable\n\tapiConfig.BasePath = \"http://localhost:5000\"\n\n\treturn &ReconcileNotebookJob{\n\t\tClient: mgr.GetClient(),\n\t\tscheme: mgr.GetScheme(),\n\t\trecorder: mgr.GetRecorder(\"notebookjob-controller\"),\n\t\tapiClient: swagger.NewAPIClient(apiConfig),\n\t}\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &ReconcileParameterStore{client: mgr.GetClient(), scheme: mgr.GetScheme()}\n}", "func NewWatcher(cfg *rest.Config) (*NetWatcher,error) {\n netWatcher := &NetWatcher{\n Factories: make(map[string]danminformers.SharedInformerFactory),\n Clients: make(map[string]danmclientset.Interface),\n Controllers: make(map[string]cache.Controller),\n }\n //this is how we test if the specific API is used within the cluster, or not\n //we can only create an Informer for an existing API, otherwise we get errors\n dnetClient, err := danmclientset.NewForConfig(cfg)\n if err != nil {\n return nil, err\n }\n _, err = dnetClient.DanmV1().DanmNets(\"\").List(meta_v1.ListOptions{})\n if err == nil {\n netWatcher.createDnetInformer(dnetClient)\n }\n tnetClient, err := danmclientset.NewForConfig(cfg)\n if err != nil {\n return nil, err\n }\n _, err = tnetClient.DanmV1().TenantNetworks(\"\").List(meta_v1.ListOptions{})\n if err == nil {\n netWatcher.createTnetInformer(tnetClient)\n }\n cnetClient, err := danmclientset.NewForConfig(cfg)\n if err != nil {\n return nil, err\n }\n _, err = cnetClient.DanmV1().ClusterNetworks().List(meta_v1.ListOptions{})\n if err == nil {\n netWatcher.createCnetInformer(cnetClient)\n }\n log.Println(\"Number of watcher's started for recognized APIs:\" + strconv.Itoa(len(netWatcher.Controllers)))\n if len(netWatcher.Controllers) == 0 {\n return nil, errors.New(\"no network management APIs are installed in the cluster, 
netwatcher cannot start!\")\n }\n return netWatcher, nil\n}", "func (ext *Extender) CreateMonitor() cache.Controller {\n\tlw := cache.NewListWatchFromClient(\n\t\text.client.Core().RESTClient(), \"pods\", meta_v1.NamespaceAll,\n\t\tfields.ParseSelectorOrDie(\"spec.nodeName!=\"+\"\"),\n\t)\n\treturn ext.createMonitorFromSource(lw)\n}", "func NewWatcher(factory informers.SharedInformerFactory, stopper chan struct{}) *Watcher {\n\tpodInformer := factory.Core().V1().Pods()\n\tpvcInformer := factory.Core().V1().PersistentVolumeClaims()\n\tnsInformer := factory.Core().V1().Namespaces()\n\n\tpromMissingBackups := prometheus.NewGaugeVec(prometheus.GaugeOpts{\n\t\tName: \"backupmonitor_missing\",\n\t\tHelp: \"Unconfigured PXC Backups\",\n\t}, []string{\n\t\t\"namespace\",\n\t\t\"pvc_name\",\n\t})\n\n\treturn &Watcher{\n\t\tfactory: factory,\n\t\tpodInformer: podInformer,\n\t\tpvcInformer: pvcInformer,\n\t\tnsInformer: nsInformer,\n\t\tpromMissingBackups: promMissingBackups,\n\t}\n}", "func NewMonitorSubscriber(address string, subTypes []string, options *access.NotificationSubscriptionOptions) (*MonitorSubscriber, error) {\n\tnotificationChannel := make(chan string, notificationBuffer)\n\tid, err := uuid.NewV4()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &MonitorSubscriber{\n\t\tid: id.String(),\n\t\tnotificationChannel: notificationChannel,\n\t\taddr: address,\n\t\tsinceTime: time.Now(),\n\t\tacceptedTypes: subTypes,\n\t\tsubscriberOptions: options,\n\t}, nil\n}", "func NewMockMeshWorkloadEventWatcher(ctrl *gomock.Controller) *MockMeshWorkloadEventWatcher {\n\tmock := &MockMeshWorkloadEventWatcher{ctrl: ctrl}\n\tmock.recorder = &MockMeshWorkloadEventWatcherMockRecorder{mock}\n\treturn mock\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &ReconcileTektonInstallation{client: mgr.GetClient(), scheme: mgr.GetScheme()}\n}", "func New(reap, dryRun bool, namespace, hostIP string) *Monitor {\n\tm := &Monitor{\n\t\tc: make(chan error),\n\t\tdryRun: dryRun,\n\t\thostIP: hostIP,\n\t\tnamespace: namespace,\n\t\treap: reap,\n\t}\n\treturn m\n}", "func NewBenchmarkMonitor(listenAddr string) *Server {\n\ts := &Server{}\n\tif listenAddr == \"\" {\n\t\treturn s\n\t}\n\n\ts.ctx, s.cancel = context.WithCancel(context.Background())\n\tmux := http.NewServeMux()\n\tmux.HandleFunc(\"/v1/stop\", s.handleStop)\n\tmux.HandleFunc(\"/v1/status\", s.handleStatus)\n\tmux.HandleFunc(\"/v1/aggregated\", s.handleAggregated)\n\tmux.HandleFunc(\"/v1/operations/json\", s.handleDownloadJSON)\n\tmux.HandleFunc(\"/v1/operations\", s.handleDownloadZst)\n\n\ts.server = &http.Server{\n\t\tAddr: listenAddr,\n\t\tHandler: mux,\n\t\tTLSConfig: nil,\n\t\tReadTimeout: time.Minute,\n\t\tReadHeaderTimeout: time.Second,\n\t\tWriteTimeout: time.Minute,\n\t\tIdleTimeout: time.Minute,\n\t\tMaxHeaderBytes: 0,\n\t\tTLSNextProto: nil,\n\t\tConnState: nil,\n\t\tErrorLog: nil,\n\t\tBaseContext: nil,\n\t\tConnContext: nil,\n\t}\n\tgo func() {\n\t\tdefer s.cancel()\n\t\tconsole.Infoln(\"opening server on\", listenAddr)\n\t\ts.Errorln(s.server.ListenAndServe())\n\t}()\n\treturn s\n}", "func (ruleset *DnsForwardingRuleset) InitializeSpec(status genruntime.ConvertibleStatus) error {\n\tif s, ok := status.(*DnsForwardingRuleset_STATUS); ok {\n\t\treturn ruleset.Spec.Initialize_From_DnsForwardingRuleset_STATUS(s)\n\t}\n\n\treturn fmt.Errorf(\"expected Status of type DnsForwardingRuleset_STATUS but received %T instead\", status)\n}", "func NewBalanceMonitor(chainID string, cfg Config, lggr logger.Logger, ks Keystore, 
newReader func() (solanaClient.Reader, error)) services.ServiceCtx {\n\treturn newBalanceMonitor(chainID, cfg, lggr, ks, newReader)\n}", "func NewWebhookSpec(spec *job.WebhookSpec) *WebhookSpec {\n\treturn &WebhookSpec{\n\t\tCreatedAt: spec.CreatedAt,\n\t\tUpdatedAt: spec.UpdatedAt,\n\t}\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &ReconcileCopybird{client: mgr.GetClient(), scheme: mgr.GetScheme()}\n}", "func sniffDbus(rf dbusReaderFunc) <-chan *jn.Notification {\n\tout := make(chan *jn.Notification)\n\n\tgo func() {\n\t\tr, err := rf()\n\t\tif err != nil {\n\t\t\tlog.Panic(err)\n\t\t}\n\t\tdefer r.Close()\n\n\t\ts := bufio.NewScanner(r)\n\t\ts.Split(jn.ScanNotifications)\n\t\tfor s.Scan() {\n\t\t\tif *verbose {\n\t\t\t\tlog.Printf(\"D-Bus record: %v\", s.Text())\n\t\t\t}\n\n\t\t\tn, err := jn.NewNotificationFromMonitorString(s.Text(), timeFormatter)\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"Error: NewNotificationFromMonitorString: %v\", err)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif *verbose {\n\t\t\t\tlog.Printf(\"New Notification: %v\", n)\n\t\t\t}\n\n\t\t\tif n.IsEmpty() {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tout <- n\n\t\t}\n\t\tif err := s.Err(); err != nil {\n\t\t\tlog.Panic(err)\n\t\t}\n\n\t\tclose(out)\n\t}()\n\n\treturn out\n}", "func InitFromSpec(spec string) string {\n\terr := Global.ActivateSpec(spec)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"failed to activate logging spec: %s\", err)\n\t}\n\treturn DefaultLevel()\n}", "func newReconciler(mgr manager.Manager) (reconcile.Reconciler, error) {\n\tsv, err := version.Server()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"get version: %v\", err)\n\t}\n\n\treturn &ReconcilePerconaXtraDBClusterBackup{\n\t\tclient: mgr.GetClient(),\n\t\tscheme: mgr.GetScheme(),\n\t\tserverVersion: sv,\n\t}, nil\n}", "func NewMonitorInheritWithLimit(name string, limit int64, m *BytesMonitor) *BytesMonitor {\n\tm.mu.Lock()\n\tdefer m.mu.Unlock()\n\treturn NewMonitorWithLimit(\n\t\tname,\n\t\tm.resource,\n\t\tlimit,\n\t\tm.mu.curBytesCount,\n\t\tm.mu.maxBytesHist,\n\t\tm.poolAllocationSize,\n\t\tm.noteworthyUsageBytes,\n\t)\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &ReconcileConfigMap{client: mgr.GetClient(), scheme: mgr.GetScheme()}\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\tr := &Reconciler{\n\t\tClient: mgr.GetClient(),\n\t\tscheme: mgr.GetScheme(),\n\t\tkubeclient: kubernetes.NewForConfigOrDie(mgr.GetConfig()),\n\t\trecorder: mgr.GetRecorder(controllerName),\n\t}\n\tr.validate = r._validate\n\treturn r\n}", "func New(b *beat.Beat, cfg *common.Config) (beat.Beater, error) {\n\tc := config.DefaultConfig\n\tif err := cfg.Unpack(&c); err != nil {\n\t\treturn nil, fmt.Errorf(\"Error reading config file: %v\", err)\n\t}\n\n\tbt := &Zfsbeat{\n\t\tdone: make(chan struct{}),\n\t\tconfig: c,\n\t}\n\treturn bt, nil\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &ReconcileKubemanager{Client: mgr.GetClient(), Scheme: mgr.GetScheme()}\n}", "func NewSpec(yamlConfig string) (*Spec, error) {\n\ts := &Spec{\n\t\tyamlConfig: yamlConfig,\n\t}\n\n\tmeta := &MetaSpec{}\n\terr := yaml.Unmarshal([]byte(yamlConfig), meta)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unmarshal failed: %v\", err)\n\t}\n\tvr := v.Validate(meta, []byte(yamlConfig))\n\tif !vr.Valid() {\n\t\treturn nil, fmt.Errorf(\"validate metadata failed: \\n%s\", vr)\n\t}\n\n\trootObject, exists := objectRegistry[meta.Kind]\n\tif !exists {\n\t\treturn nil, 
fmt.Errorf(\"kind %s not found\", meta.Kind)\n\t}\n\n\ts.meta, s.objectSpec = meta, rootObject.DefaultSpec()\n\n\terr = yaml.Unmarshal([]byte(yamlConfig), s.objectSpec)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unmarshal failed: %v\", err)\n\t}\n\tvr = v.Validate(s.objectSpec, []byte(yamlConfig))\n\tif !vr.Valid() {\n\t\treturn nil, fmt.Errorf(\"validate spec failed: \\n%s\", vr)\n\t}\n\n\treturn s, nil\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &ReconcileHostOperatorConfig{client: mgr.GetClient()}\n}", "func NewSpec(api *gmail.Service, db *db.DB) (*Spec, error) {\n\tlog.SetLevel(log.DebugLevel)\n\tlog.Info(\"starting new spec\")\n\n\tbytes, err := ioutil.ReadFile(\"./spec.yaml\")\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to read file: %v\", err)\n\t}\n\n\tspec := &Spec{\n\t\tapi: api,\n\t\tdb: db,\n\t}\n\n\terr = yaml.Unmarshal(bytes, spec)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to unmarshal: %v\", err)\n\t}\n\n\treturn spec, nil\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\tr := &Reconciler{\n\t\tClient: mgr.GetClient(),\n\t\tscheme: mgr.GetScheme(),\n\t\tkubeclient: kubernetes.NewForConfigOrDie(mgr.GetConfig()),\n\t\trecorder: mgr.GetRecorder(controllerName),\n\t}\n\tr.provision = r._provision\n\tr.bind = r._bind\n\tr.delete = r._delete\n\treturn r\n}", "func mustMakeStatusWatcher(ctx context.Context, vcs vcsinfo.VCS, expStore expectations.Store, expChangeHandler expectations.ChangeEventRegisterer, tileSource tilesource.TileSource) *status.StatusWatcher {\n\tswc := status.StatusWatcherConfig{\n\t\tExpChangeListener: expChangeHandler,\n\t\tExpectationsStore: expStore,\n\t\tTileSource: tileSource,\n\t\tVCS: vcs,\n\t}\n\n\tstatusWatcher, err := status.New(ctx, swc)\n\tif err != nil {\n\t\tsklog.Fatalf(\"Failed to initialize status watcher: %s\", err)\n\t}\n\tsklog.Infof(\"statusWatcher created\")\n\n\treturn statusWatcher\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &ReconcileMinecraft{client: mgr.GetClient(), scheme: mgr.GetScheme()}\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &ReconcileMinecraft{client: mgr.GetClient(), scheme: mgr.GetScheme()}\n}", "func NewMonitor(conf *config.Config, neomq, ethmq gomq.Consumer) (*Monitor, error) {\n\n\ttokenswapdb, err := createEngine(conf, \"tokenswapdb\")\n\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"create tokenswap db engine error %s\", err)\n\t}\n\n\tethdb, err := createEngine(conf, \"ethdb\")\n\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"create eth db engine error %s\", err)\n\t}\n\n\tneodb, err := createEngine(conf, \"neodb\")\n\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"create neo db engine error %s\", err)\n\t}\n\n\tethKey, err := readETHKeyStore(conf, \"eth.keystore\", conf.GetString(\"eth.keystorepassword\", \"\"))\n\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"create neo db engine error %s\", err)\n\t}\n\n\tneoKey, err := readNEOKeyStore(conf, \"neo.keystore\", conf.GetString(\"neo.keystorepassword\", \"\"))\n\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"create neo db engine error %s\", err)\n\t}\n\n\tneo2ethtax, err := strconv.ParseFloat(conf.GetString(\"tokenswap.neo2ethtax\", \"0.001\"), 64)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"ParseFloat neo2ethtax error %s\", err)\n\t}\n\n\teth2neotax, err := strconv.ParseFloat(conf.GetString(\"tokenswap.eth2neotax\", \"0.001\"), 64)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"ParseFloat 
eth2neotax error %s\", err)\n\t}\n\n\treturn &Monitor{\n\t\tLogger: slf4go.Get(\"tokenswap-server\"),\n\t\tneomq: neomq,\n\t\tethmq: ethmq,\n\t\ttokenswapdb: tokenswapdb,\n\t\tethdb: ethdb,\n\t\tneodb: neodb,\n\t\ttncOfETH: conf.GetString(\"eth.tnc\", \"\"),\n\t\ttncOfNEO: conf.GetString(\"neo.tnc\", \"\"),\n\t\tETHKeyAddress: strings.ToLower(ethKey.Address),\n\t\tNEOKeyAddress: neoKey.Address,\n\t\tethClient: ethrpc.NewClient(conf.GetString(\"eth.node\", \"\")),\n\t\tneoClient: neorpc.NewClient(conf.GetString(\"neo.node\", \"\")),\n\t\tneo2ethtax: neo2ethtax,\n\t\teth2neotax: eth2neotax,\n\t\tconfig: conf,\n\t\tethConfirmCount: conf.GetInt64(\"tokenswap.ethConfirmCount\", 12),\n\t\tethGetBlockInterval: conf.GetInt64(\"tokenswap.ethGetBlockInterval\", 20),\n\t\tneoConfirmCount: conf.GetInt64(\"tokenswap.neoConfirmCount\", 12),\n\t\tneoGetBlockInterval: conf.GetInt64(\"tokenswap.neoGetBlockInterval\", 10),\n\t}, nil\n}", "func NewHostMonitor(duration time.Duration) (pkg.HostMonitor, error) {\n\tif duration == 0 {\n\t\tduration = 2 * time.Second\n\t}\n\twatcher, err := fsnotify.NewWatcher()\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to initialize fs watcher\")\n\t}\n\n\t// the file can not exist if the system was booted from overlay\n\tif _, err := os.Stat(upgrade.FlistInfoFile); err == nil {\n\t\tif err := watcher.Add(upgrade.FlistInfoFile); err != nil {\n\t\t\treturn nil, errors.Wrapf(err, \"failed to watch '%s'\", upgrade.FlistInfoFile)\n\t\t}\n\t}\n\n\treturn &hostMonitor{\n\t\tduration: duration,\n\t}, nil\n}", "func newMock(deps mockDependencies, t testing.TB) (Component, error) {\n\tbackupConfig := config.NewConfig(\"\", \"\", strings.NewReplacer())\n\tbackupConfig.CopyConfig(config.Datadog)\n\n\tconfig.Datadog.CopyConfig(config.NewConfig(\"mock\", \"XXXX\", strings.NewReplacer()))\n\n\tconfig.SetFeatures(t, deps.Params.Features...)\n\n\t// call InitConfig to set defaults.\n\tconfig.InitConfig(config.Datadog)\n\tc := &cfg{\n\t\tConfig: config.Datadog,\n\t}\n\n\tif !deps.Params.SetupConfig {\n\n\t\tif deps.Params.ConfFilePath != \"\" {\n\t\t\tconfig.Datadog.SetConfigType(\"yaml\")\n\t\t\terr := config.Datadog.ReadConfig(strings.NewReader(deps.Params.ConfFilePath))\n\t\t\tif err != nil {\n\t\t\t\t// The YAML was invalid, fail initialization of the mock config.\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t} else {\n\t\twarnings, _ := setupConfig(deps)\n\t\tc.warnings = warnings\n\t}\n\n\t// Overrides are explicit and will take precedence over any other\n\t// setting\n\tfor k, v := range deps.Params.Overrides {\n\t\tconfig.Datadog.Set(k, v)\n\t}\n\n\t// swap the existing config back at the end of the test.\n\tt.Cleanup(func() { config.Datadog.CopyConfig(backupConfig) })\n\n\treturn c, nil\n}", "func newContainer(rspec *spec.Spec, lockDir string) (*Container, error) {\n\tif rspec == nil {\n\t\treturn nil, errors.Wrapf(ErrInvalidArg, \"must provide a valid runtime spec to create container\")\n\t}\n\n\tctr := new(Container)\n\tctr.config = new(ContainerConfig)\n\tctr.state = new(containerRuntimeInfo)\n\n\tctr.config.ID = stringid.GenerateNonCryptoID()\n\tctr.config.Name = namesgenerator.GetRandomName(0)\n\n\tctr.config.Spec = new(spec.Spec)\n\tdeepcopier.Copy(rspec).To(ctr.config.Spec)\n\tctr.config.CreatedTime = time.Now()\n\n\t// Path our lock file will reside at\n\tlockPath := filepath.Join(lockDir, ctr.config.ID)\n\t// Grab a lockfile at the given path\n\tlock, err := storage.GetLockfile(lockPath)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"error creating 
lockfile for new container\")\n\t}\n\tctr.lock = lock\n\n\treturn ctr, nil\n}", "func NewBootstrapSpec(spec *job.BootstrapSpec) *BootstrapSpec {\n\treturn &BootstrapSpec{\n\t\tContractID: spec.ContractID,\n\t\tRelay: spec.Relay,\n\t\tRelayConfig: spec.RelayConfig,\n\t\tBlockchainTimeout: spec.BlockchainTimeout,\n\t\tContractConfigTrackerPollInterval: spec.ContractConfigTrackerPollInterval,\n\t\tContractConfigConfirmations: spec.ContractConfigConfirmations,\n\t\tCreatedAt: spec.CreatedAt,\n\t\tUpdatedAt: spec.UpdatedAt,\n\t}\n}", "func (server *FlexibleServer_Spec) ConvertSpecFrom(source genruntime.ConvertibleSpec) error {\n\tif source == server {\n\t\treturn errors.New(\"attempted conversion between unrelated implementations of github.com/Azure/azure-service-operator/v2/pkg/genruntime/ConvertibleSpec\")\n\t}\n\n\treturn source.ConvertSpecTo(server)\n}", "func newAlfredWatcher() *alfredWatcher {\n w, _ := inotify.NewWatcher()\n aw := &alfredWatcher{\n watcher: w,\n list: make(map[string]uint32),\n }\n return aw\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &ReconcileHiveConfig{Client: mgr.GetClient(), scheme: mgr.GetScheme(), restConfig: mgr.GetConfig()}\n}", "func New(b *beat.Beat, cfg *common.Config) (beat.Beater, error) {\n\tc := defaultConfig\n\tif err := cfg.Unpack(&c); err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to unpack config\")\n\t}\n\n\tbt := &Flowbeat{\n\t\tdone: make(chan struct{}),\n\t\tconfig: c,\n\t}\n\treturn bt, nil\n}", "func NewKeeperSpec(spec *job.KeeperSpec) *KeeperSpec {\n\treturn &KeeperSpec{\n\t\tContractAddress: spec.ContractAddress,\n\t\tFromAddress: spec.FromAddress,\n\t\tCreatedAt: spec.CreatedAt,\n\t\tUpdatedAt: spec.UpdatedAt,\n\t}\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &ReconcileContainerJFR{scheme: mgr.GetScheme(), client: mgr.GetClient(),\n\t\tReconcilerTLS: common.NewReconcilerTLS(&common.ReconcilerTLSConfig{\n\t\t\tClient: mgr.GetClient(),\n\t\t}),\n\t}\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &ReconcileCollectd{client: mgr.GetClient(), scheme: mgr.GetScheme()}\n}", "func NewReconciler(base *reconciler.Base, ls listers) (*Reconciler, error) {\n\tvar env envConfig\n\tif err := envconfig.Process(\"BROKER_CELL\", &env); err != nil {\n\t\treturn nil, err\n\t}\n\tsvcRec := &reconcilerutils.ServiceReconciler{\n\t\tKubeClient: base.KubeClientSet,\n\t\tServiceLister: ls.serviceLister,\n\t\tEndpointsLister: ls.endpointsLister,\n\t\tRecorder: base.Recorder,\n\t}\n\tdeploymentRec := &reconcilerutils.DeploymentReconciler{\n\t\tKubeClient: base.KubeClientSet,\n\t\tLister: ls.deploymentLister,\n\t\tRecorder: base.Recorder,\n\t}\n\tcmRec := &reconcilerutils.ConfigMapReconciler{\n\t\tKubeClient: base.KubeClientSet,\n\t\tLister: ls.configMapLister,\n\t\tRecorder: base.Recorder,\n\t}\n\tr := &Reconciler{\n\t\tBase: base,\n\t\tenv: env,\n\t\tlisters: ls,\n\t\tsvcRec: svcRec,\n\t\tdeploymentRec: deploymentRec,\n\t\tcmRec: cmRec,\n\t}\n\treturn r, nil\n}" ]
[ "0.7735614", "0.52191037", "0.51470137", "0.5015607", "0.49694467", "0.4964988", "0.49210796", "0.49060473", "0.49040398", "0.4856898", "0.48292074", "0.48211414", "0.4782509", "0.4765241", "0.47645527", "0.4729567", "0.4721767", "0.47063202", "0.46926945", "0.46776727", "0.464708", "0.46340433", "0.4632203", "0.46039003", "0.4559477", "0.45547262", "0.45454243", "0.45223898", "0.4484047", "0.44808403", "0.44668475", "0.44651774", "0.44626075", "0.44549286", "0.44292673", "0.44281492", "0.44181907", "0.4409414", "0.4394803", "0.43865272", "0.4376843", "0.4372862", "0.43665934", "0.43652666", "0.43623248", "0.43514323", "0.43376148", "0.43336892", "0.43319193", "0.43243194", "0.4319261", "0.43098828", "0.42977473", "0.42891797", "0.42872652", "0.4272591", "0.4269833", "0.42646196", "0.42574805", "0.42567268", "0.42534867", "0.42509925", "0.42482176", "0.42468685", "0.42437", "0.4230989", "0.42256904", "0.42181444", "0.42151898", "0.42119688", "0.42067963", "0.4205104", "0.41961676", "0.4191356", "0.41879714", "0.41841856", "0.41839424", "0.4183644", "0.41813397", "0.4177893", "0.41747832", "0.41732055", "0.41719836", "0.41630408", "0.41626534", "0.41622436", "0.41622436", "0.4156818", "0.41538116", "0.41496438", "0.41404763", "0.41376904", "0.41372246", "0.41372097", "0.41308138", "0.41288134", "0.41243744", "0.4117465", "0.41027883", "0.41005117" ]
0.8116492
0
NewOffChainReportingSpec initializes a new OffChainReportingSpec from a job.OCROracleSpec
func NewOffChainReportingSpec(spec *job.OCROracleSpec) *OffChainReportingSpec { return &OffChainReportingSpec{ ContractAddress: spec.ContractAddress, P2PBootstrapPeers: spec.P2PBootstrapPeers, P2PV2Bootstrappers: spec.P2PV2Bootstrappers, IsBootstrapPeer: spec.IsBootstrapPeer, EncryptedOCRKeyBundleID: spec.EncryptedOCRKeyBundleID, TransmitterAddress: spec.TransmitterAddress, ObservationTimeout: spec.ObservationTimeout, ObservationTimeoutEnv: spec.ObservationTimeoutEnv, BlockchainTimeout: spec.BlockchainTimeout, BlockchainTimeoutEnv: spec.BlockchainTimeoutEnv, ContractConfigTrackerSubscribeInterval: spec.ContractConfigTrackerSubscribeInterval, ContractConfigTrackerSubscribeIntervalEnv: spec.ContractConfigTrackerSubscribeIntervalEnv, ContractConfigTrackerPollInterval: spec.ContractConfigTrackerPollInterval, ContractConfigTrackerPollIntervalEnv: spec.ContractConfigTrackerPollIntervalEnv, ContractConfigConfirmations: spec.ContractConfigConfirmations, ContractConfigConfirmationsEnv: spec.ContractConfigConfirmationsEnv, CreatedAt: spec.CreatedAt, UpdatedAt: spec.UpdatedAt, EVMChainID: spec.EVMChainID, DatabaseTimeout: spec.DatabaseTimeout, DatabaseTimeoutEnv: spec.DatabaseTimeoutEnv, ObservationGracePeriod: spec.ObservationGracePeriod, ObservationGracePeriodEnv: spec.ObservationGracePeriodEnv, ContractTransmitterTransmitTimeout: spec.ContractTransmitterTransmitTimeout, ContractTransmitterTransmitTimeoutEnv: spec.ContractTransmitterTransmitTimeoutEnv, CollectTelemetry: spec.CaptureEATelemetry, } }
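A companion sketch for the OCR presenter above, under the same assumptions (the presenters/job import paths are guesses and the spec values are invented): it exercises only fields the function copies verbatim, showing the *Env companion flags passing through and the rename of job's CaptureEATelemetry to the presenter's CollectTelemetry.

package main

import (
	"fmt"

	// Assumed import paths; adjust to the Chainlink version in use.
	"github.com/smartcontractkit/chainlink/core/services/job"
	"github.com/smartcontractkit/chainlink/core/web/presenters"
)

func main() {
	// Hypothetical OCR oracle spec; values are illustrative only.
	spec := &job.OCROracleSpec{
		ContractConfigConfirmations:    3,
		ContractConfigConfirmationsEnv: true, // i.e. the value came from an env override
		CaptureEATelemetry:             true,
	}
	p := presenters.NewOffChainReportingSpec(spec)

	// Plain copies, including the env-override marker.
	fmt.Println(p.ContractConfigConfirmations, p.ContractConfigConfirmationsEnv) // 3 true

	// Note the rename: job's CaptureEATelemetry becomes CollectTelemetry.
	fmt.Println(p.CollectTelemetry) // true
}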
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func NewOffChainReportingSpec(spec *job.OffchainReportingOracleSpec) *OffChainReportingSpec {\n\treturn &OffChainReportingSpec{\n\t\tContractAddress: spec.ContractAddress,\n\t\tP2PPeerID: spec.P2PPeerID,\n\t\tP2PBootstrapPeers: spec.P2PBootstrapPeers,\n\t\tIsBootstrapPeer: spec.IsBootstrapPeer,\n\t\tEncryptedOCRKeyBundleID: spec.EncryptedOCRKeyBundleID,\n\t\tTransmitterAddress: spec.TransmitterAddress,\n\t\tObservationTimeout: spec.ObservationTimeout,\n\t\tBlockchainTimeout: spec.BlockchainTimeout,\n\t\tContractConfigTrackerSubscribeInterval: spec.ContractConfigTrackerSubscribeInterval,\n\t\tContractConfigTrackerPollInterval: spec.ContractConfigTrackerPollInterval,\n\t\tContractConfigConfirmations: spec.ContractConfigConfirmations,\n\t\tCreatedAt: spec.CreatedAt,\n\t\tUpdatedAt: spec.UpdatedAt,\n\t}\n}", "func NewOffChainReporting2Spec(spec *job.OCR2OracleSpec) *OffChainReporting2Spec {\n\treturn &OffChainReporting2Spec{\n\t\tContractID: spec.ContractID,\n\t\tRelay: spec.Relay,\n\t\tRelayConfig: spec.RelayConfig,\n\t\tP2PV2Bootstrappers: spec.P2PV2Bootstrappers,\n\t\tOCRKeyBundleID: spec.OCRKeyBundleID,\n\t\tTransmitterID: spec.TransmitterID,\n\t\tBlockchainTimeout: spec.BlockchainTimeout,\n\t\tContractConfigTrackerPollInterval: spec.ContractConfigTrackerPollInterval,\n\t\tContractConfigConfirmations: spec.ContractConfigConfirmations,\n\t\tCreatedAt: spec.CreatedAt,\n\t\tUpdatedAt: spec.UpdatedAt,\n\t\tCollectTelemetry: spec.CaptureEATelemetry,\n\t}\n}", "func NewSpec(details *SpecDetails) *Spec {\n\treturn &Spec{\n\t\tDetails: details,\n\t\tServices: NewServiceList(),\n\t\tStatus: SpecWaiting,\n\t}\n}", "func newOtlpReceiver(cfg *Config, set receiver.CreateSettings) (*otlpReceiver, error) {\n\tr := &otlpReceiver{\n\t\tcfg: cfg,\n\t\tsettings: set,\n\t}\n\tif cfg.HTTP != nil {\n\t\tr.httpMux = http.NewServeMux()\n\t}\n\n\tvar err error\n\tr.obsrepGRPC, err = obsreport.NewReceiver(obsreport.ReceiverSettings{\n\t\tReceiverID: set.ID,\n\t\tTransport: \"grpc\",\n\t\tReceiverCreateSettings: set,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tr.obsrepHTTP, err = obsreport.NewReceiver(obsreport.ReceiverSettings{\n\t\tReceiverID: set.ID,\n\t\tTransport: \"http\",\n\t\tReceiverCreateSettings: set,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn r, nil\n}", "func NewKeeperSpec(spec *job.KeeperSpec) *KeeperSpec {\n\treturn &KeeperSpec{\n\t\tContractAddress: spec.ContractAddress,\n\t\tFromAddress: spec.FromAddress,\n\t\tCreatedAt: spec.CreatedAt,\n\t\tUpdatedAt: spec.UpdatedAt,\n\t}\n}", "func NewKeeperSpec(spec *job.KeeperSpec) *KeeperSpec {\n\treturn &KeeperSpec{\n\t\tContractAddress: spec.ContractAddress,\n\t\tFromAddress: spec.FromAddress,\n\t\tCreatedAt: spec.CreatedAt,\n\t\tUpdatedAt: spec.UpdatedAt,\n\t\tEVMChainID: spec.EVMChainID,\n\t}\n}", "func NewBootstrapSpec(spec *job.BootstrapSpec) *BootstrapSpec {\n\treturn &BootstrapSpec{\n\t\tContractID: spec.ContractID,\n\t\tRelay: spec.Relay,\n\t\tRelayConfig: spec.RelayConfig,\n\t\tBlockchainTimeout: spec.BlockchainTimeout,\n\t\tContractConfigTrackerPollInterval: spec.ContractConfigTrackerPollInterval,\n\t\tContractConfigConfirmations: spec.ContractConfigConfirmations,\n\t\tCreatedAt: spec.CreatedAt,\n\t\tUpdatedAt: spec.UpdatedAt,\n\t}\n}", "func newPeerAuthenticationWithSpec() *securityv1beta1.PeerAuthentication {\n\tpeerAuthentication := newPeerAuthentication()\n\tpeerAuthentication.Spec.PortLevelMtls = map[uint32]*securityv1beta1apis.PeerAuthentication_MutualTLS{\n\t\ttTargetPort: {\n\t\t\tMode: 
securityv1beta1apis.PeerAuthentication_MutualTLS_PERMISSIVE,\n\t\t},\n\t}\n\tpeerAuthentication.Spec.Selector = &istiov1beta1apis.WorkloadSelector{\n\t\tMatchLabels: map[string]string{\n\t\t\tapplicationLabelKey: tName,\n\t\t},\n\t}\n\treturn peerAuthentication\n}", "func newE2ESetup(msg *e2e.SetupReq) *colibri_mgmt.E2ESetup {\n\tallocTrail := make([]uint8, len(msg.AllocationTrail))\n\tfor i := range msg.AllocationTrail {\n\t\tallocTrail[i] = uint8(msg.AllocationTrail[i])\n\t}\n\treturn &colibri_mgmt.E2ESetup{\n\t\tBase: newE2EBase(&msg.Request),\n\t\tSegmentRsvs: newSegmentIDs(msg.SegmentRsvs),\n\t\tSegmentRsvASCount: msg.SegmentRsvASCount,\n\t\tRequestedBW: uint8(msg.RequestedBW),\n\t\tAllocationTrail: allocTrail,\n\t}\n}", "func setupDiffReport(r *Report) {\n\tr.format.output = printDiffReport\n\tr.format.changestyles = make(map[string]ChangeStyle)\n\tr.format.changestyles[\"ADD\"] = ChangeStyle{color: \"green\", message: \"has been added:\"}\n\tr.format.changestyles[\"REMOVE\"] = ChangeStyle{color: \"red\", message: \"has been removed:\"}\n\tr.format.changestyles[\"MODIFY\"] = ChangeStyle{color: \"yellow\", message: \"has changed:\"}\n}", "func NewWebhookSpec(spec *job.WebhookSpec) *WebhookSpec {\n\treturn &WebhookSpec{\n\t\tCreatedAt: spec.CreatedAt,\n\t\tUpdatedAt: spec.UpdatedAt,\n\t}\n}", "func NewCoherenceInternalSpec(cluster *CoherenceCluster, role *CoherenceRole) *CoherenceInternalSpec {\n\tout := CoherenceInternalSpec{}\n\n\tout.FullnameOverride = role.Name\n\tout.Cluster = cluster.Name\n\tout.ServiceAccountName = cluster.Spec.ServiceAccountName\n\tout.AutomountServiceAccountToken = cluster.Spec.AutomountServiceAccountToken\n\tout.ImagePullSecrets = cluster.Spec.ImagePullSecrets\n\tout.WKA = cluster.GetWkaServiceName()\n\tout.OperatorRequestTimeout = cluster.Spec.OperatorRequestTimeout\n\n\tout.CoherenceRoleSpec = CoherenceRoleSpec{}\n\trole.Spec.DeepCopyInto(&out.CoherenceRoleSpec)\n\n\treturn &out\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\tapiConfig := swagger.NewConfiguration()\n\t//TODO Make it configurable\n\tapiConfig.BasePath = \"http://localhost:5000\"\n\n\treturn &ReconcileNotebookJob{\n\t\tClient: mgr.GetClient(),\n\t\tscheme: mgr.GetScheme(),\n\t\trecorder: mgr.GetRecorder(\"notebookjob-controller\"),\n\t\tapiClient: swagger.NewAPIClient(apiConfig),\n\t}\n}", "func newWorker(\n\tm *manager,\n\tthirdComponent *v1alpha1.ThirdComponent,\n\tendpoint v1alpha1.ThirdComponentEndpointStatus) *worker {\n\n\tw := &worker{\n\t\tstopCh: make(chan struct{}, 1), // Buffer so stop() can be non-blocking.\n\t\tprobeManager: m,\n\t\tthirdComponent: thirdComponent,\n\t\tendpoint: endpoint,\n\t}\n\n\tw.spec = thirdComponent.Spec.Probe\n\tw.resultsManager = m.readinessManager\n\tw.initialValue = results.Failure\n\n\tbasicMetricLabels := metrics.Labels{\n\t\t\"endpoint\": string(w.endpoint.Address),\n\t\t\"pod\": w.thirdComponent.Name,\n\t\t\"namespace\": w.thirdComponent.Namespace,\n\t}\n\n\tw.proberResultsSuccessfulMetricLabels = deepCopyPrometheusLabels(basicMetricLabels)\n\tw.proberResultsSuccessfulMetricLabels[\"result\"] = probeResultSuccessful\n\n\tw.proberResultsFailedMetricLabels = deepCopyPrometheusLabels(basicMetricLabels)\n\tw.proberResultsFailedMetricLabels[\"result\"] = probeResultFailed\n\n\tw.proberResultsUnknownMetricLabels = deepCopyPrometheusLabels(basicMetricLabels)\n\tw.proberResultsUnknownMetricLabels[\"result\"] = probeResultUnknown\n\n\treturn w\n}", "func NewStatusReportRequestWithoutParam() *StatusReportRequest {\n\n return 
&StatusReportRequest{\n JDCloudRequest: core.JDCloudRequest{\n URL: \"/regions/{regionId}/statusReport\",\n Method: \"POST\",\n Header: nil,\n Version: \"v1\",\n },\n }\n}", "func NewPrintConnector()(*PrintConnector) {\n m := &PrintConnector{\n Entity: *NewEntity(),\n }\n return m\n}", "func makePodSpec(t thanosv1beta1.Receiver) (*corev1.PodSpec, error) {\n\n\tif t.Spec.ReceivePrefix == \"\" {\n\t\tt.Spec.ReceivePrefix = receiverDir\n\t}\n\tif t.Spec.Retention == \"\" {\n\t\tt.Spec.Retention = defaultRetetion\n\t}\n\t// TODO set args to spec\n\tthanosArgs := []string{\n\t\t\"receive\",\n\t\tfmt.Sprintf(\"--tsdb.path=%s\", t.Spec.ReceivePrefix),\n\t\tfmt.Sprintf(\"--tsdb.retention=%s\", t.Spec.Retention),\n\t\tfmt.Sprintf(\"--labels=receive=\\\"%s\\\"\", t.Spec.ReceiveLables),\n\t\tfmt.Sprintf(\"--objstore.config=type: %s\\nconfig:\\n bucket: \\\"%s\\\"\", t.Spec.ObjectStorageType, t.Spec.BucketName),\n\t}\n\tif t.Spec.LogLevel != \"\" && t.Spec.LogLevel != \"info\" {\n\t\tthanosArgs = append(thanosArgs, fmt.Sprintf(\"--log.level=%s\", t.Spec.LogLevel))\n\t}\n\tenv := []corev1.EnvVar{\n\t\t{\n\t\t\tName: \"GOOGLE_APPLICATION_CREDENTIALS\",\n\t\t\tValue: secretsDir + t.Spec.SecretName + \".json\",\n\t\t},\n\t}\n\n\tports := []corev1.ContainerPort{\n\t\t{\n\t\t\tContainerPort: 10902,\n\t\t\tName: \"http\",\n\t\t},\n\t\t{\n\t\t\tContainerPort: 10901,\n\t\t\tName: \"grpc\",\n\t\t},\n\t}\n\n\tif strings.Contains(t.Name, \"receiver\") {\n\t\tports = append(ports, corev1.ContainerPort{\n\t\t\tContainerPort: 19291,\n\t\t\tName: \"receive\",\n\t\t})\n\t}\n\n\t// mount to pod\n\tvolumemounts := []corev1.VolumeMount{\n\t\t{\n\t\t\tName: \"thanos-persistent-storage\",\n\t\t\tMountPath: t.Spec.Retention,\n\t\t},\n\t\t{\n\t\t\tName: \"google-cloud-key\",\n\t\t\tMountPath: secretsDir,\n\t\t},\n\t}\n\n\tcontainers := []corev1.Container{\n\t\t{\n\t\t\tName: \"receiver\",\n\t\t\tImage: *t.Spec.Image,\n\t\t\tArgs: thanosArgs,\n\t\t\tEnv: env,\n\t\t\tPorts: ports,\n\t\t\tVolumeMounts: volumemounts,\n\t\t},\n\t}\n\n\t// Need create json from gcp iam\n\t// https://github.com/orangesys/blueprint/tree/master/prometheus-thanos\n\t// kubectl create secret generic ${SERVICE_ACCOUNT_NAME} --from-file=${SERVICE_ACCOUNT_NAME}.json=${SERVICE_ACCOUNT_NAME}.json\n\t// secret name is thanos-demo-gcs\n\t// TODO setting secret name with spec\n\tvolumes := []corev1.Volume{\n\t\t{\n\t\t\tName: \"google-cloud-key\",\n\t\t\tVolumeSource: corev1.VolumeSource{\n\t\t\t\tSecret: &corev1.SecretVolumeSource{\n\t\t\t\t\tSecretName: t.Spec.SecretName,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\treturn &corev1.PodSpec{\n\t\tTerminationGracePeriodSeconds: &gracePeriodTerm,\n\t\tContainers: containers,\n\t\tVolumes: volumes,\n\t}, nil\n}", "func NewConnectorStatusDetails()(*ConnectorStatusDetails) {\n m := &ConnectorStatusDetails{\n }\n m.backingStore = ie8677ce2c7e1b4c22e9c3827ecd078d41185424dd9eeb92b7d971ed2d49a392e.BackingStoreFactoryInstance();\n m.SetAdditionalData(make(map[string]any))\n return m\n}", "func New(opts ...Option) (oci.SpecModifier, error) {\n\tb := &builder{}\n\tfor _, opt := range opts {\n\t\topt(b)\n\t}\n\tif b.logger == nil {\n\t\tb.logger = logger.New()\n\t}\n\treturn b.build()\n}", "func newResourceDelta(\n\ta *resource,\n\tb *resource,\n) *ackcompare.Delta {\n\tdelta := ackcompare.NewDelta()\n\tif (a == nil && b != nil) ||\n\t\t(a != nil && b == nil) {\n\t\tdelta.Add(\"\", a, b)\n\t\treturn delta\n\t}\n\n\tif ackcompare.HasNilDifference(a.ko.Spec.MonitoringScheduleConfig, b.ko.Spec.MonitoringScheduleConfig) 
{\n\t\tdelta.Add(\"Spec.MonitoringScheduleConfig\", a.ko.Spec.MonitoringScheduleConfig, b.ko.Spec.MonitoringScheduleConfig)\n\t} else if a.ko.Spec.MonitoringScheduleConfig != nil && b.ko.Spec.MonitoringScheduleConfig != nil {\n\t\tif ackcompare.HasNilDifference(a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition) {\n\t\t\tdelta.Add(\"Spec.MonitoringScheduleConfig.MonitoringJobDefinition\", a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition)\n\t\t} else if a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition != nil && b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition != nil {\n\t\t\tif ackcompare.HasNilDifference(a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.BaselineConfig, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.BaselineConfig) {\n\t\t\t\tdelta.Add(\"Spec.MonitoringScheduleConfig.MonitoringJobDefinition.BaselineConfig\", a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.BaselineConfig, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.BaselineConfig)\n\t\t\t} else if a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.BaselineConfig != nil && b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.BaselineConfig != nil {\n\t\t\t\tif ackcompare.HasNilDifference(a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.BaselineConfig.BaseliningJobName, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.BaselineConfig.BaseliningJobName) {\n\t\t\t\t\tdelta.Add(\"Spec.MonitoringScheduleConfig.MonitoringJobDefinition.BaselineConfig.BaseliningJobName\", a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.BaselineConfig.BaseliningJobName, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.BaselineConfig.BaseliningJobName)\n\t\t\t\t} else if a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.BaselineConfig.BaseliningJobName != nil && b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.BaselineConfig.BaseliningJobName != nil {\n\t\t\t\t\tif *a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.BaselineConfig.BaseliningJobName != *b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.BaselineConfig.BaseliningJobName {\n\t\t\t\t\t\tdelta.Add(\"Spec.MonitoringScheduleConfig.MonitoringJobDefinition.BaselineConfig.BaseliningJobName\", a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.BaselineConfig.BaseliningJobName, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.BaselineConfig.BaseliningJobName)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif ackcompare.HasNilDifference(a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.BaselineConfig.ConstraintsResource, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.BaselineConfig.ConstraintsResource) {\n\t\t\t\t\tdelta.Add(\"Spec.MonitoringScheduleConfig.MonitoringJobDefinition.BaselineConfig.ConstraintsResource\", a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.BaselineConfig.ConstraintsResource, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.BaselineConfig.ConstraintsResource)\n\t\t\t\t} else if a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.BaselineConfig.ConstraintsResource != nil && b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.BaselineConfig.ConstraintsResource != nil {\n\t\t\t\t\tif ackcompare.HasNilDifference(a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.BaselineConfig.ConstraintsResource.S3URI, 
b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.BaselineConfig.ConstraintsResource.S3URI) {\n\t\t\t\t\t\tdelta.Add(\"Spec.MonitoringScheduleConfig.MonitoringJobDefinition.BaselineConfig.ConstraintsResource.S3URI\", a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.BaselineConfig.ConstraintsResource.S3URI, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.BaselineConfig.ConstraintsResource.S3URI)\n\t\t\t\t\t} else if a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.BaselineConfig.ConstraintsResource.S3URI != nil && b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.BaselineConfig.ConstraintsResource.S3URI != nil {\n\t\t\t\t\t\tif *a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.BaselineConfig.ConstraintsResource.S3URI != *b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.BaselineConfig.ConstraintsResource.S3URI {\n\t\t\t\t\t\t\tdelta.Add(\"Spec.MonitoringScheduleConfig.MonitoringJobDefinition.BaselineConfig.ConstraintsResource.S3URI\", a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.BaselineConfig.ConstraintsResource.S3URI, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.BaselineConfig.ConstraintsResource.S3URI)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif ackcompare.HasNilDifference(a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.BaselineConfig.StatisticsResource, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.BaselineConfig.StatisticsResource) {\n\t\t\t\t\tdelta.Add(\"Spec.MonitoringScheduleConfig.MonitoringJobDefinition.BaselineConfig.StatisticsResource\", a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.BaselineConfig.StatisticsResource, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.BaselineConfig.StatisticsResource)\n\t\t\t\t} else if a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.BaselineConfig.StatisticsResource != nil && b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.BaselineConfig.StatisticsResource != nil {\n\t\t\t\t\tif ackcompare.HasNilDifference(a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.BaselineConfig.StatisticsResource.S3URI, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.BaselineConfig.StatisticsResource.S3URI) {\n\t\t\t\t\t\tdelta.Add(\"Spec.MonitoringScheduleConfig.MonitoringJobDefinition.BaselineConfig.StatisticsResource.S3URI\", a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.BaselineConfig.StatisticsResource.S3URI, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.BaselineConfig.StatisticsResource.S3URI)\n\t\t\t\t\t} else if a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.BaselineConfig.StatisticsResource.S3URI != nil && b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.BaselineConfig.StatisticsResource.S3URI != nil {\n\t\t\t\t\t\tif *a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.BaselineConfig.StatisticsResource.S3URI != *b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.BaselineConfig.StatisticsResource.S3URI {\n\t\t\t\t\t\t\tdelta.Add(\"Spec.MonitoringScheduleConfig.MonitoringJobDefinition.BaselineConfig.StatisticsResource.S3URI\", a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.BaselineConfig.StatisticsResource.S3URI, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.BaselineConfig.StatisticsResource.S3URI)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tif ackcompare.HasNilDifference(a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.Environment, 
b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.Environment) {\n\t\t\t\tdelta.Add(\"Spec.MonitoringScheduleConfig.MonitoringJobDefinition.Environment\", a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.Environment, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.Environment)\n\t\t\t} else if a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.Environment != nil && b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.Environment != nil {\n\t\t\t\tif !ackcompare.MapStringStringPEqual(a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.Environment, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.Environment) {\n\t\t\t\t\tdelta.Add(\"Spec.MonitoringScheduleConfig.MonitoringJobDefinition.Environment\", a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.Environment, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.Environment)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif ackcompare.HasNilDifference(a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringAppSpecification, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringAppSpecification) {\n\t\t\t\tdelta.Add(\"Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringAppSpecification\", a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringAppSpecification, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringAppSpecification)\n\t\t\t} else if a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringAppSpecification != nil && b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringAppSpecification != nil {\n\t\t\t\tif !ackcompare.SliceStringPEqual(a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringAppSpecification.ContainerArguments, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringAppSpecification.ContainerArguments) {\n\t\t\t\t\tdelta.Add(\"Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringAppSpecification.ContainerArguments\", a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringAppSpecification.ContainerArguments, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringAppSpecification.ContainerArguments)\n\t\t\t\t}\n\t\t\t\tif !ackcompare.SliceStringPEqual(a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringAppSpecification.ContainerEntrypoint, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringAppSpecification.ContainerEntrypoint) {\n\t\t\t\t\tdelta.Add(\"Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringAppSpecification.ContainerEntrypoint\", a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringAppSpecification.ContainerEntrypoint, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringAppSpecification.ContainerEntrypoint)\n\t\t\t\t}\n\t\t\t\tif ackcompare.HasNilDifference(a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringAppSpecification.ImageURI, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringAppSpecification.ImageURI) {\n\t\t\t\t\tdelta.Add(\"Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringAppSpecification.ImageURI\", a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringAppSpecification.ImageURI, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringAppSpecification.ImageURI)\n\t\t\t\t} else if a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringAppSpecification.ImageURI != nil && 
b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringAppSpecification.ImageURI != nil {\n\t\t\t\t\tif *a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringAppSpecification.ImageURI != *b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringAppSpecification.ImageURI {\n\t\t\t\t\t\tdelta.Add(\"Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringAppSpecification.ImageURI\", a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringAppSpecification.ImageURI, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringAppSpecification.ImageURI)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif ackcompare.HasNilDifference(a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringAppSpecification.PostAnalyticsProcessorSourceURI, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringAppSpecification.PostAnalyticsProcessorSourceURI) {\n\t\t\t\t\tdelta.Add(\"Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringAppSpecification.PostAnalyticsProcessorSourceURI\", a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringAppSpecification.PostAnalyticsProcessorSourceURI, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringAppSpecification.PostAnalyticsProcessorSourceURI)\n\t\t\t\t} else if a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringAppSpecification.PostAnalyticsProcessorSourceURI != nil && b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringAppSpecification.PostAnalyticsProcessorSourceURI != nil {\n\t\t\t\t\tif *a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringAppSpecification.PostAnalyticsProcessorSourceURI != *b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringAppSpecification.PostAnalyticsProcessorSourceURI {\n\t\t\t\t\t\tdelta.Add(\"Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringAppSpecification.PostAnalyticsProcessorSourceURI\", a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringAppSpecification.PostAnalyticsProcessorSourceURI, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringAppSpecification.PostAnalyticsProcessorSourceURI)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif ackcompare.HasNilDifference(a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringAppSpecification.RecordPreprocessorSourceURI, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringAppSpecification.RecordPreprocessorSourceURI) {\n\t\t\t\t\tdelta.Add(\"Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringAppSpecification.RecordPreprocessorSourceURI\", a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringAppSpecification.RecordPreprocessorSourceURI, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringAppSpecification.RecordPreprocessorSourceURI)\n\t\t\t\t} else if a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringAppSpecification.RecordPreprocessorSourceURI != nil && b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringAppSpecification.RecordPreprocessorSourceURI != nil {\n\t\t\t\t\tif *a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringAppSpecification.RecordPreprocessorSourceURI != *b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringAppSpecification.RecordPreprocessorSourceURI {\n\t\t\t\t\t\tdelta.Add(\"Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringAppSpecification.RecordPreprocessorSourceURI\", 
a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringAppSpecification.RecordPreprocessorSourceURI, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringAppSpecification.RecordPreprocessorSourceURI)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tif !reflect.DeepEqual(a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringInputs, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringInputs) {\n\t\t\t\tdelta.Add(\"Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringInputs\", a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringInputs, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringInputs)\n\t\t\t}\n\t\t\tif ackcompare.HasNilDifference(a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringOutputConfig, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringOutputConfig) {\n\t\t\t\tdelta.Add(\"Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringOutputConfig\", a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringOutputConfig, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringOutputConfig)\n\t\t\t} else if a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringOutputConfig != nil && b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringOutputConfig != nil {\n\t\t\t\tif ackcompare.HasNilDifference(a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringOutputConfig.KMSKeyID, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringOutputConfig.KMSKeyID) {\n\t\t\t\t\tdelta.Add(\"Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringOutputConfig.KMSKeyID\", a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringOutputConfig.KMSKeyID, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringOutputConfig.KMSKeyID)\n\t\t\t\t} else if a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringOutputConfig.KMSKeyID != nil && b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringOutputConfig.KMSKeyID != nil {\n\t\t\t\t\tif *a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringOutputConfig.KMSKeyID != *b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringOutputConfig.KMSKeyID {\n\t\t\t\t\t\tdelta.Add(\"Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringOutputConfig.KMSKeyID\", a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringOutputConfig.KMSKeyID, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringOutputConfig.KMSKeyID)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif !reflect.DeepEqual(a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringOutputConfig.MonitoringOutputs, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringOutputConfig.MonitoringOutputs) {\n\t\t\t\t\tdelta.Add(\"Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringOutputConfig.MonitoringOutputs\", a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringOutputConfig.MonitoringOutputs, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringOutputConfig.MonitoringOutputs)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif ackcompare.HasNilDifference(a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources) {\n\t\t\t\tdelta.Add(\"Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources\", 
a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources)\n\t\t\t} else if a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources != nil && b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources != nil {\n\t\t\t\tif ackcompare.HasNilDifference(a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources.ClusterConfig, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources.ClusterConfig) {\n\t\t\t\t\tdelta.Add(\"Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources.ClusterConfig\", a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources.ClusterConfig, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources.ClusterConfig)\n\t\t\t\t} else if a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources.ClusterConfig != nil && b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources.ClusterConfig != nil {\n\t\t\t\t\tif ackcompare.HasNilDifference(a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources.ClusterConfig.InstanceCount, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources.ClusterConfig.InstanceCount) {\n\t\t\t\t\t\tdelta.Add(\"Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources.ClusterConfig.InstanceCount\", a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources.ClusterConfig.InstanceCount, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources.ClusterConfig.InstanceCount)\n\t\t\t\t\t} else if a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources.ClusterConfig.InstanceCount != nil && b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources.ClusterConfig.InstanceCount != nil {\n\t\t\t\t\t\tif *a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources.ClusterConfig.InstanceCount != *b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources.ClusterConfig.InstanceCount {\n\t\t\t\t\t\t\tdelta.Add(\"Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources.ClusterConfig.InstanceCount\", a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources.ClusterConfig.InstanceCount, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources.ClusterConfig.InstanceCount)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tif ackcompare.HasNilDifference(a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources.ClusterConfig.InstanceType, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources.ClusterConfig.InstanceType) {\n\t\t\t\t\t\tdelta.Add(\"Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources.ClusterConfig.InstanceType\", a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources.ClusterConfig.InstanceType, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources.ClusterConfig.InstanceType)\n\t\t\t\t\t} else if a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources.ClusterConfig.InstanceType != nil && b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources.ClusterConfig.InstanceType != nil {\n\t\t\t\t\t\tif 
*a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources.ClusterConfig.InstanceType != *b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources.ClusterConfig.InstanceType {\n\t\t\t\t\t\t\tdelta.Add(\"Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources.ClusterConfig.InstanceType\", a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources.ClusterConfig.InstanceType, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources.ClusterConfig.InstanceType)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tif ackcompare.HasNilDifference(a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources.ClusterConfig.VolumeKMSKeyID, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources.ClusterConfig.VolumeKMSKeyID) {\n\t\t\t\t\t\tdelta.Add(\"Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources.ClusterConfig.VolumeKMSKeyID\", a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources.ClusterConfig.VolumeKMSKeyID, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources.ClusterConfig.VolumeKMSKeyID)\n\t\t\t\t\t} else if a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources.ClusterConfig.VolumeKMSKeyID != nil && b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources.ClusterConfig.VolumeKMSKeyID != nil {\n\t\t\t\t\t\tif *a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources.ClusterConfig.VolumeKMSKeyID != *b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources.ClusterConfig.VolumeKMSKeyID {\n\t\t\t\t\t\t\tdelta.Add(\"Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources.ClusterConfig.VolumeKMSKeyID\", a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources.ClusterConfig.VolumeKMSKeyID, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources.ClusterConfig.VolumeKMSKeyID)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tif ackcompare.HasNilDifference(a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources.ClusterConfig.VolumeSizeInGB, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources.ClusterConfig.VolumeSizeInGB) {\n\t\t\t\t\t\tdelta.Add(\"Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources.ClusterConfig.VolumeSizeInGB\", a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources.ClusterConfig.VolumeSizeInGB, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources.ClusterConfig.VolumeSizeInGB)\n\t\t\t\t\t} else if a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources.ClusterConfig.VolumeSizeInGB != nil && b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources.ClusterConfig.VolumeSizeInGB != nil {\n\t\t\t\t\t\tif *a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources.ClusterConfig.VolumeSizeInGB != *b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources.ClusterConfig.VolumeSizeInGB {\n\t\t\t\t\t\t\tdelta.Add(\"Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources.ClusterConfig.VolumeSizeInGB\", a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources.ClusterConfig.VolumeSizeInGB, 
b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources.ClusterConfig.VolumeSizeInGB)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tif ackcompare.HasNilDifference(a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.NetworkConfig, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.NetworkConfig) {\n\t\t\t\tdelta.Add(\"Spec.MonitoringScheduleConfig.MonitoringJobDefinition.NetworkConfig\", a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.NetworkConfig, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.NetworkConfig)\n\t\t\t} else if a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.NetworkConfig != nil && b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.NetworkConfig != nil {\n\t\t\t\tif ackcompare.HasNilDifference(a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.NetworkConfig.EnableInterContainerTrafficEncryption, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.NetworkConfig.EnableInterContainerTrafficEncryption) {\n\t\t\t\t\tdelta.Add(\"Spec.MonitoringScheduleConfig.MonitoringJobDefinition.NetworkConfig.EnableInterContainerTrafficEncryption\", a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.NetworkConfig.EnableInterContainerTrafficEncryption, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.NetworkConfig.EnableInterContainerTrafficEncryption)\n\t\t\t\t} else if a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.NetworkConfig.EnableInterContainerTrafficEncryption != nil && b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.NetworkConfig.EnableInterContainerTrafficEncryption != nil {\n\t\t\t\t\tif *a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.NetworkConfig.EnableInterContainerTrafficEncryption != *b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.NetworkConfig.EnableInterContainerTrafficEncryption {\n\t\t\t\t\t\tdelta.Add(\"Spec.MonitoringScheduleConfig.MonitoringJobDefinition.NetworkConfig.EnableInterContainerTrafficEncryption\", a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.NetworkConfig.EnableInterContainerTrafficEncryption, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.NetworkConfig.EnableInterContainerTrafficEncryption)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif ackcompare.HasNilDifference(a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.NetworkConfig.EnableNetworkIsolation, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.NetworkConfig.EnableNetworkIsolation) {\n\t\t\t\t\tdelta.Add(\"Spec.MonitoringScheduleConfig.MonitoringJobDefinition.NetworkConfig.EnableNetworkIsolation\", a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.NetworkConfig.EnableNetworkIsolation, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.NetworkConfig.EnableNetworkIsolation)\n\t\t\t\t} else if a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.NetworkConfig.EnableNetworkIsolation != nil && b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.NetworkConfig.EnableNetworkIsolation != nil {\n\t\t\t\t\tif *a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.NetworkConfig.EnableNetworkIsolation != *b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.NetworkConfig.EnableNetworkIsolation {\n\t\t\t\t\t\tdelta.Add(\"Spec.MonitoringScheduleConfig.MonitoringJobDefinition.NetworkConfig.EnableNetworkIsolation\", a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.NetworkConfig.EnableNetworkIsolation, 
b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.NetworkConfig.EnableNetworkIsolation)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif ackcompare.HasNilDifference(a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.NetworkConfig.VPCConfig, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.NetworkConfig.VPCConfig) {\n\t\t\t\t\tdelta.Add(\"Spec.MonitoringScheduleConfig.MonitoringJobDefinition.NetworkConfig.VPCConfig\", a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.NetworkConfig.VPCConfig, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.NetworkConfig.VPCConfig)\n\t\t\t\t} else if a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.NetworkConfig.VPCConfig != nil && b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.NetworkConfig.VPCConfig != nil {\n\t\t\t\t\tif !ackcompare.SliceStringPEqual(a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.NetworkConfig.VPCConfig.SecurityGroupIDs, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.NetworkConfig.VPCConfig.SecurityGroupIDs) {\n\t\t\t\t\t\tdelta.Add(\"Spec.MonitoringScheduleConfig.MonitoringJobDefinition.NetworkConfig.VPCConfig.SecurityGroupIDs\", a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.NetworkConfig.VPCConfig.SecurityGroupIDs, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.NetworkConfig.VPCConfig.SecurityGroupIDs)\n\t\t\t\t\t}\n\t\t\t\t\tif !ackcompare.SliceStringPEqual(a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.NetworkConfig.VPCConfig.Subnets, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.NetworkConfig.VPCConfig.Subnets) {\n\t\t\t\t\t\tdelta.Add(\"Spec.MonitoringScheduleConfig.MonitoringJobDefinition.NetworkConfig.VPCConfig.Subnets\", a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.NetworkConfig.VPCConfig.Subnets, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.NetworkConfig.VPCConfig.Subnets)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tif ackcompare.HasNilDifference(a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.RoleARN, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.RoleARN) {\n\t\t\t\tdelta.Add(\"Spec.MonitoringScheduleConfig.MonitoringJobDefinition.RoleARN\", a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.RoleARN, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.RoleARN)\n\t\t\t} else if a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.RoleARN != nil && b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.RoleARN != nil {\n\t\t\t\tif *a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.RoleARN != *b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.RoleARN {\n\t\t\t\t\tdelta.Add(\"Spec.MonitoringScheduleConfig.MonitoringJobDefinition.RoleARN\", a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.RoleARN, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.RoleARN)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif ackcompare.HasNilDifference(a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.StoppingCondition, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.StoppingCondition) {\n\t\t\t\tdelta.Add(\"Spec.MonitoringScheduleConfig.MonitoringJobDefinition.StoppingCondition\", a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.StoppingCondition, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.StoppingCondition)\n\t\t\t} else if a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.StoppingCondition != nil && b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.StoppingCondition != nil 
{\n\t\t\t\tif ackcompare.HasNilDifference(a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.StoppingCondition.MaxRuntimeInSeconds, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.StoppingCondition.MaxRuntimeInSeconds) {\n\t\t\t\t\tdelta.Add(\"Spec.MonitoringScheduleConfig.MonitoringJobDefinition.StoppingCondition.MaxRuntimeInSeconds\", a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.StoppingCondition.MaxRuntimeInSeconds, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.StoppingCondition.MaxRuntimeInSeconds)\n\t\t\t\t} else if a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.StoppingCondition.MaxRuntimeInSeconds != nil && b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.StoppingCondition.MaxRuntimeInSeconds != nil {\n\t\t\t\t\tif *a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.StoppingCondition.MaxRuntimeInSeconds != *b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.StoppingCondition.MaxRuntimeInSeconds {\n\t\t\t\t\t\tdelta.Add(\"Spec.MonitoringScheduleConfig.MonitoringJobDefinition.StoppingCondition.MaxRuntimeInSeconds\", a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.StoppingCondition.MaxRuntimeInSeconds, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.StoppingCondition.MaxRuntimeInSeconds)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif ackcompare.HasNilDifference(a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinitionName, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinitionName) {\n\t\t\tdelta.Add(\"Spec.MonitoringScheduleConfig.MonitoringJobDefinitionName\", a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinitionName, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinitionName)\n\t\t} else if a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinitionName != nil && b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinitionName != nil {\n\t\t\tif *a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinitionName != *b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinitionName {\n\t\t\t\tdelta.Add(\"Spec.MonitoringScheduleConfig.MonitoringJobDefinitionName\", a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinitionName, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinitionName)\n\t\t\t}\n\t\t}\n\t\tif ackcompare.HasNilDifference(a.ko.Spec.MonitoringScheduleConfig.MonitoringType, b.ko.Spec.MonitoringScheduleConfig.MonitoringType) {\n\t\t\tdelta.Add(\"Spec.MonitoringScheduleConfig.MonitoringType\", a.ko.Spec.MonitoringScheduleConfig.MonitoringType, b.ko.Spec.MonitoringScheduleConfig.MonitoringType)\n\t\t} else if a.ko.Spec.MonitoringScheduleConfig.MonitoringType != nil && b.ko.Spec.MonitoringScheduleConfig.MonitoringType != nil {\n\t\t\tif *a.ko.Spec.MonitoringScheduleConfig.MonitoringType != *b.ko.Spec.MonitoringScheduleConfig.MonitoringType {\n\t\t\t\tdelta.Add(\"Spec.MonitoringScheduleConfig.MonitoringType\", a.ko.Spec.MonitoringScheduleConfig.MonitoringType, b.ko.Spec.MonitoringScheduleConfig.MonitoringType)\n\t\t\t}\n\t\t}\n\t\tif ackcompare.HasNilDifference(a.ko.Spec.MonitoringScheduleConfig.ScheduleConfig, b.ko.Spec.MonitoringScheduleConfig.ScheduleConfig) {\n\t\t\tdelta.Add(\"Spec.MonitoringScheduleConfig.ScheduleConfig\", a.ko.Spec.MonitoringScheduleConfig.ScheduleConfig, b.ko.Spec.MonitoringScheduleConfig.ScheduleConfig)\n\t\t} else if a.ko.Spec.MonitoringScheduleConfig.ScheduleConfig != nil && b.ko.Spec.MonitoringScheduleConfig.ScheduleConfig != nil {\n\t\t\tif 
ackcompare.HasNilDifference(a.ko.Spec.MonitoringScheduleConfig.ScheduleConfig.ScheduleExpression, b.ko.Spec.MonitoringScheduleConfig.ScheduleConfig.ScheduleExpression) {\n\t\t\t\tdelta.Add(\"Spec.MonitoringScheduleConfig.ScheduleConfig.ScheduleExpression\", a.ko.Spec.MonitoringScheduleConfig.ScheduleConfig.ScheduleExpression, b.ko.Spec.MonitoringScheduleConfig.ScheduleConfig.ScheduleExpression)\n\t\t\t} else if a.ko.Spec.MonitoringScheduleConfig.ScheduleConfig.ScheduleExpression != nil && b.ko.Spec.MonitoringScheduleConfig.ScheduleConfig.ScheduleExpression != nil {\n\t\t\t\tif *a.ko.Spec.MonitoringScheduleConfig.ScheduleConfig.ScheduleExpression != *b.ko.Spec.MonitoringScheduleConfig.ScheduleConfig.ScheduleExpression {\n\t\t\t\t\tdelta.Add(\"Spec.MonitoringScheduleConfig.ScheduleConfig.ScheduleExpression\", a.ko.Spec.MonitoringScheduleConfig.ScheduleConfig.ScheduleExpression, b.ko.Spec.MonitoringScheduleConfig.ScheduleConfig.ScheduleExpression)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tif ackcompare.HasNilDifference(a.ko.Spec.MonitoringScheduleName, b.ko.Spec.MonitoringScheduleName) {\n\t\tdelta.Add(\"Spec.MonitoringScheduleName\", a.ko.Spec.MonitoringScheduleName, b.ko.Spec.MonitoringScheduleName)\n\t} else if a.ko.Spec.MonitoringScheduleName != nil && b.ko.Spec.MonitoringScheduleName != nil {\n\t\tif *a.ko.Spec.MonitoringScheduleName != *b.ko.Spec.MonitoringScheduleName {\n\t\t\tdelta.Add(\"Spec.MonitoringScheduleName\", a.ko.Spec.MonitoringScheduleName, b.ko.Spec.MonitoringScheduleName)\n\t\t}\n\t}\n\n\treturn delta\n}", "func createLogicalPlan(spec *operation.Spec) (*Spec, error) {\n\tnodes := make(map[operation.NodeID]Node, len(spec.Operations))\n\tadmin := administration{now: spec.Now}\n\n\tplan := NewPlanSpec()\n\tplan.Resources = spec.Resources\n\tplan.Now = spec.Now\n\n\tv := &fluxSpecVisitor{\n\t\ta: admin,\n\t\tspec: spec,\n\t\tplan: plan,\n\t\tnodes: nodes,\n\t\tyieldNames: make(map[string]struct{}),\n\t}\n\n\tif err := spec.Walk(v.visitOperation); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn v.plan, nil\n}", "func AlertmanagerSpec() *AlertmanagerSpecApplyConfiguration {\n\treturn &AlertmanagerSpecApplyConfiguration{}\n}", "func NewSpec(api *gmail.Service, db *db.DB) (*Spec, error) {\n\tlog.SetLevel(log.DebugLevel)\n\tlog.Info(\"starting new spec\")\n\n\tbytes, err := ioutil.ReadFile(\"./spec.yaml\")\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to read file: %v\", err)\n\t}\n\n\tspec := &Spec{\n\t\tapi: api,\n\t\tdb: db,\n\t}\n\n\terr = yaml.Unmarshal(bytes, spec)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to unmarshal: %v\", err)\n\t}\n\n\treturn spec, nil\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\tolmClientset, err := olmclient.NewForConfig(mgr.GetConfig())\n\tif err != nil {\n\t\tklog.Error(\"Initialize the OLM client failed: \", err)\n\t\treturn nil\n\t}\n\treturn &ReconcileOperandRequest{\n\t\tclient: mgr.GetClient(),\n\t\trecorder: mgr.GetEventRecorderFor(\"OperandRequest\"),\n\t\tscheme: mgr.GetScheme(),\n\t\tolmClient: olmClientset}\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &ReconcileDfJob{client: mgr.GetClient(), scheme: mgr.GetScheme()}\n}", "func newBatchSpecExecutionResetter(s *store.Store, observationContext *observation.Context, metrics batchChangesMetrics) *dbworker.Resetter {\n\tworkerStore := NewExecutorStore(s, observationContext)\n\n\toptions := dbworker.ResetterOptions{\n\t\tName: \"batch_spec_executor_resetter\",\n\t\tInterval: 1 * time.Minute,\n\t\tMetrics: 
metrics.executionResetterMetrics,\n\t}\n\n\tresetter := dbworker.NewResetter(workerStore, options)\n\treturn resetter\n}", "func newReceiver(\n\tparams receiver.CreateSettings,\n\tconfig *Config,\n\tnextConsumer consumer.Traces,\n) (receiver.Traces, error) {\n\t// build the response message\n\tdefaultResponse := &splunksapm.PostSpansResponse{}\n\tdefaultResponseBytes, err := defaultResponse.Marshal()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to marshal default response body for %v receiver: %w\", params.ID, err)\n\t}\n\ttransport := \"http\"\n\tif config.TLSSetting != nil {\n\t\ttransport = \"https\"\n\t}\n\tobsrecv, err := obsreport.NewReceiver(obsreport.ReceiverSettings{\n\t\tReceiverID: params.ID,\n\t\tTransport: transport,\n\t\tReceiverCreateSettings: params,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &sapmReceiver{\n\t\tsettings: params.TelemetrySettings,\n\t\tconfig: config,\n\t\tnextConsumer: nextConsumer,\n\t\tdefaultResponse: defaultResponseBytes,\n\t\tobsrecv: obsrecv,\n\t}, nil\n}", "func NewReport(author string) *Report {\n\tr := Report{}\n\tr.Date = time.Now().Format(\"20060102-150405\") // set to current time\n\tr.Author = author\n\tr.OS = runtime.GOOS\n\treturn &r\n}", "func NewOfProbe(ctx tp.Context, bridge string, address string, tlsConfig *tls.Config) BridgeOfProber {\n\treturn &ofProbe{\n\t\tCtx: ctx,\n\t\taddress: address,\n\t\tbridge: bridge,\n\t\ttlsConfig: tlsConfig,\n\t\trules: make(map[graph.Identifier]graph.Identifier),\n\t\trequests: make(map[uint32]*ofRule),\n\t}\n}", "func newEchoServerPodSpec(podName string) *api.Pod {\n\tport := 8080\n\tpod := &api.Pod{\n\t\tObjectMeta: api.ObjectMeta{\n\t\t\tName: podName,\n\t\t},\n\t\tSpec: api.PodSpec{\n\t\t\tContainers: []api.Container{\n\t\t\t\t{\n\t\t\t\t\tName: \"echoserver\",\n\t\t\t\t\tImage: \"gcr.io/google_containers/echoserver:1.4\",\n\t\t\t\t\tPorts: []api.ContainerPort{{ContainerPort: int32(port)}},\n\t\t\t\t},\n\t\t\t},\n\t\t\tRestartPolicy: api.RestartPolicyNever,\n\t\t},\n\t}\n\treturn pod\n}", "func createVendorSpec(b *troubleshootv1beta2.SupportBundle) (*troubleshootv1beta2.SupportBundle, error) {\n\tsupportBundle, err := staticspecs.GetVendorSpec()\n\tif err != nil {\n\t\tlogger.Errorf(\"Failed to load vendor support bundle spec: %v\", err)\n\t\treturn nil, err\n\t}\n\n\tif b.Spec.Collectors != nil {\n\t\tsupportBundle.Spec.Collectors = b.DeepCopy().Spec.Collectors\n\t}\n\tif b.Spec.Analyzers != nil {\n\t\tsupportBundle.Spec.Analyzers = b.DeepCopy().Spec.Analyzers\n\t}\n\treturn supportBundle, nil\n}", "func New(cfg *types.RPC) *RPC {\r\n\tInitCfg(cfg)\r\n\tif cfg.EnableTrace {\r\n\t\tgrpc.EnableTracing = true\r\n\t}\r\n\treturn &RPC{cfg: cfg}\r\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &ReconcileApplicationMonitoring{\n\t\tclient: mgr.GetClient(),\n\t\tscheme: mgr.GetScheme(),\n\t\thelper: NewKubeHelper(),\n\t\textraParams: make(map[string]string),\n\t}\n}", "func TestOTELColValidatingWebhook(t *testing.T) {\n\tminusOne := int32(-1)\n\tzero := int32(0)\n\tzero64 := int64(0)\n\tone := int32(1)\n\tthree := int32(3)\n\tfive := int32(5)\n\n\ttests := []struct { //nolint:govet\n\t\tname string\n\t\totelcol OpenTelemetryCollector\n\t\texpectedErr string\n\t}{\n\t\t{\n\t\t\tname: \"valid empty spec\",\n\t\t\totelcol: OpenTelemetryCollector{},\n\t\t},\n\t\t{\n\t\t\tname: \"valid full spec\",\n\t\t\totelcol: OpenTelemetryCollector{\n\t\t\t\tSpec: OpenTelemetryCollectorSpec{\n\t\t\t\t\tMode: ModeStatefulSet,\n\t\t\t\t\tMinReplicas: 
&one,\n\t\t\t\t\tReplicas: &three,\n\t\t\t\t\tMaxReplicas: &five,\n\t\t\t\t\tUpgradeStrategy: \"adhoc\",\n\t\t\t\t\tTargetAllocator: OpenTelemetryTargetAllocator{\n\t\t\t\t\t\tEnabled: true,\n\t\t\t\t\t},\n\t\t\t\t\tConfig: `receivers:\n examplereceiver:\n endpoint: \"0.0.0.0:12345\"\n examplereceiver/settings:\n endpoint: \"0.0.0.0:12346\"\n prometheus:\n config:\n scrape_configs:\n - job_name: otel-collector\n scrape_interval: 10s\n jaeger/custom:\n protocols:\n thrift_http:\n endpoint: 0.0.0.0:15268\n`,\n\t\t\t\t\tPorts: []v1.ServicePort{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: \"port1\",\n\t\t\t\t\t\t\tPort: 5555,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: \"port2\",\n\t\t\t\t\t\t\tPort: 5554,\n\t\t\t\t\t\t\tProtocol: v1.ProtocolUDP,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tAutoscaler: &AutoscalerSpec{\n\t\t\t\t\t\tBehavior: &autoscalingv2.HorizontalPodAutoscalerBehavior{\n\t\t\t\t\t\t\tScaleDown: &autoscalingv2.HPAScalingRules{\n\t\t\t\t\t\t\t\tStabilizationWindowSeconds: &three,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tScaleUp: &autoscalingv2.HPAScalingRules{\n\t\t\t\t\t\t\t\tStabilizationWindowSeconds: &five,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tTargetCPUUtilization: &five,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"invalid mode with volume claim templates\",\n\t\t\totelcol: OpenTelemetryCollector{\n\t\t\t\tSpec: OpenTelemetryCollectorSpec{\n\t\t\t\t\tMode: ModeSidecar,\n\t\t\t\t\tVolumeClaimTemplates: []v1.PersistentVolumeClaim{{}, {}},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedErr: \"does not support the attribute 'volumeClaimTemplates'\",\n\t\t},\n\t\t{\n\t\t\tname: \"invalid mode with tolerations\",\n\t\t\totelcol: OpenTelemetryCollector{\n\t\t\t\tSpec: OpenTelemetryCollectorSpec{\n\t\t\t\t\tMode: ModeSidecar,\n\t\t\t\t\tTolerations: []v1.Toleration{{}, {}},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedErr: \"does not support the attribute 'tolerations'\",\n\t\t},\n\t\t{\n\t\t\tname: \"invalid mode with target allocator\",\n\t\t\totelcol: OpenTelemetryCollector{\n\t\t\t\tSpec: OpenTelemetryCollectorSpec{\n\t\t\t\t\tMode: ModeDeployment,\n\t\t\t\t\tTargetAllocator: OpenTelemetryTargetAllocator{\n\t\t\t\t\t\tEnabled: true,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedErr: \"does not support the target allocation deployment\",\n\t\t},\n\t\t{\n\t\t\tname: \"invalid target allocator config\",\n\t\t\totelcol: OpenTelemetryCollector{\n\t\t\t\tSpec: OpenTelemetryCollectorSpec{\n\t\t\t\t\tMode: ModeStatefulSet,\n\t\t\t\t\tTargetAllocator: OpenTelemetryTargetAllocator{\n\t\t\t\t\t\tEnabled: true,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedErr: \"the OpenTelemetry Spec Prometheus configuration is incorrect\",\n\t\t},\n\t\t{\n\t\t\tname: \"invalid port name\",\n\t\t\totelcol: OpenTelemetryCollector{\n\t\t\t\tSpec: OpenTelemetryCollectorSpec{\n\t\t\t\t\tPorts: []v1.ServicePort{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\t// this port name contains a non alphanumeric character, which is invalid.\n\t\t\t\t\t\t\tName: \"-test🦄port\",\n\t\t\t\t\t\t\tPort: 12345,\n\t\t\t\t\t\t\tProtocol: v1.ProtocolTCP,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedErr: \"the OpenTelemetry Spec Ports configuration is incorrect\",\n\t\t},\n\t\t{\n\t\t\tname: \"invalid port name, too long\",\n\t\t\totelcol: OpenTelemetryCollector{\n\t\t\t\tSpec: OpenTelemetryCollectorSpec{\n\t\t\t\t\tPorts: []v1.ServicePort{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: \"aaaabbbbccccdddd\", // len: 16, too long\n\t\t\t\t\t\t\tPort: 
5555,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedErr: \"the OpenTelemetry Spec Ports configuration is incorrect\",\n\t\t},\n\t\t{\n\t\t\tname: \"invalid port num\",\n\t\t\totelcol: OpenTelemetryCollector{\n\t\t\t\tSpec: OpenTelemetryCollectorSpec{\n\t\t\t\t\tPorts: []v1.ServicePort{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: \"aaaabbbbccccddd\", // len: 15\n\t\t\t\t\t\t\t// no port set means it's 0, which is invalid\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedErr: \"the OpenTelemetry Spec Ports configuration is incorrect\",\n\t\t},\n\t\t{\n\t\t\tname: \"invalid max replicas\",\n\t\t\totelcol: OpenTelemetryCollector{\n\t\t\t\tSpec: OpenTelemetryCollectorSpec{\n\t\t\t\t\tMaxReplicas: &zero,\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedErr: \"maxReplicas should be defined and one or more\",\n\t\t},\n\t\t{\n\t\t\tname: \"invalid replicas, greater than max\",\n\t\t\totelcol: OpenTelemetryCollector{\n\t\t\t\tSpec: OpenTelemetryCollectorSpec{\n\t\t\t\t\tMaxReplicas: &three,\n\t\t\t\t\tReplicas: &five,\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedErr: \"replicas must not be greater than maxReplicas\",\n\t\t},\n\t\t{\n\t\t\tname: \"invalid min replicas, greater than max\",\n\t\t\totelcol: OpenTelemetryCollector{\n\t\t\t\tSpec: OpenTelemetryCollectorSpec{\n\t\t\t\t\tMaxReplicas: &three,\n\t\t\t\t\tMinReplicas: &five,\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedErr: \"minReplicas must not be greater than maxReplicas\",\n\t\t},\n\t\t{\n\t\t\tname: \"invalid min replicas, lesser than 1\",\n\t\t\totelcol: OpenTelemetryCollector{\n\t\t\t\tSpec: OpenTelemetryCollectorSpec{\n\t\t\t\t\tMaxReplicas: &three,\n\t\t\t\t\tMinReplicas: &zero,\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedErr: \"minReplicas should be one or more\",\n\t\t},\n\t\t{\n\t\t\tname: \"invalid autoscaler scale down\",\n\t\t\totelcol: OpenTelemetryCollector{\n\t\t\t\tSpec: OpenTelemetryCollectorSpec{\n\t\t\t\t\tMaxReplicas: &three,\n\t\t\t\t\tAutoscaler: &AutoscalerSpec{\n\t\t\t\t\t\tBehavior: &autoscalingv2.HorizontalPodAutoscalerBehavior{\n\t\t\t\t\t\t\tScaleDown: &autoscalingv2.HPAScalingRules{\n\t\t\t\t\t\t\t\tStabilizationWindowSeconds: &zero,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedErr: \"scaleDown should be one or more\",\n\t\t},\n\t\t{\n\t\t\tname: \"invalid autoscaler scale up\",\n\t\t\totelcol: OpenTelemetryCollector{\n\t\t\t\tSpec: OpenTelemetryCollectorSpec{\n\t\t\t\t\tMaxReplicas: &three,\n\t\t\t\t\tAutoscaler: &AutoscalerSpec{\n\t\t\t\t\t\tBehavior: &autoscalingv2.HorizontalPodAutoscalerBehavior{\n\t\t\t\t\t\t\tScaleUp: &autoscalingv2.HPAScalingRules{\n\t\t\t\t\t\t\t\tStabilizationWindowSeconds: &zero,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedErr: \"scaleUp should be one or more\",\n\t\t},\n\t\t{\n\t\t\tname: \"invalid autoscaler target cpu utilization\",\n\t\t\totelcol: OpenTelemetryCollector{\n\t\t\t\tSpec: OpenTelemetryCollectorSpec{\n\t\t\t\t\tMaxReplicas: &three,\n\t\t\t\t\tAutoscaler: &AutoscalerSpec{\n\t\t\t\t\t\tTargetCPUUtilization: &zero,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedErr: \"targetCPUUtilization should be greater than 0 and less than 100\",\n\t\t},\n\t\t{\n\t\t\tname: \"autoscaler minReplicas is less than maxReplicas\",\n\t\t\totelcol: OpenTelemetryCollector{\n\t\t\t\tSpec: OpenTelemetryCollectorSpec{\n\t\t\t\t\tAutoscaler: &AutoscalerSpec{\n\t\t\t\t\t\tMaxReplicas: &one,\n\t\t\t\t\t\tMinReplicas: &five,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedErr: \"the OpenTelemetry Spec autoscale 
configuration is incorrect, minReplicas must not be greater than maxReplicas\",\n\t\t},\n\t\t{\n\t\t\tname: \"invalid autoscaler metric type\",\n\t\t\totelcol: OpenTelemetryCollector{\n\t\t\t\tSpec: OpenTelemetryCollectorSpec{\n\t\t\t\t\tMaxReplicas: &three,\n\t\t\t\t\tAutoscaler: &AutoscalerSpec{\n\t\t\t\t\t\tMetrics: []MetricSpec{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tType: autoscalingv2.ResourceMetricSourceType,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedErr: \"the OpenTelemetry Spec autoscale configuration is incorrect, metric type unsupported. Expected metric of source type Pod\",\n\t\t},\n\t\t{\n\t\t\tname: \"invalid pod metric average value\",\n\t\t\totelcol: OpenTelemetryCollector{\n\t\t\t\tSpec: OpenTelemetryCollectorSpec{\n\t\t\t\t\tMaxReplicas: &three,\n\t\t\t\t\tAutoscaler: &AutoscalerSpec{\n\t\t\t\t\t\tMetrics: []MetricSpec{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tType: autoscalingv2.PodsMetricSourceType,\n\t\t\t\t\t\t\t\tPods: &autoscalingv2.PodsMetricSource{\n\t\t\t\t\t\t\t\t\tMetric: autoscalingv2.MetricIdentifier{\n\t\t\t\t\t\t\t\t\t\tName: \"custom1\",\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\tTarget: autoscalingv2.MetricTarget{\n\t\t\t\t\t\t\t\t\t\tType: autoscalingv2.AverageValueMetricType,\n\t\t\t\t\t\t\t\t\t\tAverageValue: resource.NewQuantity(int64(0), resource.DecimalSI),\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedErr: \"the OpenTelemetry Spec autoscale configuration is incorrect, average value should be greater than 0\",\n\t\t},\n\t\t{\n\t\t\tname: \"utilization target is not valid with pod metrics\",\n\t\t\totelcol: OpenTelemetryCollector{\n\t\t\t\tSpec: OpenTelemetryCollectorSpec{\n\t\t\t\t\tMaxReplicas: &three,\n\t\t\t\t\tAutoscaler: &AutoscalerSpec{\n\t\t\t\t\t\tMetrics: []MetricSpec{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tType: autoscalingv2.PodsMetricSourceType,\n\t\t\t\t\t\t\t\tPods: &autoscalingv2.PodsMetricSource{\n\t\t\t\t\t\t\t\t\tMetric: autoscalingv2.MetricIdentifier{\n\t\t\t\t\t\t\t\t\t\tName: \"custom1\",\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\tTarget: autoscalingv2.MetricTarget{\n\t\t\t\t\t\t\t\t\t\tType: autoscalingv2.UtilizationMetricType,\n\t\t\t\t\t\t\t\t\t\tAverageUtilization: &one,\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedErr: \"the OpenTelemetry Spec autoscale configuration is incorrect, invalid pods target type\",\n\t\t},\n\t\t{\n\t\t\tname: \"invalid deployment mode incompabible with ingress settings\",\n\t\t\totelcol: OpenTelemetryCollector{\n\t\t\t\tSpec: OpenTelemetryCollectorSpec{\n\t\t\t\t\tMode: ModeSidecar,\n\t\t\t\t\tIngress: Ingress{\n\t\t\t\t\t\tType: IngressTypeNginx,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedErr: fmt.Sprintf(\"Ingress can only be used in combination with the modes: %s, %s, %s\",\n\t\t\t\tModeDeployment, ModeDaemonSet, ModeStatefulSet,\n\t\t\t),\n\t\t},\n\t\t{\n\t\t\tname: \"invalid mode with priorityClassName\",\n\t\t\totelcol: OpenTelemetryCollector{\n\t\t\t\tSpec: OpenTelemetryCollectorSpec{\n\t\t\t\t\tMode: ModeSidecar,\n\t\t\t\t\tPriorityClassName: \"test-class\",\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedErr: \"does not support the attribute 'priorityClassName'\",\n\t\t},\n\t\t{\n\t\t\tname: \"invalid mode with affinity\",\n\t\t\totelcol: OpenTelemetryCollector{\n\t\t\t\tSpec: OpenTelemetryCollectorSpec{\n\t\t\t\t\tMode: ModeSidecar,\n\t\t\t\t\tAffinity: &v1.Affinity{\n\t\t\t\t\t\tNodeAffinity: 
&v1.NodeAffinity{\n\t\t\t\t\t\t\tRequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{\n\t\t\t\t\t\t\t\tNodeSelectorTerms: []v1.NodeSelectorTerm{\n\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\tMatchExpressions: []v1.NodeSelectorRequirement{\n\t\t\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\t\t\tKey: \"node\",\n\t\t\t\t\t\t\t\t\t\t\t\tOperator: v1.NodeSelectorOpIn,\n\t\t\t\t\t\t\t\t\t\t\t\tValues: []string{\"test-node\"},\n\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedErr: \"does not support the attribute 'affinity'\",\n\t\t},\n\t\t{\n\t\t\tname: \"invalid InitialDelaySeconds\",\n\t\t\totelcol: OpenTelemetryCollector{\n\t\t\t\tSpec: OpenTelemetryCollectorSpec{\n\t\t\t\t\tLivenessProbe: &Probe{\n\t\t\t\t\t\tInitialDelaySeconds: &minusOne,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedErr: \"the OpenTelemetry Spec LivenessProbe InitialDelaySeconds configuration is incorrect\",\n\t\t},\n\t\t{\n\t\t\tname: \"invalid PeriodSeconds\",\n\t\t\totelcol: OpenTelemetryCollector{\n\t\t\t\tSpec: OpenTelemetryCollectorSpec{\n\t\t\t\t\tLivenessProbe: &Probe{\n\t\t\t\t\t\tPeriodSeconds: &zero,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedErr: \"the OpenTelemetry Spec LivenessProbe PeriodSeconds configuration is incorrect\",\n\t\t},\n\t\t{\n\t\t\tname: \"invalid TimeoutSeconds\",\n\t\t\totelcol: OpenTelemetryCollector{\n\t\t\t\tSpec: OpenTelemetryCollectorSpec{\n\t\t\t\t\tLivenessProbe: &Probe{\n\t\t\t\t\t\tTimeoutSeconds: &zero,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedErr: \"the OpenTelemetry Spec LivenessProbe TimeoutSeconds configuration is incorrect\",\n\t\t},\n\t\t{\n\t\t\tname: \"invalid SuccessThreshold\",\n\t\t\totelcol: OpenTelemetryCollector{\n\t\t\t\tSpec: OpenTelemetryCollectorSpec{\n\t\t\t\t\tLivenessProbe: &Probe{\n\t\t\t\t\t\tSuccessThreshold: &zero,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedErr: \"the OpenTelemetry Spec LivenessProbe SuccessThreshold configuration is incorrect\",\n\t\t},\n\t\t{\n\t\t\tname: \"invalid FailureThreshold\",\n\t\t\totelcol: OpenTelemetryCollector{\n\t\t\t\tSpec: OpenTelemetryCollectorSpec{\n\t\t\t\t\tLivenessProbe: &Probe{\n\t\t\t\t\t\tFailureThreshold: &zero,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedErr: \"the OpenTelemetry Spec LivenessProbe FailureThreshold configuration is incorrect\",\n\t\t},\n\t\t{\n\t\t\tname: \"invalid TerminationGracePeriodSeconds\",\n\t\t\totelcol: OpenTelemetryCollector{\n\t\t\t\tSpec: OpenTelemetryCollectorSpec{\n\t\t\t\t\tLivenessProbe: &Probe{\n\t\t\t\t\t\tTerminationGracePeriodSeconds: &zero64,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedErr: \"the OpenTelemetry Spec LivenessProbe TerminationGracePeriodSeconds configuration is incorrect\",\n\t\t},\n\t\t{\n\t\t\tname: \"invalid AdditionalContainers\",\n\t\t\totelcol: OpenTelemetryCollector{\n\t\t\t\tSpec: OpenTelemetryCollectorSpec{\n\t\t\t\t\tMode: ModeSidecar,\n\t\t\t\t\tAdditionalContainers: []v1.Container{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: \"test\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedErr: \"the OpenTelemetry Collector mode is set to sidecar, which does not support the attribute 'AdditionalContainers'\",\n\t\t},\n\t\t{\n\t\t\tname: \"missing ingress hostname for subdomain ruleType\",\n\t\t\totelcol: OpenTelemetryCollector{\n\t\t\t\tSpec: OpenTelemetryCollectorSpec{\n\t\t\t\t\tIngress: Ingress{\n\t\t\t\t\t\tRuleType: 
IngressRuleTypeSubdomain,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedErr: \"a valid Ingress hostname has to be defined for subdomain ruleType\",\n\t\t},\n\t}\n\n\tfor _, test := range tests {\n\t\tt.Run(test.name, func(t *testing.T) {\n\t\t\terr := test.otelcol.validateCRDSpec()\n\t\t\tif test.expectedErr == \"\" {\n\t\t\t\tassert.NoError(t, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tassert.ErrorContains(t, err, test.expectedErr)\n\t\t})\n\t}\n}", "func New(c Components, o Options) Interface {\n\tif o.ProcessedQueueSize == 0 {\n\t\to.ProcessedQueueSize = 5000\n\t}\n\n\th := &component{\n\t\tComponents: c,\n\t\tPublicNetAddr: o.PublicNetAddr,\n\t\tPrivateNetAddr: o.PrivateNetAddr,\n\t\tPrivateNetAddrAnnounce: o.PrivateNetAddrAnnounce,\n\t\tProcessed: newPQueue(o.ProcessedQueueSize),\n\t}\n\n\t// TODO Make it configurable\n\th.Configuration.CFList = [5]uint32{867100000, 867300000, 867500000, 867700000, 867900000}\n\th.Configuration.NetID = [3]byte{14, 14, 14}\n\t//h.Configuration.RX1DROffset = 0\n\th.Configuration.Rx1DrOffset = 0\n\th.Configuration.RFChain = 0\n\th.Configuration.InvPolarity = true\n\n\tset := make(chan bundle)\n\tbundles := make(chan []bundle)\n\n\th.ChBundles = set\n\tgo h.consumeBundles(bundles)\n\tgo h.consumeSet(bundles, set)\n\n\treturn h\n}", "func BuildJobSpec(pod *podtemplatespec.Builder) *jobspec.Builder {\n\tjobSpecObj := jobspec.NewBuilder().\n\t\tWithPodTemplateSpecBuilder(pod)\n\t_, err := jobSpecObj.Build()\n\tif err != nil {\n\t\tlog.Errorln(err)\n\t}\n\treturn jobSpecObj\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\tgo setupAddressObserver(mgr, C)\n\treturn &ReconcileActiveMQArtemisAddress{client: mgr.GetClient(), scheme: mgr.GetScheme()}\n}", "func NewPrintJobStatus()(*PrintJobStatus) {\n m := &PrintJobStatus{\n }\n m.SetAdditionalData(make(map[string]interface{}));\n return m\n}", "func newResourceDelta(\n\ta *resource,\n\tb *resource,\n) *ackcompare.Delta {\n\tdelta := ackcompare.NewDelta()\n\tif (a == nil && b != nil) ||\n\t\t(a != nil && b == nil) {\n\t\tdelta.Add(\"\", a, b)\n\t\treturn delta\n\t}\n\tcustomSetDefaults(a, b)\n\n\tif ackcompare.HasNilDifference(a.ko.Spec.HyperParameterTuningJobConfig, b.ko.Spec.HyperParameterTuningJobConfig) {\n\t\tdelta.Add(\"Spec.HyperParameterTuningJobConfig\", a.ko.Spec.HyperParameterTuningJobConfig, b.ko.Spec.HyperParameterTuningJobConfig)\n\t} else if a.ko.Spec.HyperParameterTuningJobConfig != nil && b.ko.Spec.HyperParameterTuningJobConfig != nil {\n\t\tif ackcompare.HasNilDifference(a.ko.Spec.HyperParameterTuningJobConfig.HyperParameterTuningJobObjective, b.ko.Spec.HyperParameterTuningJobConfig.HyperParameterTuningJobObjective) {\n\t\t\tdelta.Add(\"Spec.HyperParameterTuningJobConfig.HyperParameterTuningJobObjective\", a.ko.Spec.HyperParameterTuningJobConfig.HyperParameterTuningJobObjective, b.ko.Spec.HyperParameterTuningJobConfig.HyperParameterTuningJobObjective)\n\t\t} else if a.ko.Spec.HyperParameterTuningJobConfig.HyperParameterTuningJobObjective != nil && b.ko.Spec.HyperParameterTuningJobConfig.HyperParameterTuningJobObjective != nil {\n\t\t\tif ackcompare.HasNilDifference(a.ko.Spec.HyperParameterTuningJobConfig.HyperParameterTuningJobObjective.MetricName, b.ko.Spec.HyperParameterTuningJobConfig.HyperParameterTuningJobObjective.MetricName) {\n\t\t\t\tdelta.Add(\"Spec.HyperParameterTuningJobConfig.HyperParameterTuningJobObjective.MetricName\", a.ko.Spec.HyperParameterTuningJobConfig.HyperParameterTuningJobObjective.MetricName, 
b.ko.Spec.HyperParameterTuningJobConfig.HyperParameterTuningJobObjective.MetricName)\n\t\t\t} else if a.ko.Spec.HyperParameterTuningJobConfig.HyperParameterTuningJobObjective.MetricName != nil && b.ko.Spec.HyperParameterTuningJobConfig.HyperParameterTuningJobObjective.MetricName != nil {\n\t\t\t\tif *a.ko.Spec.HyperParameterTuningJobConfig.HyperParameterTuningJobObjective.MetricName != *b.ko.Spec.HyperParameterTuningJobConfig.HyperParameterTuningJobObjective.MetricName {\n\t\t\t\t\tdelta.Add(\"Spec.HyperParameterTuningJobConfig.HyperParameterTuningJobObjective.MetricName\", a.ko.Spec.HyperParameterTuningJobConfig.HyperParameterTuningJobObjective.MetricName, b.ko.Spec.HyperParameterTuningJobConfig.HyperParameterTuningJobObjective.MetricName)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif ackcompare.HasNilDifference(a.ko.Spec.HyperParameterTuningJobConfig.HyperParameterTuningJobObjective.Type, b.ko.Spec.HyperParameterTuningJobConfig.HyperParameterTuningJobObjective.Type) {\n\t\t\t\tdelta.Add(\"Spec.HyperParameterTuningJobConfig.HyperParameterTuningJobObjective.Type\", a.ko.Spec.HyperParameterTuningJobConfig.HyperParameterTuningJobObjective.Type, b.ko.Spec.HyperParameterTuningJobConfig.HyperParameterTuningJobObjective.Type)\n\t\t\t} else if a.ko.Spec.HyperParameterTuningJobConfig.HyperParameterTuningJobObjective.Type != nil && b.ko.Spec.HyperParameterTuningJobConfig.HyperParameterTuningJobObjective.Type != nil {\n\t\t\t\tif *a.ko.Spec.HyperParameterTuningJobConfig.HyperParameterTuningJobObjective.Type != *b.ko.Spec.HyperParameterTuningJobConfig.HyperParameterTuningJobObjective.Type {\n\t\t\t\t\tdelta.Add(\"Spec.HyperParameterTuningJobConfig.HyperParameterTuningJobObjective.Type\", a.ko.Spec.HyperParameterTuningJobConfig.HyperParameterTuningJobObjective.Type, b.ko.Spec.HyperParameterTuningJobConfig.HyperParameterTuningJobObjective.Type)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif ackcompare.HasNilDifference(a.ko.Spec.HyperParameterTuningJobConfig.ParameterRanges, b.ko.Spec.HyperParameterTuningJobConfig.ParameterRanges) {\n\t\t\tdelta.Add(\"Spec.HyperParameterTuningJobConfig.ParameterRanges\", a.ko.Spec.HyperParameterTuningJobConfig.ParameterRanges, b.ko.Spec.HyperParameterTuningJobConfig.ParameterRanges)\n\t\t} else if a.ko.Spec.HyperParameterTuningJobConfig.ParameterRanges != nil && b.ko.Spec.HyperParameterTuningJobConfig.ParameterRanges != nil {\n\t\t\tif !reflect.DeepEqual(a.ko.Spec.HyperParameterTuningJobConfig.ParameterRanges.CategoricalParameterRanges, b.ko.Spec.HyperParameterTuningJobConfig.ParameterRanges.CategoricalParameterRanges) {\n\t\t\t\tdelta.Add(\"Spec.HyperParameterTuningJobConfig.ParameterRanges.CategoricalParameterRanges\", a.ko.Spec.HyperParameterTuningJobConfig.ParameterRanges.CategoricalParameterRanges, b.ko.Spec.HyperParameterTuningJobConfig.ParameterRanges.CategoricalParameterRanges)\n\t\t\t}\n\t\t\tif !reflect.DeepEqual(a.ko.Spec.HyperParameterTuningJobConfig.ParameterRanges.ContinuousParameterRanges, b.ko.Spec.HyperParameterTuningJobConfig.ParameterRanges.ContinuousParameterRanges) {\n\t\t\t\tdelta.Add(\"Spec.HyperParameterTuningJobConfig.ParameterRanges.ContinuousParameterRanges\", a.ko.Spec.HyperParameterTuningJobConfig.ParameterRanges.ContinuousParameterRanges, b.ko.Spec.HyperParameterTuningJobConfig.ParameterRanges.ContinuousParameterRanges)\n\t\t\t}\n\t\t\tif !reflect.DeepEqual(a.ko.Spec.HyperParameterTuningJobConfig.ParameterRanges.IntegerParameterRanges, b.ko.Spec.HyperParameterTuningJobConfig.ParameterRanges.IntegerParameterRanges) 
{\n\t\t\t\tdelta.Add(\"Spec.HyperParameterTuningJobConfig.ParameterRanges.IntegerParameterRanges\", a.ko.Spec.HyperParameterTuningJobConfig.ParameterRanges.IntegerParameterRanges, b.ko.Spec.HyperParameterTuningJobConfig.ParameterRanges.IntegerParameterRanges)\n\t\t\t}\n\t\t}\n\t\tif ackcompare.HasNilDifference(a.ko.Spec.HyperParameterTuningJobConfig.ResourceLimits, b.ko.Spec.HyperParameterTuningJobConfig.ResourceLimits) {\n\t\t\tdelta.Add(\"Spec.HyperParameterTuningJobConfig.ResourceLimits\", a.ko.Spec.HyperParameterTuningJobConfig.ResourceLimits, b.ko.Spec.HyperParameterTuningJobConfig.ResourceLimits)\n\t\t} else if a.ko.Spec.HyperParameterTuningJobConfig.ResourceLimits != nil && b.ko.Spec.HyperParameterTuningJobConfig.ResourceLimits != nil {\n\t\t\tif ackcompare.HasNilDifference(a.ko.Spec.HyperParameterTuningJobConfig.ResourceLimits.MaxNumberOfTrainingJobs, b.ko.Spec.HyperParameterTuningJobConfig.ResourceLimits.MaxNumberOfTrainingJobs) {\n\t\t\t\tdelta.Add(\"Spec.HyperParameterTuningJobConfig.ResourceLimits.MaxNumberOfTrainingJobs\", a.ko.Spec.HyperParameterTuningJobConfig.ResourceLimits.MaxNumberOfTrainingJobs, b.ko.Spec.HyperParameterTuningJobConfig.ResourceLimits.MaxNumberOfTrainingJobs)\n\t\t\t} else if a.ko.Spec.HyperParameterTuningJobConfig.ResourceLimits.MaxNumberOfTrainingJobs != nil && b.ko.Spec.HyperParameterTuningJobConfig.ResourceLimits.MaxNumberOfTrainingJobs != nil {\n\t\t\t\tif *a.ko.Spec.HyperParameterTuningJobConfig.ResourceLimits.MaxNumberOfTrainingJobs != *b.ko.Spec.HyperParameterTuningJobConfig.ResourceLimits.MaxNumberOfTrainingJobs {\n\t\t\t\t\tdelta.Add(\"Spec.HyperParameterTuningJobConfig.ResourceLimits.MaxNumberOfTrainingJobs\", a.ko.Spec.HyperParameterTuningJobConfig.ResourceLimits.MaxNumberOfTrainingJobs, b.ko.Spec.HyperParameterTuningJobConfig.ResourceLimits.MaxNumberOfTrainingJobs)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif ackcompare.HasNilDifference(a.ko.Spec.HyperParameterTuningJobConfig.ResourceLimits.MaxParallelTrainingJobs, b.ko.Spec.HyperParameterTuningJobConfig.ResourceLimits.MaxParallelTrainingJobs) {\n\t\t\t\tdelta.Add(\"Spec.HyperParameterTuningJobConfig.ResourceLimits.MaxParallelTrainingJobs\", a.ko.Spec.HyperParameterTuningJobConfig.ResourceLimits.MaxParallelTrainingJobs, b.ko.Spec.HyperParameterTuningJobConfig.ResourceLimits.MaxParallelTrainingJobs)\n\t\t\t} else if a.ko.Spec.HyperParameterTuningJobConfig.ResourceLimits.MaxParallelTrainingJobs != nil && b.ko.Spec.HyperParameterTuningJobConfig.ResourceLimits.MaxParallelTrainingJobs != nil {\n\t\t\t\tif *a.ko.Spec.HyperParameterTuningJobConfig.ResourceLimits.MaxParallelTrainingJobs != *b.ko.Spec.HyperParameterTuningJobConfig.ResourceLimits.MaxParallelTrainingJobs {\n\t\t\t\t\tdelta.Add(\"Spec.HyperParameterTuningJobConfig.ResourceLimits.MaxParallelTrainingJobs\", a.ko.Spec.HyperParameterTuningJobConfig.ResourceLimits.MaxParallelTrainingJobs, b.ko.Spec.HyperParameterTuningJobConfig.ResourceLimits.MaxParallelTrainingJobs)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif ackcompare.HasNilDifference(a.ko.Spec.HyperParameterTuningJobConfig.Strategy, b.ko.Spec.HyperParameterTuningJobConfig.Strategy) {\n\t\t\tdelta.Add(\"Spec.HyperParameterTuningJobConfig.Strategy\", a.ko.Spec.HyperParameterTuningJobConfig.Strategy, b.ko.Spec.HyperParameterTuningJobConfig.Strategy)\n\t\t} else if a.ko.Spec.HyperParameterTuningJobConfig.Strategy != nil && b.ko.Spec.HyperParameterTuningJobConfig.Strategy != nil {\n\t\t\tif *a.ko.Spec.HyperParameterTuningJobConfig.Strategy != *b.ko.Spec.HyperParameterTuningJobConfig.Strategy 
{\n\t\t\t\tdelta.Add(\"Spec.HyperParameterTuningJobConfig.Strategy\", a.ko.Spec.HyperParameterTuningJobConfig.Strategy, b.ko.Spec.HyperParameterTuningJobConfig.Strategy)\n\t\t\t}\n\t\t}\n\t\tif ackcompare.HasNilDifference(a.ko.Spec.HyperParameterTuningJobConfig.TrainingJobEarlyStoppingType, b.ko.Spec.HyperParameterTuningJobConfig.TrainingJobEarlyStoppingType) {\n\t\t\tdelta.Add(\"Spec.HyperParameterTuningJobConfig.TrainingJobEarlyStoppingType\", a.ko.Spec.HyperParameterTuningJobConfig.TrainingJobEarlyStoppingType, b.ko.Spec.HyperParameterTuningJobConfig.TrainingJobEarlyStoppingType)\n\t\t} else if a.ko.Spec.HyperParameterTuningJobConfig.TrainingJobEarlyStoppingType != nil && b.ko.Spec.HyperParameterTuningJobConfig.TrainingJobEarlyStoppingType != nil {\n\t\t\tif *a.ko.Spec.HyperParameterTuningJobConfig.TrainingJobEarlyStoppingType != *b.ko.Spec.HyperParameterTuningJobConfig.TrainingJobEarlyStoppingType {\n\t\t\t\tdelta.Add(\"Spec.HyperParameterTuningJobConfig.TrainingJobEarlyStoppingType\", a.ko.Spec.HyperParameterTuningJobConfig.TrainingJobEarlyStoppingType, b.ko.Spec.HyperParameterTuningJobConfig.TrainingJobEarlyStoppingType)\n\t\t\t}\n\t\t}\n\t\tif ackcompare.HasNilDifference(a.ko.Spec.HyperParameterTuningJobConfig.TuningJobCompletionCriteria, b.ko.Spec.HyperParameterTuningJobConfig.TuningJobCompletionCriteria) {\n\t\t\tdelta.Add(\"Spec.HyperParameterTuningJobConfig.TuningJobCompletionCriteria\", a.ko.Spec.HyperParameterTuningJobConfig.TuningJobCompletionCriteria, b.ko.Spec.HyperParameterTuningJobConfig.TuningJobCompletionCriteria)\n\t\t} else if a.ko.Spec.HyperParameterTuningJobConfig.TuningJobCompletionCriteria != nil && b.ko.Spec.HyperParameterTuningJobConfig.TuningJobCompletionCriteria != nil {\n\t\t\tif ackcompare.HasNilDifference(a.ko.Spec.HyperParameterTuningJobConfig.TuningJobCompletionCriteria.TargetObjectiveMetricValue, b.ko.Spec.HyperParameterTuningJobConfig.TuningJobCompletionCriteria.TargetObjectiveMetricValue) {\n\t\t\t\tdelta.Add(\"Spec.HyperParameterTuningJobConfig.TuningJobCompletionCriteria.TargetObjectiveMetricValue\", a.ko.Spec.HyperParameterTuningJobConfig.TuningJobCompletionCriteria.TargetObjectiveMetricValue, b.ko.Spec.HyperParameterTuningJobConfig.TuningJobCompletionCriteria.TargetObjectiveMetricValue)\n\t\t\t} else if a.ko.Spec.HyperParameterTuningJobConfig.TuningJobCompletionCriteria.TargetObjectiveMetricValue != nil && b.ko.Spec.HyperParameterTuningJobConfig.TuningJobCompletionCriteria.TargetObjectiveMetricValue != nil {\n\t\t\t\tif *a.ko.Spec.HyperParameterTuningJobConfig.TuningJobCompletionCriteria.TargetObjectiveMetricValue != *b.ko.Spec.HyperParameterTuningJobConfig.TuningJobCompletionCriteria.TargetObjectiveMetricValue {\n\t\t\t\t\tdelta.Add(\"Spec.HyperParameterTuningJobConfig.TuningJobCompletionCriteria.TargetObjectiveMetricValue\", a.ko.Spec.HyperParameterTuningJobConfig.TuningJobCompletionCriteria.TargetObjectiveMetricValue, b.ko.Spec.HyperParameterTuningJobConfig.TuningJobCompletionCriteria.TargetObjectiveMetricValue)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tif ackcompare.HasNilDifference(a.ko.Spec.HyperParameterTuningJobName, b.ko.Spec.HyperParameterTuningJobName) {\n\t\tdelta.Add(\"Spec.HyperParameterTuningJobName\", a.ko.Spec.HyperParameterTuningJobName, b.ko.Spec.HyperParameterTuningJobName)\n\t} else if a.ko.Spec.HyperParameterTuningJobName != nil && b.ko.Spec.HyperParameterTuningJobName != nil {\n\t\tif *a.ko.Spec.HyperParameterTuningJobName != *b.ko.Spec.HyperParameterTuningJobName {\n\t\t\tdelta.Add(\"Spec.HyperParameterTuningJobName\", 
a.ko.Spec.HyperParameterTuningJobName, b.ko.Spec.HyperParameterTuningJobName)\n\t\t}\n\t}\n\tif ackcompare.HasNilDifference(a.ko.Spec.TrainingJobDefinition, b.ko.Spec.TrainingJobDefinition) {\n\t\tdelta.Add(\"Spec.TrainingJobDefinition\", a.ko.Spec.TrainingJobDefinition, b.ko.Spec.TrainingJobDefinition)\n\t} else if a.ko.Spec.TrainingJobDefinition != nil && b.ko.Spec.TrainingJobDefinition != nil {\n\t\tif ackcompare.HasNilDifference(a.ko.Spec.TrainingJobDefinition.AlgorithmSpecification, b.ko.Spec.TrainingJobDefinition.AlgorithmSpecification) {\n\t\t\tdelta.Add(\"Spec.TrainingJobDefinition.AlgorithmSpecification\", a.ko.Spec.TrainingJobDefinition.AlgorithmSpecification, b.ko.Spec.TrainingJobDefinition.AlgorithmSpecification)\n\t\t} else if a.ko.Spec.TrainingJobDefinition.AlgorithmSpecification != nil && b.ko.Spec.TrainingJobDefinition.AlgorithmSpecification != nil {\n\t\t\tif ackcompare.HasNilDifference(a.ko.Spec.TrainingJobDefinition.AlgorithmSpecification.AlgorithmName, b.ko.Spec.TrainingJobDefinition.AlgorithmSpecification.AlgorithmName) {\n\t\t\t\tdelta.Add(\"Spec.TrainingJobDefinition.AlgorithmSpecification.AlgorithmName\", a.ko.Spec.TrainingJobDefinition.AlgorithmSpecification.AlgorithmName, b.ko.Spec.TrainingJobDefinition.AlgorithmSpecification.AlgorithmName)\n\t\t\t} else if a.ko.Spec.TrainingJobDefinition.AlgorithmSpecification.AlgorithmName != nil && b.ko.Spec.TrainingJobDefinition.AlgorithmSpecification.AlgorithmName != nil {\n\t\t\t\tif *a.ko.Spec.TrainingJobDefinition.AlgorithmSpecification.AlgorithmName != *b.ko.Spec.TrainingJobDefinition.AlgorithmSpecification.AlgorithmName {\n\t\t\t\t\tdelta.Add(\"Spec.TrainingJobDefinition.AlgorithmSpecification.AlgorithmName\", a.ko.Spec.TrainingJobDefinition.AlgorithmSpecification.AlgorithmName, b.ko.Spec.TrainingJobDefinition.AlgorithmSpecification.AlgorithmName)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif !reflect.DeepEqual(a.ko.Spec.TrainingJobDefinition.AlgorithmSpecification.MetricDefinitions, b.ko.Spec.TrainingJobDefinition.AlgorithmSpecification.MetricDefinitions) {\n\t\t\t\tdelta.Add(\"Spec.TrainingJobDefinition.AlgorithmSpecification.MetricDefinitions\", a.ko.Spec.TrainingJobDefinition.AlgorithmSpecification.MetricDefinitions, b.ko.Spec.TrainingJobDefinition.AlgorithmSpecification.MetricDefinitions)\n\t\t\t}\n\t\t\tif ackcompare.HasNilDifference(a.ko.Spec.TrainingJobDefinition.AlgorithmSpecification.TrainingImage, b.ko.Spec.TrainingJobDefinition.AlgorithmSpecification.TrainingImage) {\n\t\t\t\tdelta.Add(\"Spec.TrainingJobDefinition.AlgorithmSpecification.TrainingImage\", a.ko.Spec.TrainingJobDefinition.AlgorithmSpecification.TrainingImage, b.ko.Spec.TrainingJobDefinition.AlgorithmSpecification.TrainingImage)\n\t\t\t} else if a.ko.Spec.TrainingJobDefinition.AlgorithmSpecification.TrainingImage != nil && b.ko.Spec.TrainingJobDefinition.AlgorithmSpecification.TrainingImage != nil {\n\t\t\t\tif *a.ko.Spec.TrainingJobDefinition.AlgorithmSpecification.TrainingImage != *b.ko.Spec.TrainingJobDefinition.AlgorithmSpecification.TrainingImage {\n\t\t\t\t\tdelta.Add(\"Spec.TrainingJobDefinition.AlgorithmSpecification.TrainingImage\", a.ko.Spec.TrainingJobDefinition.AlgorithmSpecification.TrainingImage, b.ko.Spec.TrainingJobDefinition.AlgorithmSpecification.TrainingImage)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif ackcompare.HasNilDifference(a.ko.Spec.TrainingJobDefinition.AlgorithmSpecification.TrainingInputMode, b.ko.Spec.TrainingJobDefinition.AlgorithmSpecification.TrainingInputMode) 
{\n\t\t\t\tdelta.Add(\"Spec.TrainingJobDefinition.AlgorithmSpecification.TrainingInputMode\", a.ko.Spec.TrainingJobDefinition.AlgorithmSpecification.TrainingInputMode, b.ko.Spec.TrainingJobDefinition.AlgorithmSpecification.TrainingInputMode)\n\t\t\t} else if a.ko.Spec.TrainingJobDefinition.AlgorithmSpecification.TrainingInputMode != nil && b.ko.Spec.TrainingJobDefinition.AlgorithmSpecification.TrainingInputMode != nil {\n\t\t\t\tif *a.ko.Spec.TrainingJobDefinition.AlgorithmSpecification.TrainingInputMode != *b.ko.Spec.TrainingJobDefinition.AlgorithmSpecification.TrainingInputMode {\n\t\t\t\t\tdelta.Add(\"Spec.TrainingJobDefinition.AlgorithmSpecification.TrainingInputMode\", a.ko.Spec.TrainingJobDefinition.AlgorithmSpecification.TrainingInputMode, b.ko.Spec.TrainingJobDefinition.AlgorithmSpecification.TrainingInputMode)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif ackcompare.HasNilDifference(a.ko.Spec.TrainingJobDefinition.CheckpointConfig, b.ko.Spec.TrainingJobDefinition.CheckpointConfig) {\n\t\t\tdelta.Add(\"Spec.TrainingJobDefinition.CheckpointConfig\", a.ko.Spec.TrainingJobDefinition.CheckpointConfig, b.ko.Spec.TrainingJobDefinition.CheckpointConfig)\n\t\t} else if a.ko.Spec.TrainingJobDefinition.CheckpointConfig != nil && b.ko.Spec.TrainingJobDefinition.CheckpointConfig != nil {\n\t\t\tif ackcompare.HasNilDifference(a.ko.Spec.TrainingJobDefinition.CheckpointConfig.LocalPath, b.ko.Spec.TrainingJobDefinition.CheckpointConfig.LocalPath) {\n\t\t\t\tdelta.Add(\"Spec.TrainingJobDefinition.CheckpointConfig.LocalPath\", a.ko.Spec.TrainingJobDefinition.CheckpointConfig.LocalPath, b.ko.Spec.TrainingJobDefinition.CheckpointConfig.LocalPath)\n\t\t\t} else if a.ko.Spec.TrainingJobDefinition.CheckpointConfig.LocalPath != nil && b.ko.Spec.TrainingJobDefinition.CheckpointConfig.LocalPath != nil {\n\t\t\t\tif *a.ko.Spec.TrainingJobDefinition.CheckpointConfig.LocalPath != *b.ko.Spec.TrainingJobDefinition.CheckpointConfig.LocalPath {\n\t\t\t\t\tdelta.Add(\"Spec.TrainingJobDefinition.CheckpointConfig.LocalPath\", a.ko.Spec.TrainingJobDefinition.CheckpointConfig.LocalPath, b.ko.Spec.TrainingJobDefinition.CheckpointConfig.LocalPath)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif ackcompare.HasNilDifference(a.ko.Spec.TrainingJobDefinition.CheckpointConfig.S3URI, b.ko.Spec.TrainingJobDefinition.CheckpointConfig.S3URI) {\n\t\t\t\tdelta.Add(\"Spec.TrainingJobDefinition.CheckpointConfig.S3URI\", a.ko.Spec.TrainingJobDefinition.CheckpointConfig.S3URI, b.ko.Spec.TrainingJobDefinition.CheckpointConfig.S3URI)\n\t\t\t} else if a.ko.Spec.TrainingJobDefinition.CheckpointConfig.S3URI != nil && b.ko.Spec.TrainingJobDefinition.CheckpointConfig.S3URI != nil {\n\t\t\t\tif *a.ko.Spec.TrainingJobDefinition.CheckpointConfig.S3URI != *b.ko.Spec.TrainingJobDefinition.CheckpointConfig.S3URI {\n\t\t\t\t\tdelta.Add(\"Spec.TrainingJobDefinition.CheckpointConfig.S3URI\", a.ko.Spec.TrainingJobDefinition.CheckpointConfig.S3URI, b.ko.Spec.TrainingJobDefinition.CheckpointConfig.S3URI)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif ackcompare.HasNilDifference(a.ko.Spec.TrainingJobDefinition.DefinitionName, b.ko.Spec.TrainingJobDefinition.DefinitionName) {\n\t\t\tdelta.Add(\"Spec.TrainingJobDefinition.DefinitionName\", a.ko.Spec.TrainingJobDefinition.DefinitionName, b.ko.Spec.TrainingJobDefinition.DefinitionName)\n\t\t} else if a.ko.Spec.TrainingJobDefinition.DefinitionName != nil && b.ko.Spec.TrainingJobDefinition.DefinitionName != nil {\n\t\t\tif *a.ko.Spec.TrainingJobDefinition.DefinitionName != *b.ko.Spec.TrainingJobDefinition.DefinitionName 
{\n\t\t\t\tdelta.Add(\"Spec.TrainingJobDefinition.DefinitionName\", a.ko.Spec.TrainingJobDefinition.DefinitionName, b.ko.Spec.TrainingJobDefinition.DefinitionName)\n\t\t\t}\n\t\t}\n\t\tif ackcompare.HasNilDifference(a.ko.Spec.TrainingJobDefinition.EnableInterContainerTrafficEncryption, b.ko.Spec.TrainingJobDefinition.EnableInterContainerTrafficEncryption) {\n\t\t\tdelta.Add(\"Spec.TrainingJobDefinition.EnableInterContainerTrafficEncryption\", a.ko.Spec.TrainingJobDefinition.EnableInterContainerTrafficEncryption, b.ko.Spec.TrainingJobDefinition.EnableInterContainerTrafficEncryption)\n\t\t} else if a.ko.Spec.TrainingJobDefinition.EnableInterContainerTrafficEncryption != nil && b.ko.Spec.TrainingJobDefinition.EnableInterContainerTrafficEncryption != nil {\n\t\t\tif *a.ko.Spec.TrainingJobDefinition.EnableInterContainerTrafficEncryption != *b.ko.Spec.TrainingJobDefinition.EnableInterContainerTrafficEncryption {\n\t\t\t\tdelta.Add(\"Spec.TrainingJobDefinition.EnableInterContainerTrafficEncryption\", a.ko.Spec.TrainingJobDefinition.EnableInterContainerTrafficEncryption, b.ko.Spec.TrainingJobDefinition.EnableInterContainerTrafficEncryption)\n\t\t\t}\n\t\t}\n\t\tif ackcompare.HasNilDifference(a.ko.Spec.TrainingJobDefinition.EnableManagedSpotTraining, b.ko.Spec.TrainingJobDefinition.EnableManagedSpotTraining) {\n\t\t\tdelta.Add(\"Spec.TrainingJobDefinition.EnableManagedSpotTraining\", a.ko.Spec.TrainingJobDefinition.EnableManagedSpotTraining, b.ko.Spec.TrainingJobDefinition.EnableManagedSpotTraining)\n\t\t} else if a.ko.Spec.TrainingJobDefinition.EnableManagedSpotTraining != nil && b.ko.Spec.TrainingJobDefinition.EnableManagedSpotTraining != nil {\n\t\t\tif *a.ko.Spec.TrainingJobDefinition.EnableManagedSpotTraining != *b.ko.Spec.TrainingJobDefinition.EnableManagedSpotTraining {\n\t\t\t\tdelta.Add(\"Spec.TrainingJobDefinition.EnableManagedSpotTraining\", a.ko.Spec.TrainingJobDefinition.EnableManagedSpotTraining, b.ko.Spec.TrainingJobDefinition.EnableManagedSpotTraining)\n\t\t\t}\n\t\t}\n\t\tif ackcompare.HasNilDifference(a.ko.Spec.TrainingJobDefinition.EnableNetworkIsolation, b.ko.Spec.TrainingJobDefinition.EnableNetworkIsolation) {\n\t\t\tdelta.Add(\"Spec.TrainingJobDefinition.EnableNetworkIsolation\", a.ko.Spec.TrainingJobDefinition.EnableNetworkIsolation, b.ko.Spec.TrainingJobDefinition.EnableNetworkIsolation)\n\t\t} else if a.ko.Spec.TrainingJobDefinition.EnableNetworkIsolation != nil && b.ko.Spec.TrainingJobDefinition.EnableNetworkIsolation != nil {\n\t\t\tif *a.ko.Spec.TrainingJobDefinition.EnableNetworkIsolation != *b.ko.Spec.TrainingJobDefinition.EnableNetworkIsolation {\n\t\t\t\tdelta.Add(\"Spec.TrainingJobDefinition.EnableNetworkIsolation\", a.ko.Spec.TrainingJobDefinition.EnableNetworkIsolation, b.ko.Spec.TrainingJobDefinition.EnableNetworkIsolation)\n\t\t\t}\n\t\t}\n\t\tif ackcompare.HasNilDifference(a.ko.Spec.TrainingJobDefinition.HyperParameterRanges, b.ko.Spec.TrainingJobDefinition.HyperParameterRanges) {\n\t\t\tdelta.Add(\"Spec.TrainingJobDefinition.HyperParameterRanges\", a.ko.Spec.TrainingJobDefinition.HyperParameterRanges, b.ko.Spec.TrainingJobDefinition.HyperParameterRanges)\n\t\t} else if a.ko.Spec.TrainingJobDefinition.HyperParameterRanges != nil && b.ko.Spec.TrainingJobDefinition.HyperParameterRanges != nil {\n\t\t\tif !reflect.DeepEqual(a.ko.Spec.TrainingJobDefinition.HyperParameterRanges.CategoricalParameterRanges, b.ko.Spec.TrainingJobDefinition.HyperParameterRanges.CategoricalParameterRanges) 
{\n\t\t\t\tdelta.Add(\"Spec.TrainingJobDefinition.HyperParameterRanges.CategoricalParameterRanges\", a.ko.Spec.TrainingJobDefinition.HyperParameterRanges.CategoricalParameterRanges, b.ko.Spec.TrainingJobDefinition.HyperParameterRanges.CategoricalParameterRanges)\n\t\t\t}\n\t\t\tif !reflect.DeepEqual(a.ko.Spec.TrainingJobDefinition.HyperParameterRanges.ContinuousParameterRanges, b.ko.Spec.TrainingJobDefinition.HyperParameterRanges.ContinuousParameterRanges) {\n\t\t\t\tdelta.Add(\"Spec.TrainingJobDefinition.HyperParameterRanges.ContinuousParameterRanges\", a.ko.Spec.TrainingJobDefinition.HyperParameterRanges.ContinuousParameterRanges, b.ko.Spec.TrainingJobDefinition.HyperParameterRanges.ContinuousParameterRanges)\n\t\t\t}\n\t\t\tif !reflect.DeepEqual(a.ko.Spec.TrainingJobDefinition.HyperParameterRanges.IntegerParameterRanges, b.ko.Spec.TrainingJobDefinition.HyperParameterRanges.IntegerParameterRanges) {\n\t\t\t\tdelta.Add(\"Spec.TrainingJobDefinition.HyperParameterRanges.IntegerParameterRanges\", a.ko.Spec.TrainingJobDefinition.HyperParameterRanges.IntegerParameterRanges, b.ko.Spec.TrainingJobDefinition.HyperParameterRanges.IntegerParameterRanges)\n\t\t\t}\n\t\t}\n\t\tif !reflect.DeepEqual(a.ko.Spec.TrainingJobDefinition.InputDataConfig, b.ko.Spec.TrainingJobDefinition.InputDataConfig) {\n\t\t\tdelta.Add(\"Spec.TrainingJobDefinition.InputDataConfig\", a.ko.Spec.TrainingJobDefinition.InputDataConfig, b.ko.Spec.TrainingJobDefinition.InputDataConfig)\n\t\t}\n\t\tif ackcompare.HasNilDifference(a.ko.Spec.TrainingJobDefinition.OutputDataConfig, b.ko.Spec.TrainingJobDefinition.OutputDataConfig) {\n\t\t\tdelta.Add(\"Spec.TrainingJobDefinition.OutputDataConfig\", a.ko.Spec.TrainingJobDefinition.OutputDataConfig, b.ko.Spec.TrainingJobDefinition.OutputDataConfig)\n\t\t} else if a.ko.Spec.TrainingJobDefinition.OutputDataConfig != nil && b.ko.Spec.TrainingJobDefinition.OutputDataConfig != nil {\n\t\t\tif ackcompare.HasNilDifference(a.ko.Spec.TrainingJobDefinition.OutputDataConfig.KMSKeyID, b.ko.Spec.TrainingJobDefinition.OutputDataConfig.KMSKeyID) {\n\t\t\t\tdelta.Add(\"Spec.TrainingJobDefinition.OutputDataConfig.KMSKeyID\", a.ko.Spec.TrainingJobDefinition.OutputDataConfig.KMSKeyID, b.ko.Spec.TrainingJobDefinition.OutputDataConfig.KMSKeyID)\n\t\t\t} else if a.ko.Spec.TrainingJobDefinition.OutputDataConfig.KMSKeyID != nil && b.ko.Spec.TrainingJobDefinition.OutputDataConfig.KMSKeyID != nil {\n\t\t\t\tif *a.ko.Spec.TrainingJobDefinition.OutputDataConfig.KMSKeyID != *b.ko.Spec.TrainingJobDefinition.OutputDataConfig.KMSKeyID {\n\t\t\t\t\tdelta.Add(\"Spec.TrainingJobDefinition.OutputDataConfig.KMSKeyID\", a.ko.Spec.TrainingJobDefinition.OutputDataConfig.KMSKeyID, b.ko.Spec.TrainingJobDefinition.OutputDataConfig.KMSKeyID)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif ackcompare.HasNilDifference(a.ko.Spec.TrainingJobDefinition.OutputDataConfig.S3OutputPath, b.ko.Spec.TrainingJobDefinition.OutputDataConfig.S3OutputPath) {\n\t\t\t\tdelta.Add(\"Spec.TrainingJobDefinition.OutputDataConfig.S3OutputPath\", a.ko.Spec.TrainingJobDefinition.OutputDataConfig.S3OutputPath, b.ko.Spec.TrainingJobDefinition.OutputDataConfig.S3OutputPath)\n\t\t\t} else if a.ko.Spec.TrainingJobDefinition.OutputDataConfig.S3OutputPath != nil && b.ko.Spec.TrainingJobDefinition.OutputDataConfig.S3OutputPath != nil {\n\t\t\t\tif *a.ko.Spec.TrainingJobDefinition.OutputDataConfig.S3OutputPath != *b.ko.Spec.TrainingJobDefinition.OutputDataConfig.S3OutputPath {\n\t\t\t\t\tdelta.Add(\"Spec.TrainingJobDefinition.OutputDataConfig.S3OutputPath\", 
a.ko.Spec.TrainingJobDefinition.OutputDataConfig.S3OutputPath, b.ko.Spec.TrainingJobDefinition.OutputDataConfig.S3OutputPath)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif ackcompare.HasNilDifference(a.ko.Spec.TrainingJobDefinition.ResourceConfig, b.ko.Spec.TrainingJobDefinition.ResourceConfig) {\n\t\t\tdelta.Add(\"Spec.TrainingJobDefinition.ResourceConfig\", a.ko.Spec.TrainingJobDefinition.ResourceConfig, b.ko.Spec.TrainingJobDefinition.ResourceConfig)\n\t\t} else if a.ko.Spec.TrainingJobDefinition.ResourceConfig != nil && b.ko.Spec.TrainingJobDefinition.ResourceConfig != nil {\n\t\t\tif ackcompare.HasNilDifference(a.ko.Spec.TrainingJobDefinition.ResourceConfig.InstanceCount, b.ko.Spec.TrainingJobDefinition.ResourceConfig.InstanceCount) {\n\t\t\t\tdelta.Add(\"Spec.TrainingJobDefinition.ResourceConfig.InstanceCount\", a.ko.Spec.TrainingJobDefinition.ResourceConfig.InstanceCount, b.ko.Spec.TrainingJobDefinition.ResourceConfig.InstanceCount)\n\t\t\t} else if a.ko.Spec.TrainingJobDefinition.ResourceConfig.InstanceCount != nil && b.ko.Spec.TrainingJobDefinition.ResourceConfig.InstanceCount != nil {\n\t\t\t\tif *a.ko.Spec.TrainingJobDefinition.ResourceConfig.InstanceCount != *b.ko.Spec.TrainingJobDefinition.ResourceConfig.InstanceCount {\n\t\t\t\t\tdelta.Add(\"Spec.TrainingJobDefinition.ResourceConfig.InstanceCount\", a.ko.Spec.TrainingJobDefinition.ResourceConfig.InstanceCount, b.ko.Spec.TrainingJobDefinition.ResourceConfig.InstanceCount)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif ackcompare.HasNilDifference(a.ko.Spec.TrainingJobDefinition.ResourceConfig.InstanceType, b.ko.Spec.TrainingJobDefinition.ResourceConfig.InstanceType) {\n\t\t\t\tdelta.Add(\"Spec.TrainingJobDefinition.ResourceConfig.InstanceType\", a.ko.Spec.TrainingJobDefinition.ResourceConfig.InstanceType, b.ko.Spec.TrainingJobDefinition.ResourceConfig.InstanceType)\n\t\t\t} else if a.ko.Spec.TrainingJobDefinition.ResourceConfig.InstanceType != nil && b.ko.Spec.TrainingJobDefinition.ResourceConfig.InstanceType != nil {\n\t\t\t\tif *a.ko.Spec.TrainingJobDefinition.ResourceConfig.InstanceType != *b.ko.Spec.TrainingJobDefinition.ResourceConfig.InstanceType {\n\t\t\t\t\tdelta.Add(\"Spec.TrainingJobDefinition.ResourceConfig.InstanceType\", a.ko.Spec.TrainingJobDefinition.ResourceConfig.InstanceType, b.ko.Spec.TrainingJobDefinition.ResourceConfig.InstanceType)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif ackcompare.HasNilDifference(a.ko.Spec.TrainingJobDefinition.ResourceConfig.VolumeKMSKeyID, b.ko.Spec.TrainingJobDefinition.ResourceConfig.VolumeKMSKeyID) {\n\t\t\t\tdelta.Add(\"Spec.TrainingJobDefinition.ResourceConfig.VolumeKMSKeyID\", a.ko.Spec.TrainingJobDefinition.ResourceConfig.VolumeKMSKeyID, b.ko.Spec.TrainingJobDefinition.ResourceConfig.VolumeKMSKeyID)\n\t\t\t} else if a.ko.Spec.TrainingJobDefinition.ResourceConfig.VolumeKMSKeyID != nil && b.ko.Spec.TrainingJobDefinition.ResourceConfig.VolumeKMSKeyID != nil {\n\t\t\t\tif *a.ko.Spec.TrainingJobDefinition.ResourceConfig.VolumeKMSKeyID != *b.ko.Spec.TrainingJobDefinition.ResourceConfig.VolumeKMSKeyID {\n\t\t\t\t\tdelta.Add(\"Spec.TrainingJobDefinition.ResourceConfig.VolumeKMSKeyID\", a.ko.Spec.TrainingJobDefinition.ResourceConfig.VolumeKMSKeyID, b.ko.Spec.TrainingJobDefinition.ResourceConfig.VolumeKMSKeyID)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif ackcompare.HasNilDifference(a.ko.Spec.TrainingJobDefinition.ResourceConfig.VolumeSizeInGB, b.ko.Spec.TrainingJobDefinition.ResourceConfig.VolumeSizeInGB) {\n\t\t\t\tdelta.Add(\"Spec.TrainingJobDefinition.ResourceConfig.VolumeSizeInGB\", 
a.ko.Spec.TrainingJobDefinition.ResourceConfig.VolumeSizeInGB, b.ko.Spec.TrainingJobDefinition.ResourceConfig.VolumeSizeInGB)\n\t\t\t} else if a.ko.Spec.TrainingJobDefinition.ResourceConfig.VolumeSizeInGB != nil && b.ko.Spec.TrainingJobDefinition.ResourceConfig.VolumeSizeInGB != nil {\n\t\t\t\tif *a.ko.Spec.TrainingJobDefinition.ResourceConfig.VolumeSizeInGB != *b.ko.Spec.TrainingJobDefinition.ResourceConfig.VolumeSizeInGB {\n\t\t\t\t\tdelta.Add(\"Spec.TrainingJobDefinition.ResourceConfig.VolumeSizeInGB\", a.ko.Spec.TrainingJobDefinition.ResourceConfig.VolumeSizeInGB, b.ko.Spec.TrainingJobDefinition.ResourceConfig.VolumeSizeInGB)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif ackcompare.HasNilDifference(a.ko.Spec.TrainingJobDefinition.RoleARN, b.ko.Spec.TrainingJobDefinition.RoleARN) {\n\t\t\tdelta.Add(\"Spec.TrainingJobDefinition.RoleARN\", a.ko.Spec.TrainingJobDefinition.RoleARN, b.ko.Spec.TrainingJobDefinition.RoleARN)\n\t\t} else if a.ko.Spec.TrainingJobDefinition.RoleARN != nil && b.ko.Spec.TrainingJobDefinition.RoleARN != nil {\n\t\t\tif *a.ko.Spec.TrainingJobDefinition.RoleARN != *b.ko.Spec.TrainingJobDefinition.RoleARN {\n\t\t\t\tdelta.Add(\"Spec.TrainingJobDefinition.RoleARN\", a.ko.Spec.TrainingJobDefinition.RoleARN, b.ko.Spec.TrainingJobDefinition.RoleARN)\n\t\t\t}\n\t\t}\n\t\tif ackcompare.HasNilDifference(a.ko.Spec.TrainingJobDefinition.StaticHyperParameters, b.ko.Spec.TrainingJobDefinition.StaticHyperParameters) {\n\t\t\tdelta.Add(\"Spec.TrainingJobDefinition.StaticHyperParameters\", a.ko.Spec.TrainingJobDefinition.StaticHyperParameters, b.ko.Spec.TrainingJobDefinition.StaticHyperParameters)\n\t\t} else if a.ko.Spec.TrainingJobDefinition.StaticHyperParameters != nil && b.ko.Spec.TrainingJobDefinition.StaticHyperParameters != nil {\n\t\t\tif !ackcompare.MapStringStringPEqual(a.ko.Spec.TrainingJobDefinition.StaticHyperParameters, b.ko.Spec.TrainingJobDefinition.StaticHyperParameters) {\n\t\t\t\tdelta.Add(\"Spec.TrainingJobDefinition.StaticHyperParameters\", a.ko.Spec.TrainingJobDefinition.StaticHyperParameters, b.ko.Spec.TrainingJobDefinition.StaticHyperParameters)\n\t\t\t}\n\t\t}\n\t\tif ackcompare.HasNilDifference(a.ko.Spec.TrainingJobDefinition.StoppingCondition, b.ko.Spec.TrainingJobDefinition.StoppingCondition) {\n\t\t\tdelta.Add(\"Spec.TrainingJobDefinition.StoppingCondition\", a.ko.Spec.TrainingJobDefinition.StoppingCondition, b.ko.Spec.TrainingJobDefinition.StoppingCondition)\n\t\t} else if a.ko.Spec.TrainingJobDefinition.StoppingCondition != nil && b.ko.Spec.TrainingJobDefinition.StoppingCondition != nil {\n\t\t\tif ackcompare.HasNilDifference(a.ko.Spec.TrainingJobDefinition.StoppingCondition.MaxRuntimeInSeconds, b.ko.Spec.TrainingJobDefinition.StoppingCondition.MaxRuntimeInSeconds) {\n\t\t\t\tdelta.Add(\"Spec.TrainingJobDefinition.StoppingCondition.MaxRuntimeInSeconds\", a.ko.Spec.TrainingJobDefinition.StoppingCondition.MaxRuntimeInSeconds, b.ko.Spec.TrainingJobDefinition.StoppingCondition.MaxRuntimeInSeconds)\n\t\t\t} else if a.ko.Spec.TrainingJobDefinition.StoppingCondition.MaxRuntimeInSeconds != nil && b.ko.Spec.TrainingJobDefinition.StoppingCondition.MaxRuntimeInSeconds != nil {\n\t\t\t\tif *a.ko.Spec.TrainingJobDefinition.StoppingCondition.MaxRuntimeInSeconds != *b.ko.Spec.TrainingJobDefinition.StoppingCondition.MaxRuntimeInSeconds {\n\t\t\t\t\tdelta.Add(\"Spec.TrainingJobDefinition.StoppingCondition.MaxRuntimeInSeconds\", a.ko.Spec.TrainingJobDefinition.StoppingCondition.MaxRuntimeInSeconds, 
b.ko.Spec.TrainingJobDefinition.StoppingCondition.MaxRuntimeInSeconds)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif ackcompare.HasNilDifference(a.ko.Spec.TrainingJobDefinition.StoppingCondition.MaxWaitTimeInSeconds, b.ko.Spec.TrainingJobDefinition.StoppingCondition.MaxWaitTimeInSeconds) {\n\t\t\t\tdelta.Add(\"Spec.TrainingJobDefinition.StoppingCondition.MaxWaitTimeInSeconds\", a.ko.Spec.TrainingJobDefinition.StoppingCondition.MaxWaitTimeInSeconds, b.ko.Spec.TrainingJobDefinition.StoppingCondition.MaxWaitTimeInSeconds)\n\t\t\t} else if a.ko.Spec.TrainingJobDefinition.StoppingCondition.MaxWaitTimeInSeconds != nil && b.ko.Spec.TrainingJobDefinition.StoppingCondition.MaxWaitTimeInSeconds != nil {\n\t\t\t\tif *a.ko.Spec.TrainingJobDefinition.StoppingCondition.MaxWaitTimeInSeconds != *b.ko.Spec.TrainingJobDefinition.StoppingCondition.MaxWaitTimeInSeconds {\n\t\t\t\t\tdelta.Add(\"Spec.TrainingJobDefinition.StoppingCondition.MaxWaitTimeInSeconds\", a.ko.Spec.TrainingJobDefinition.StoppingCondition.MaxWaitTimeInSeconds, b.ko.Spec.TrainingJobDefinition.StoppingCondition.MaxWaitTimeInSeconds)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif ackcompare.HasNilDifference(a.ko.Spec.TrainingJobDefinition.TuningObjective, b.ko.Spec.TrainingJobDefinition.TuningObjective) {\n\t\t\tdelta.Add(\"Spec.TrainingJobDefinition.TuningObjective\", a.ko.Spec.TrainingJobDefinition.TuningObjective, b.ko.Spec.TrainingJobDefinition.TuningObjective)\n\t\t} else if a.ko.Spec.TrainingJobDefinition.TuningObjective != nil && b.ko.Spec.TrainingJobDefinition.TuningObjective != nil {\n\t\t\tif ackcompare.HasNilDifference(a.ko.Spec.TrainingJobDefinition.TuningObjective.MetricName, b.ko.Spec.TrainingJobDefinition.TuningObjective.MetricName) {\n\t\t\t\tdelta.Add(\"Spec.TrainingJobDefinition.TuningObjective.MetricName\", a.ko.Spec.TrainingJobDefinition.TuningObjective.MetricName, b.ko.Spec.TrainingJobDefinition.TuningObjective.MetricName)\n\t\t\t} else if a.ko.Spec.TrainingJobDefinition.TuningObjective.MetricName != nil && b.ko.Spec.TrainingJobDefinition.TuningObjective.MetricName != nil {\n\t\t\t\tif *a.ko.Spec.TrainingJobDefinition.TuningObjective.MetricName != *b.ko.Spec.TrainingJobDefinition.TuningObjective.MetricName {\n\t\t\t\t\tdelta.Add(\"Spec.TrainingJobDefinition.TuningObjective.MetricName\", a.ko.Spec.TrainingJobDefinition.TuningObjective.MetricName, b.ko.Spec.TrainingJobDefinition.TuningObjective.MetricName)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif ackcompare.HasNilDifference(a.ko.Spec.TrainingJobDefinition.TuningObjective.Type, b.ko.Spec.TrainingJobDefinition.TuningObjective.Type) {\n\t\t\t\tdelta.Add(\"Spec.TrainingJobDefinition.TuningObjective.Type\", a.ko.Spec.TrainingJobDefinition.TuningObjective.Type, b.ko.Spec.TrainingJobDefinition.TuningObjective.Type)\n\t\t\t} else if a.ko.Spec.TrainingJobDefinition.TuningObjective.Type != nil && b.ko.Spec.TrainingJobDefinition.TuningObjective.Type != nil {\n\t\t\t\tif *a.ko.Spec.TrainingJobDefinition.TuningObjective.Type != *b.ko.Spec.TrainingJobDefinition.TuningObjective.Type {\n\t\t\t\t\tdelta.Add(\"Spec.TrainingJobDefinition.TuningObjective.Type\", a.ko.Spec.TrainingJobDefinition.TuningObjective.Type, b.ko.Spec.TrainingJobDefinition.TuningObjective.Type)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif ackcompare.HasNilDifference(a.ko.Spec.TrainingJobDefinition.VPCConfig, b.ko.Spec.TrainingJobDefinition.VPCConfig) {\n\t\t\tdelta.Add(\"Spec.TrainingJobDefinition.VPCConfig\", a.ko.Spec.TrainingJobDefinition.VPCConfig, b.ko.Spec.TrainingJobDefinition.VPCConfig)\n\t\t} else if a.ko.Spec.TrainingJobDefinition.VPCConfig != nil && 
b.ko.Spec.TrainingJobDefinition.VPCConfig != nil {\n\t\t\tif !ackcompare.SliceStringPEqual(a.ko.Spec.TrainingJobDefinition.VPCConfig.SecurityGroupIDs, b.ko.Spec.TrainingJobDefinition.VPCConfig.SecurityGroupIDs) {\n\t\t\t\tdelta.Add(\"Spec.TrainingJobDefinition.VPCConfig.SecurityGroupIDs\", a.ko.Spec.TrainingJobDefinition.VPCConfig.SecurityGroupIDs, b.ko.Spec.TrainingJobDefinition.VPCConfig.SecurityGroupIDs)\n\t\t\t}\n\t\t\tif !ackcompare.SliceStringPEqual(a.ko.Spec.TrainingJobDefinition.VPCConfig.Subnets, b.ko.Spec.TrainingJobDefinition.VPCConfig.Subnets) {\n\t\t\t\tdelta.Add(\"Spec.TrainingJobDefinition.VPCConfig.Subnets\", a.ko.Spec.TrainingJobDefinition.VPCConfig.Subnets, b.ko.Spec.TrainingJobDefinition.VPCConfig.Subnets)\n\t\t\t}\n\t\t}\n\t}\n\tif !reflect.DeepEqual(a.ko.Spec.TrainingJobDefinitions, b.ko.Spec.TrainingJobDefinitions) {\n\t\tdelta.Add(\"Spec.TrainingJobDefinitions\", a.ko.Spec.TrainingJobDefinitions, b.ko.Spec.TrainingJobDefinitions)\n\t}\n\tif ackcompare.HasNilDifference(a.ko.Spec.WarmStartConfig, b.ko.Spec.WarmStartConfig) {\n\t\tdelta.Add(\"Spec.WarmStartConfig\", a.ko.Spec.WarmStartConfig, b.ko.Spec.WarmStartConfig)\n\t} else if a.ko.Spec.WarmStartConfig != nil && b.ko.Spec.WarmStartConfig != nil {\n\t\tif !reflect.DeepEqual(a.ko.Spec.WarmStartConfig.ParentHyperParameterTuningJobs, b.ko.Spec.WarmStartConfig.ParentHyperParameterTuningJobs) {\n\t\t\tdelta.Add(\"Spec.WarmStartConfig.ParentHyperParameterTuningJobs\", a.ko.Spec.WarmStartConfig.ParentHyperParameterTuningJobs, b.ko.Spec.WarmStartConfig.ParentHyperParameterTuningJobs)\n\t\t}\n\t\tif ackcompare.HasNilDifference(a.ko.Spec.WarmStartConfig.WarmStartType, b.ko.Spec.WarmStartConfig.WarmStartType) {\n\t\t\tdelta.Add(\"Spec.WarmStartConfig.WarmStartType\", a.ko.Spec.WarmStartConfig.WarmStartType, b.ko.Spec.WarmStartConfig.WarmStartType)\n\t\t} else if a.ko.Spec.WarmStartConfig.WarmStartType != nil && b.ko.Spec.WarmStartConfig.WarmStartType != nil {\n\t\t\tif *a.ko.Spec.WarmStartConfig.WarmStartType != *b.ko.Spec.WarmStartConfig.WarmStartType {\n\t\t\t\tdelta.Add(\"Spec.WarmStartConfig.WarmStartType\", a.ko.Spec.WarmStartConfig.WarmStartType, b.ko.Spec.WarmStartConfig.WarmStartType)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn delta\n}", "func CreatePlanSpec(spec *PlanSpec) *plan.Spec {\n\treturn createPlanSpec(spec.Nodes, spec.Edges, spec.Resources, spec.Now)\n}", "func DefaultDatadogAgentSpecAgentOTLP(agent *DatadogAgentSpecAgentSpec) *OTLPSpec {\n\tdefaultOTLP := &OTLPSpec{OTLPReceiverSpec{OTLPProtocolsSpec{\n\t\tGRPC: &OTLPGRPCSpec{\n\t\t\tEnabled: apiutils.NewBoolPointer(defaultOTLPGRPCEnabled),\n\t\t\tEndpoint: apiutils.NewStringPointer(defaultOTLPGRPCEndpoint),\n\t\t},\n\t\tHTTP: &OTLPHTTPSpec{\n\t\t\tEnabled: apiutils.NewBoolPointer(defaultOTLPHTTPEnabled),\n\t\t\tEndpoint: apiutils.NewStringPointer(defaultOTLPHTTPEndpoint),\n\t\t},\n\t}}}\n\n\tif agent.OTLP == nil {\n\t\tagent.OTLP = defaultOTLP\n\t\treturn agent.OTLP\n\t}\n\n\totlpOverride := &OTLPSpec{}\n\n\t// OTLP/gRPC section\n\tif agent.OTLP.Receiver.Protocols.GRPC == nil {\n\t\tagent.OTLP.Receiver.Protocols.GRPC = defaultOTLP.Receiver.Protocols.GRPC\n\t}\n\totlpOverride.Receiver.Protocols.GRPC = agent.OTLP.Receiver.Protocols.GRPC\n\tif agent.OTLP.Receiver.Protocols.GRPC.Enabled == nil {\n\t\tagent.OTLP.Receiver.Protocols.GRPC.Enabled = defaultOTLP.Receiver.Protocols.GRPC.Enabled\n\t\totlpOverride.Receiver.Protocols.GRPC.Enabled = agent.OTLP.Receiver.Protocols.GRPC.Enabled\n\t}\n\tif agent.OTLP.Receiver.Protocols.GRPC.Endpoint == nil 
{\n\t\tagent.OTLP.Receiver.Protocols.GRPC.Endpoint = defaultOTLP.Receiver.Protocols.GRPC.Endpoint\n\t\totlpOverride.Receiver.Protocols.GRPC.Endpoint = agent.OTLP.Receiver.Protocols.GRPC.Endpoint\n\t}\n\n\t// OTLP/HTTP section\n\tif agent.OTLP.Receiver.Protocols.HTTP == nil {\n\t\tagent.OTLP.Receiver.Protocols.HTTP = defaultOTLP.Receiver.Protocols.HTTP\n\t}\n\totlpOverride.Receiver.Protocols.HTTP = agent.OTLP.Receiver.Protocols.HTTP\n\tif agent.OTLP.Receiver.Protocols.HTTP.Enabled == nil {\n\t\tagent.OTLP.Receiver.Protocols.HTTP.Enabled = defaultOTLP.Receiver.Protocols.HTTP.Enabled\n\t\totlpOverride.Receiver.Protocols.HTTP.Enabled = agent.OTLP.Receiver.Protocols.HTTP.Enabled\n\t}\n\tif agent.OTLP.Receiver.Protocols.HTTP.Endpoint == nil {\n\t\tagent.OTLP.Receiver.Protocols.HTTP.Endpoint = defaultOTLP.Receiver.Protocols.HTTP.Endpoint\n\t\totlpOverride.Receiver.Protocols.HTTP.Endpoint = agent.OTLP.Receiver.Protocols.HTTP.Endpoint\n\t}\n\n\treturn otlpOverride\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\tlogger := log.WithName(\"newReconciler\")\n\tpodResourcesClient, err := podresourcesclient.NewPodResourcesClient()\n\tif err != nil {\n\t\tlogger.Error(err, \"unable to create podresources client\")\n\t\treturn nil\n\t}\n\treturn &ReconcilePod{client: mgr.GetClient(), scheme: mgr.GetScheme(), podResourcesClient: podResourcesClient}\n}", "func (s *SmartContract) InitLedger(stub shim.ChaincodeStubInterface) peer.Response {\n \n // Build six initial components, with one of them already Retired\n // There are three CarID's in here: CAR0, CAR1, and CAR2\n components := []CarComponent{\n CarComponent{Retired: false, Owner: \"Supplier.s0\", CarID: \"CAR0\"},\n CarComponent{Retired: false, Owner: \"Supplier.s1\", CarID: \"CAR1\"},\n CarComponent{Retired: false, Owner: \"Manufacture.m0\", CarID: \"CAR2\"},\n CarComponent{Retired: false, Owner: \"Manufacture.m2\", CarID: \"CAR3\"},\n CarComponent{Retired: false, Owner: \"Dealer.d0\", CarID: \"CAR4\"},\n CarComponent{Retired: true, Owner: \"Dealer.d1\", CarID: \"CAR5\"},\n } \n\n /*\n List of ComponentID:\n 000000000\n 000000001\n 000000002\n 000000003\n 000000004\n 000000005\n */\n // Component${i}\n i := 0\n var ComponentID string\n for i < len(components) {\n fmt.Println(\"i = \", i, \"component is\", components[i])\n componentAsBytes, _ := json.Marshal(components[i]) // debug\n ComponentID = \"00000000\" + strconv.Itoa(i)\n stub.PutState(ComponentID, componentAsBytes)\n fmt.Println(\"Added\", components[i], \"with ComponentID:\", ComponentID, \"Marshal form:\", componentAsBytes)\n i = i + 1 // increment\n }\n return shim.Success(nil)\n}", "func NewReport(startTime *time.Time, endTime *time.Time) (*Report, error) {\n\terr := DeleteReport(startTime, endTime)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn OpenReport(startTime, endTime)\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &JenkinsReconciler{client: mgr.GetClient(), scheme: mgr.GetScheme()}\n}", "func newReconcilerWithoutAPIClient(mgr manager.Manager, apiClient *swagger.APIClient) reconcile.Reconciler {\n\treturn &ReconcileNotebookJob{\n\t\tClient: mgr.GetClient(),\n\t\tscheme: mgr.GetScheme(),\n\t\trecorder: mgr.GetRecorder(\"notebookjob-controller\"),\n\t\tapiClient: apiClient,\n\t}\n}", "func NewBusinessScenarioPlanner()(*BusinessScenarioPlanner) {\n m := &BusinessScenarioPlanner{\n Entity: *NewEntity(),\n }\n return m\n}", "func New(client plugins.Client, cryptor txcrypto.Cryptor) (*AppchainMonitor, error) {\n\tmeta, err := 
client.GetOutMeta()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"get out interchainCounter from broker contract :%w\", err)\n\t}\n\n\tctx, cancel := context.WithCancel(context.Background())\n\treturn &AppchainMonitor{\n\t\tclient: client,\n\t\tinterchainCounter: meta,\n\t\tcryptor: cryptor,\n\t\trecvCh: make(chan *pb.IBTP, 1024),\n\t\tctx: ctx,\n\t\tcancel: cancel,\n\t}, nil\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &ReconcileParameterStore{client: mgr.GetClient(), scheme: mgr.GetScheme()}\n}", "func NewReportRoot()(*ReportRoot) {\n m := &ReportRoot{\n Entity: *NewEntity(),\n }\n return m\n}", "func New(cfg *types.RPC) *RPC {\n\tInitCfg(cfg)\n\tif cfg.EnableTrace {\n\t\tgrpc.EnableTracing = true\n\t}\n\treturn &RPC{cfg: cfg}\n}", "func newAddrBook(cfg config.SwarmConfig, path string, logger log.Log) *addrBook {\n\t//TODO use config for const params.\n\tam := addrBook{\n\t\tlogger: logger,\n\t\tpath: path,\n\t\tpeersFileName: cfg.PeersFile,\n\t\trand: rand.New(rand.NewSource(time.Now().UnixNano())),\n\t\tquit: make(chan struct{}),\n\t}\n\tam.reset()\n\treturn &am\n}", "func (*CMsgRequestOfferingsResponse_NewYearsOffering) Descriptor() ([]byte, []int) {\n\treturn file_dota_gcmessages_client_proto_rawDescGZIP(), []int{207, 0}\n}", "func TestConfigure_NewLeafASN(t *testing.T) {\n\tExpectedASN := \"65000\"\n\n\tMockLeafDeviceAdapter := mock.DeviceAdapter{\n\t\tMockGetInterfaces: func(FabricID uint, DeviceID uint, DeviceIP string) ([]domain.Interface, error) {\n\n\t\t\treturn []domain.Interface{domain.Interface{FabricID: FabricID, DeviceID: DeviceID,\n\t\t\t\tIntType: \"ethernet\", IntName: \"1/11\", Mac: \"M1\", ConfigState: \"up\"}}, nil\n\n\t\t},\n\t\tMockGetLLDPs: func(FabricID uint, DeviceID uint, DeviceIP string) ([]domain.LLDP, error) {\n\n\t\t\treturn []domain.LLDP{domain.LLDP{FabricID: FabricID, DeviceID: DeviceID,\n\t\t\t\tLocalIntType: \"ethernet\", LocalIntName: \"1/11\", LocalIntMac: \"M1\",\n\t\t\t\tRemoteIntType: \"ethernet\", RemoteIntName: \"1/22\", RemoteIntMac: \"M2\"}}, nil\n\n\t\t},\n\t}\n\n\tdatabase.Setup(constants.TESTDBLocation)\n\tdefer cleanupDB(database.GetWorkingInstance())\n\n\tDatabaseRepository := gateway.DatabaseRepository{Database: database.GetWorkingInstance()}\n\tdevUC := usecase.DeviceInteractor{Db: &DatabaseRepository, DeviceAdapterFactory: mock.GetDeviceAdapterFactory(MockLeafDeviceAdapter)}\n\tdevUC.AddFabric(context.Background(), MockFabricName)\n\n\tresp, err := devUC.AddDevices(context.Background(), MockFabricName, []string{MockLeaf1IP}, []string{},\n\t\tUserName, Password, false)\n\tassert.Contains(t, resp, usecase.AddDeviceResponse{FabricName: MockFabricName, FabricID: 1, IPAddress: MockLeaf1IP, Role: usecase.LeafRole})\n\n\tassert.Nil(t, err)\n\tswitchConfig, err := DatabaseRepository.GetSwitchConfigOnDeviceIP(MockFabricName, MockLeaf1IP)\n\t//Verify ASN is the first ASN from the Pool\n\tassert.Equal(t, ExpectedASN, switchConfig.LocalAS)\n\t//Verify ASN is to be created\n\tassert.Equal(t, domain.ConfigCreate, switchConfig.ASConfigType)\n\n}", "func getUpgradeLSCCSpec(chainID string, cds *pb.ChaincodeDeploymentSpec) (*pb.ChaincodeInvocationSpec, error) {\n\tb, err := proto.Marshal(cds)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t//wrap the deployment in an invocation spec to lscc...\n\tlsccSpec := &pb.ChaincodeInvocationSpec{ChaincodeSpec: &pb.ChaincodeSpec{Type: pb.ChaincodeSpec_GOLANG, ChaincodeId: &pb.ChaincodeID{Name: \"lscc\"}, Input: &pb.ChaincodeInput{Args: [][]byte{[]byte(\"upgrade\"), 
[]byte(chainID), b}}}}\n\n\treturn lsccSpec, nil\n}", "func New() Generator {\n\tspec := rspec.Spec{\n\t\tVersion: rspec.Version,\n\t\tPlatform: rspec.Platform{\n\t\t\tOS: runtime.GOOS,\n\t\t\tArch: runtime.GOARCH,\n\t\t},\n\t\tRoot: rspec.Root{\n\t\t\tPath: \"\",\n\t\t\tReadonly: false,\n\t\t},\n\t\tProcess: rspec.Process{\n\t\t\tTerminal: false,\n\t\t\tUser: rspec.User{},\n\t\t\tArgs: []string{\n\t\t\t\t\"sh\",\n\t\t\t},\n\t\t\tEnv: []string{\n\t\t\t\t\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\n\t\t\t\t\"TERM=xterm\",\n\t\t\t},\n\t\t\tCwd: \"/\",\n\t\t\tCapabilities: []string{\n\t\t\t\t\"CAP_CHOWN\",\n\t\t\t\t\"CAP_DAC_OVERRIDE\",\n\t\t\t\t\"CAP_FSETID\",\n\t\t\t\t\"CAP_FOWNER\",\n\t\t\t\t\"CAP_MKNOD\",\n\t\t\t\t\"CAP_NET_RAW\",\n\t\t\t\t\"CAP_SETGID\",\n\t\t\t\t\"CAP_SETUID\",\n\t\t\t\t\"CAP_SETFCAP\",\n\t\t\t\t\"CAP_SETPCAP\",\n\t\t\t\t\"CAP_NET_BIND_SERVICE\",\n\t\t\t\t\"CAP_SYS_CHROOT\",\n\t\t\t\t\"CAP_KILL\",\n\t\t\t\t\"CAP_AUDIT_WRITE\",\n\t\t\t},\n\t\t\tRlimits: []rspec.Rlimit{\n\t\t\t\t{\n\t\t\t\t\tType: \"RLIMIT_NOFILE\",\n\t\t\t\t\tHard: uint64(1024),\n\t\t\t\t\tSoft: uint64(1024),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tHostname: \"mrsdalloway\",\n\t\tMounts: []rspec.Mount{\n\t\t\t{\n\t\t\t\tDestination: \"/proc\",\n\t\t\t\tType: \"proc\",\n\t\t\t\tSource: \"proc\",\n\t\t\t\tOptions: nil,\n\t\t\t},\n\t\t\t{\n\t\t\t\tDestination: \"/dev\",\n\t\t\t\tType: \"tmpfs\",\n\t\t\t\tSource: \"tmpfs\",\n\t\t\t\tOptions: []string{\"nosuid\", \"strictatime\", \"mode=755\", \"size=65536k\"},\n\t\t\t},\n\t\t\t{\n\t\t\t\tDestination: \"/dev/pts\",\n\t\t\t\tType: \"devpts\",\n\t\t\t\tSource: \"devpts\",\n\t\t\t\tOptions: []string{\"nosuid\", \"noexec\", \"newinstance\", \"ptmxmode=0666\", \"mode=0620\", \"gid=5\"},\n\t\t\t},\n\t\t\t{\n\t\t\t\tDestination: \"/dev/shm\",\n\t\t\t\tType: \"tmpfs\",\n\t\t\t\tSource: \"shm\",\n\t\t\t\tOptions: []string{\"nosuid\", \"noexec\", \"nodev\", \"mode=1777\", \"size=65536k\"},\n\t\t\t},\n\t\t\t{\n\t\t\t\tDestination: \"/dev/mqueue\",\n\t\t\t\tType: \"mqueue\",\n\t\t\t\tSource: \"mqueue\",\n\t\t\t\tOptions: []string{\"nosuid\", \"noexec\", \"nodev\"},\n\t\t\t},\n\t\t\t{\n\t\t\t\tDestination: \"/sys\",\n\t\t\t\tType: \"sysfs\",\n\t\t\t\tSource: \"sysfs\",\n\t\t\t\tOptions: []string{\"nosuid\", \"noexec\", \"nodev\", \"ro\"},\n\t\t\t},\n\t\t},\n\t\tLinux: &rspec.Linux{\n\t\t\tResources: &rspec.Resources{\n\t\t\t\tDevices: []rspec.DeviceCgroup{\n\t\t\t\t\t{\n\t\t\t\t\t\tAllow: false,\n\t\t\t\t\t\tAccess: strPtr(\"rwm\"),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tNamespaces: []rspec.Namespace{\n\t\t\t\t{\n\t\t\t\t\tType: \"pid\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: \"network\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: \"ipc\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: \"uts\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: \"mount\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tDevices: []rspec.Device{},\n\t\t},\n\t}\n\treturn Generator{\n\t\tspec: &spec,\n\t}\n}", "func (rm *resourceManager) newDescribeRequestPayload(\n\tr *resource,\n) (*svcsdk.DescribeModelBiasJobDefinitionInput, error) {\n\tres := &svcsdk.DescribeModelBiasJobDefinitionInput{}\n\n\tif r.ko.Spec.JobDefinitionName != nil {\n\t\tres.SetJobDefinitionName(*r.ko.Spec.JobDefinitionName)\n\t}\n\n\treturn res, nil\n}", "func NewProtocolf(action string, hID int64, note string, a ...interface{}) Protocol {\n\treturn NewProtocol(action, hID, fmt.Sprintf(note, a...))\n}", "func NewPrinterCreateOperation()(*PrinterCreateOperation) {\n m := &PrinterCreateOperation{\n PrintOperation: 
*NewPrintOperation(),\n }\n odataTypeValue := \"#microsoft.graph.printerCreateOperation\";\n m.SetOdataType(&odataTypeValue);\n return m\n}", "func newReportErr(metricsPublisher *metrics.Publisher) *reportErr {\n\treturn &reportErr{\n\t\tmetricsPublisher: metricsPublisher,\n\t}\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &ReconcilePodHealth{client: mgr.GetClient(), scheme: mgr.GetScheme()}\n}", "func (a *API) CreateOutlierReport(cfg *OutlierReport) (*OutlierReport, error) {\n\tif cfg == nil {\n\t\treturn nil, fmt.Errorf(\"Invalid outlier report config [nil]\")\n\t}\n\n\tjsonCfg, err := json.Marshal(cfg)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif a.Debug {\n\t\ta.Log.Printf(\"[DEBUG] create outlier report, sending JSON: %s\", string(jsonCfg))\n\t}\n\n\tresult, err := a.Post(config.OutlierReportPrefix, jsonCfg)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treport := &OutlierReport{}\n\tif err := json.Unmarshal(result, report); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn report, nil\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &ReconcileDeploymentConfig{Client: mgr.GetClient(), scheme: mgr.GetScheme()}\n}", "func newResource() *resource.Resource {\n\tr, _ := resource.Merge(\n\t\tresource.Default(),\n\t\tresource.NewWithAttributes(\n\t\t\tsemconv.SchemaURL,\n\t\t\tsemconv.ServiceNameKey.String(\"opentelemetry-server\"),\n\t\t\tsemconv.ServiceVersionKey.String(\"v0.1.0\"),\n\t\t\tattribute.String(\"environment\", \"demo\"),\n\t\t),\n\t)\n\treturn r\n}", "func newPrometheusSpec(name, addr string) cap.SupervisorSpec {\n\treturn cap.NewSupervisorSpec(\n\t\tname,\n\t\t// this function builds an HTTP Server, this functionality requires more\n\t\t// than a goroutine given the only way to stop a http server is to call the\n\t\t// http.Shutdown function on a seperate goroutine\n\t\tfunc() ([]cap.Node, cap.CleanupResourcesFn, error) {\n\t\t\tserver := buildPrometheusHTTPServer(addr)\n\n\t\t\t// CAUTION: The order here matters, we need waitUntilDone to start last so\n\t\t\t// that it can terminate first, if this is not the case the\n\t\t\t// listenAndServeHTTPWorker child will never terminate.\n\t\t\t//\n\t\t\t// DISCLAIMER: The caution above _is not_ a capataz requirement, but a\n\t\t\t// requirement of net/https' API\n\t\t\tnodes := []cap.Node{\n\t\t\t\tlistenAndServeHTTPWorker(server),\n\t\t\t\twaitUntilDoneHTTPWorker(server),\n\t\t\t}\n\n\t\t\tcleanupServer := func() error {\n\t\t\t\treturn server.Close()\n\t\t\t}\n\n\t\t\treturn nodes, cleanupServer, nil\n\t\t},\n\t)\n}", "func New(stack consensus.Stack) consensus.Consenter {\n\t\n\tconfigLock.Lock()\n\tif config == nil{\n\t\tconfig = loadConfig()\n\t}\n\tdefer configLock.Unlock()\n\t\n\thandle, _, _ := stack.GetNetworkHandles()\n\tid, _ := getValidatorID(handle)\n\n\tswitch strings.ToLower(config.GetString(\"general.mode\")) {\n\tcase \"batch\":\n\t\treturn newObcBatch(id, config, stack)\n\tdefault:\n\t\tpanic(fmt.Errorf(\"Invalid PBFT mode: %s\", config.GetString(\"general.mode\")))\n\t}\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &ReconcilerPolkadot{client: mgr.GetClient(), scheme: mgr.GetScheme()}\n}", "func AlertingSpec() *AlertingSpecApplyConfiguration {\n\treturn &AlertingSpecApplyConfiguration{}\n}", "func newOtlpReceiver(cfg *Config, logger *zap.Logger) *otlpReceiver {\n\tr := &otlpReceiver{\n\t\tcfg: cfg,\n\t\tlogger: logger,\n\t}\n\tif cfg.HTTP != nil {\n\t\tr.httpMux = mux.NewRouter()\n\t}\n\n\treturn r\n}", "func 
NewAUDIT(config configuration.CONFIGURATION) *AUDIT_IMPL {\r\n client := new(AUDIT_IMPL)\r\n client.config = config\r\n return client\r\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\teventBroadcaster := record.NewBroadcaster()\n\teventBroadcaster.StartLogging(klog.Infof)\n\tclient, err :=clientset.NewForConfig(mgr.GetConfig())\n\tif err != nil {\n\t\tklog.Errorln(err)\n\t}\n\tkclient, err := kubeclient.NewForConfig(mgr.GetConfig())\n\tif err != nil {\n\t\tklog.Errorln(err)\n\t}\n\tdc := &ReconcileDeployment{\n\t\tclient: mgr.GetClient(),\n\t\tscheme: mgr.GetScheme(),\n\t\tKetiClient: client,\n\t\tKubeClient: kclient,\n\t\tqueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), \"deployment\"),\n\t}\n\tdc.rsControl = util.RealRSControl{\n\t\tKubeClient: kclient,\n\t}\n\tdc.syncHandler = dc.Reconcile\n\tdc.enqueueDeployment = dc.enqueue\n\n\treturn dc\n}", "func NewPrometheusRuleSpec() *monitoringv1.PrometheusRuleSpec {\n\trunbookCreator := newRunbookCreator()\n\n\tspec := &monitoringv1.PrometheusRuleSpec{\n\t\tGroups: []monitoringv1.RuleGroup{{\n\t\t\tName: alertRuleGroup,\n\t\t\tRules: []monitoringv1.Rule{\n\t\t\t\tcreateOutOfBandUpdateAlertRule(),\n\t\t\t\tcreateUnsafeModificationAlertRule(),\n\t\t\t\tcreateInstallationNotCompletedAlertRule(),\n\t\t\t\tcreateRequestCPUCoresRule(),\n\t\t\t\tcreateOperatorHealthStatusRule(),\n\t\t\t\tcreateSingleStackIPv6AlertRule(),\n\t\t\t},\n\t\t}},\n\t}\n\n\tfor _, rule := range spec.Groups[0].Rules {\n\t\tif rule.Alert != \"\" {\n\t\t\trule.Annotations[\"runbook_url\"] = runbookCreator.getURL(rule.Alert)\n\t\t\trule.Labels[partOfAlertLabelKey] = partOfAlertLabelValue\n\t\t\trule.Labels[componentAlertLabelKey] = componentAlertLabelValue\n\t\t}\n\t}\n\n\treturn spec\n}", "func NewFromSpec(spec *rspec.Spec) Generator {\n\treturn Generator{\n\t\tspec: spec,\n\t}\n}", "func (s *SmartContract) initLedger(APIstub shim.ChaincodeStubInterface) sc.Response {\n\temployees := []Employee{\n\t\tEmployee{EmployeeID: \"EMP1001\", FirstName: \"Vineet\", LastName: \"Timble\", DateOfJoining: \"01/01/2000\", OfficeID: \"3\"},\n\t\tEmployee{EmployeeID: \"EMP1002\", FirstName: \"Amit\", LastName: \"Saxena\", DateOfJoining: \"01/01/2006\", OfficeID: \"2\"},\n\t\tEmployee{EmployeeID: \"EMP1003\", FirstName: \"Ashutosh\", LastName: \"Phoujdar\", DateOfJoining: \"01/01/2014\", OfficeID: \"2\"},\n\t\tEmployee{EmployeeID: \"EMP1004\", FirstName: \"Niraj\", LastName: \"Pandey\", DateOfJoining: \"01/01/2012\", OfficeID: \"1\"},\n\t\tEmployee{EmployeeID: \"EMP1005\", FirstName: \"Dinesh\", LastName: \"Juturu\", DateOfJoining: \"01/01/2015\", OfficeID: \"2\"},\n\t\tEmployee{EmployeeID: \"EMP1006\", FirstName: \"Rajesh\", LastName: \"Annaji\", DateOfJoining: \"01/01/2012\", OfficeID: \"2\"},\n\t}\n\ti := 0\n\tfor i < len(employees) {\n\t\tfmt.Println(\"i is \", i)\n\t\tempAsBytes, _ := json.Marshal(employees[i])\n\t\tAPIstub.PutState(\"EMPLOYEE\"+strconv.Itoa(i), empAsBytes)\n\t\tfmt.Println(\"Added\", employees[i])\n\t\ti = i + 1\n\t}\n\n\toffices := []Office{\n\t\tOffice{OfficeID: \"OFF1\", BuildingName: \"Nirlon Compound\", StreetName: \"Off Western Express Highway\", City: \"Mumbai\", State: \"Maharashtra\", Country: \"India\"},\n\t\tOffice{OfficeID: \"OFF2\", BuildingName: \"Global Axis\", StreetName: \"Road No 9\", City: \"Bangalore\", State: \"Karnataka\", Country: \"India\"},\n\t\tOffice{OfficeID: \"OFF3\", BuildingName: \"Ambrosia\", StreetName: \"Bavdhan Khurd\", City: \"Pune\", State: \"Maharashtra\", Country: 
\"India\"},\n\t}\n\n\tj := 0\n\tfor j < len(offices) {\n\t\tfmt.Println(\"j is \", j)\n\t\tofficeAsBytes, _ := json.Marshal(offices[j])\n\t\tAPIstub.PutState(\"OFFICE\"+strconv.Itoa(j), officeAsBytes)\n\t\tfmt.Println(\"Added\", offices[j])\n\t\tj = j + 1\n\t}\n\treturn shim.Success(nil)\n}", "func NewOceanLaunchSpec(ctx *pulumi.Context,\n\tname string, args *OceanLaunchSpecArgs, opts ...pulumi.ResourceOption) (*OceanLaunchSpec, error) {\n\tif args == nil {\n\t\treturn nil, errors.New(\"missing one or more required arguments\")\n\t}\n\n\tif args.OceanId == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'OceanId'\")\n\t}\n\topts = internal.PkgResourceDefaultOpts(opts)\n\tvar resource OceanLaunchSpec\n\terr := ctx.RegisterResource(\"spotinst:gke/oceanLaunchSpec:OceanLaunchSpec\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func NewUpdateSpec(s *api.JobUpdateSettings) *stateless.UpdateSpec {\n\treturn &stateless.UpdateSpec{\n\t\tBatchSize: uint32(s.GetUpdateGroupSize()),\n\t\tRollbackOnFailure: s.GetRollbackOnFailure(),\n\t\tMaxInstanceRetries: uint32(s.GetMaxPerInstanceFailures()),\n\t\tMaxTolerableInstanceFailures: uint32(s.GetMaxFailedInstances()),\n\n\t\t// Peloton does not support pulsed updates, so if block if no pulse is\n\t\t// set, then we start the update in a paused state such that it must\n\t\t// be manually continued.\n\t\tStartPaused: s.GetBlockIfNoPulsesAfterMs() > 0,\n\t}\n}", "func NewCreateSpec(s *api.JobUpdateSettings) *stateless.CreateSpec {\n\tu := NewUpdateSpec(s, false)\n\treturn &stateless.CreateSpec{\n\t\tBatchSize: u.BatchSize,\n\t\tMaxInstanceRetries: u.MaxInstanceRetries,\n\t\tMaxTolerableInstanceFailures: u.MaxTolerableInstanceFailures,\n\t\tStartPaused: u.StartPaused,\n\t}\n}", "func NewWorkbookOperation()(*WorkbookOperation) {\n m := &WorkbookOperation{\n Entity: *NewEntity(),\n }\n return m\n}", "func newReconciler(mgr manager.Manager) (reconcile.Reconciler, error) {\n\tsv, err := version.Server()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"get version: %v\", err)\n\t}\n\n\treturn &ReconcilePerconaXtraDBClusterBackup{\n\t\tclient: mgr.GetClient(),\n\t\tscheme: mgr.GetScheme(),\n\t\tserverVersion: sv,\n\t}, nil\n}", "func New() *Prober {\n\treturn newForTest(time.Now, newRealTicker)\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &ReconcileInfluxdb{client: mgr.GetClient(), scheme: mgr.GetScheme()}\n}", "func newWallet(cfg *BTCCloneCFG, node rpcClient) *ExchangeWallet {\n\treturn &ExchangeWallet{\n\t\tnode: node,\n\t\twallet: newWalletClient(node, cfg.ChainParams),\n\t\tsymbol: cfg.Symbol,\n\t\tchainParams: cfg.ChainParams,\n\t\tlog: cfg.Logger,\n\t\ttradeChange: make(map[string]time.Time),\n\t\ttipChange: cfg.WalletCFG.TipChange,\n\t\tfundingCoins: make(map[string]*compositeUTXO),\n\t\tminNetworkVersion: cfg.MinNetworkVersion,\n\t\tfallbackFeeRate: cfg.WalletCFG.FallbackFeeRate,\n\t\twalletInfo: cfg.WalletInfo,\n\t}\n}", "func (suite *TestManagerSuite) TestManagerCreateWithExisting() {\n\terr := suite.m.UpdateStatus(\"tid001\", job.SuccessStatus.String(), 2000)\n\trequire.NoError(suite.T(), err)\n\n\trp := &scan.Report{\n\t\tDigest: \"d1000\",\n\t\tRegistrationUUID: \"ruuid\",\n\t\tMimeType: v1.MimeTypeNativeReport,\n\t\tTrackID: \"tid002\",\n\t}\n\n\tuuid, err := suite.m.Create(rp)\n\trequire.NoError(suite.T(), err)\n\trequire.NotEmpty(suite.T(), uuid)\n\n\tassert.NotEqual(suite.T(), suite.rpUUID, uuid)\n\tsuite.rpUUID = uuid\n}", "func 
(t *ManagePatient) Init(stub shim.ChaincodeStubInterface, function string, args []string) ([]byte, error) {\n var msg string\n var err error\n if len(args) != 1 {\n return nil, errors.New(\"Incorrect number of arguments. Expecting 1\")\n }\n // Initialize the chaincode\n msg = args[0]\n fmt.Println(\"ManagePatient chaincode is deployed successfully.\");\n \n // Write the state to the ledger\n err = stub.PutState(\"abc\", []byte(msg)) //making a test var \"abc\", I find it handy to read/write to it right away to test the network\n if err != nil {\n return nil, err\n }\n \n var empty []string\n jsonAsBytes, _ := json.Marshal(empty) //marshal an emtpy array of strings to clear the index\n err = stub.PutState(PatientIndexStr, jsonAsBytes)\n if err != nil {\n return nil, err\n }\n err = stub.PutState(EVENT_COUNTER, []byte(\"1\"))\n if err != nil {\n return nil, err\n }\n return nil, nil\n}", "func newAlertRuleReconciler(namespace string, owner metav1.OwnerReference) *AlertRuleReconciler {\n\treturn &AlertRuleReconciler{\n\t\ttheRule: newPrometheusRule(namespace, owner),\n\t}\n}", "func newReconciler(mgr manager.Manager) (reconcile.Reconciler, *reconcileGitTrackOpts) {\n\t// Create a restMapper (used by informer to look up resource kinds)\n\trestMapper, err := utils.NewRestMapper(mgr.GetConfig())\n\tif err != nil {\n\t\tpanic(fmt.Errorf(\"unable to create rest mapper: %v\", err))\n\t}\n\n\tgvrs, err := farosflags.ParseIgnoredResources()\n\tif err != nil {\n\t\tpanic(fmt.Errorf(\"unable to parse ignored resources: %v\", err))\n\t}\n\n\tapplier, err := farosclient.NewApplier(mgr.GetConfig(), farosclient.Options{})\n\tif err != nil {\n\t\tpanic(fmt.Errorf(\"unable to create applier: %v\", err))\n\t}\n\n\trec := &ReconcileGitTrack{\n\t\tClient: mgr.GetClient(),\n\t\tscheme: mgr.GetScheme(),\n\t\tstore: gitstore.NewRepoStore(farosflags.RepositoryDir),\n\t\trestMapper: restMapper,\n\t\trecorder: mgr.GetEventRecorderFor(\"gittrack-controller\"),\n\t\tignoredGVRs: gvrs,\n\t\tlastUpdateTimes: make(map[string]time.Time),\n\t\tmutex: &sync.RWMutex{},\n\t\tapplier: applier,\n\t\tlog: rlogr.Log.WithName(\"gittrack-controller\"),\n\t\tgitTrackMode: farosflags.GitTrack,\n\t\tnamespace: farosflags.Namespace,\n\t\tclusterGitTrackMode: farosflags.ClusterGitTrack,\n\t}\n\topts := &reconcileGitTrackOpts{\n\t\tgitTrackMode: farosflags.GitTrack,\n\t\tclusterGitTrackMode: farosflags.ClusterGitTrack,\n\t}\n\treturn rec, opts\n}", "func MakeSpec(\n\tconn, unique string,\n\tneedsUpdate func(db.Specifier, db.Specifier) bool,\n\tnewDBSpec db.Specifier,\n\tnewDBFunc DBMaker,\n\tnewDBError error,\n\tupdateFunc Updater,\n\tupdateErr error,\n) db.Specifier {\n\treturn &Spec{\n\t\tConn: conn,\n\t\tUnique: unique,\n\t\tUpdateNeeded: needsUpdate,\n\t\tNewDBSpec: newDBSpec,\n\t\tNewDBFunc: newDBFunc,\n\t\tNewDBError: newDBError,\n\t\tUpdateFunc: updateFunc,\n\t\tUpdateErr: updateErr,\n\t}\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &ReconcileClusterDeprovisionRequest{Client: mgr.GetClient(), scheme: mgr.GetScheme()}\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &ReconcileHiveConfig{Client: mgr.GetClient(), scheme: mgr.GetScheme(), restConfig: mgr.GetConfig()}\n}", "func (dc *DriverCall) AppendSpec(options interface{}) error {\n\tjsonBytes, err := json.Marshal(options)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed to marshal spec, error: %s\", err.Error())\n\t}\n\n\tdc.Append(string(jsonBytes))\n\treturn nil\n}", "func newReport() *cobra.Command {\n\tvar 
cluster []string\n\tvar dbName string\n\tvar fileName string\n\tvar headers, lines bool\n\n\tcmd := &cobra.Command{\n\t\tUse: \"query\",\n\t\tShort: \"Execute the queries in in the given file.\",\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\tctx := context.Background()\n\t\t\tdbReport(ctx, &globalKeys, fileName, dbName, headers, lines, cluster)\n\t\t\treturn nil\n\t\t},\n\t}\n\n\tflags := cmd.Flags()\n\tflags.StringSliceVarP(&cluster, \"cluster\", \"c\", clusterList(), \"addresses of existing cluster nodes\")\n\tflags.StringVarP(&dbName, \"database\", \"d\", envy.StringDefault(\"DQLITED_DB\", defaultDatabase), \"name of database to use\")\n\tflags.StringVarP(&fileName, \"file\", \"f\", \"\", \"name of file to load\")\n\tflags.BoolVarP(&headers, \"headers\", \"b\", true, \"show table headings\")\n\tflags.BoolVarP(&lines, \"lines\", \"v\", false, \"print lines between columns\")\n\n\treturn cmd\n}", "func (pool *Workspaces_BigDataPool_Spec) ConvertSpecTo(destination genruntime.ConvertibleSpec) error {\n\tif destination == pool {\n\t\treturn errors.New(\"attempted conversion between unrelated implementations of github.com/Azure/azure-service-operator/v2/pkg/genruntime/ConvertibleSpec\")\n\t}\n\n\treturn destination.ConvertSpecFrom(pool)\n}", "func newReconciler(mgr manager.Manager, certs *certs.Certs) reconcile.Reconciler {\n\treturn &ReconcileOutgoingPortal{client: mgr.GetClient(), scheme: mgr.GetScheme(), certs: certs}\n}", "func newReconciler(mgr manager.Manager, certs *certs.Certs) reconcile.Reconciler {\n\treturn &ReconcileOutgoingPortal{client: mgr.GetClient(), scheme: mgr.GetScheme(), certs: certs}\n}", "func PrometheusSpec() *PrometheusSpecApplyConfiguration {\n\treturn &PrometheusSpecApplyConfiguration{}\n}", "func New(opts ...OptionFunc) (*WorkOracle, error) {\n\toptions := &option{\n\t\tmaxRetries: 10,\n\t\tretryDelay: time.Second,\n\t\tlogger: zap.NewNop(),\n\t}\n\n\tfor _, opt := range opts {\n\t\tif err := opt(options); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tif err := options.validate(); err != nil {\n\t\treturn nil, err\n\t}\n\n\tscrypt := options.scrypter\n\tif scrypt == nil {\n\t\tscrypt = &LazyScrypter{init: func() (postrs.Scrypter, error) {\n\t\t\tif options.providerID == nil {\n\t\t\t\treturn nil, errors.New(\"no provider specified\")\n\t\t\t}\n\n\t\t\treturn postrs.NewScrypt(\n\t\t\t\tpostrs.WithProviderID(*options.providerID),\n\t\t\t\tpostrs.WithCommitment(options.commitment),\n\t\t\t\tpostrs.WithScryptN(options.n),\n\t\t\t\tpostrs.WithVRFDifficulty(options.vrfDifficulty),\n\t\t\t\tpostrs.WithLogger(options.logger),\n\t\t\t)\n\t\t}}\n\t}\n\n\treturn &WorkOracle{\n\t\toptions: options,\n\t\tscrypt: scrypt,\n\t}, nil\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &ReconcileJenkinsInstance{\n\t\tClient: mgr.GetClient(),\n\t\tEventRecorder: mgr.GetRecorder(\"JenkinsInstanceController\"),\n\t\tscheme: mgr.GetScheme(),\n\t}\n}", "func (sm *ServerMetrics) toLoadReportProto() *v3orcapb.OrcaLoadReport {\n\tret := &v3orcapb.OrcaLoadReport{\n\t\tUtilization: sm.Utilization,\n\t\tRequestCost: sm.RequestCost,\n\t\tNamedMetrics: sm.NamedMetrics,\n\t}\n\tif sm.CPUUtilization != -1 {\n\t\tret.CpuUtilization = sm.CPUUtilization\n\t}\n\tif sm.MemUtilization != -1 {\n\t\tret.MemUtilization = sm.MemUtilization\n\t}\n\tif sm.AppUtilization != -1 {\n\t\tret.ApplicationUtilization = sm.AppUtilization\n\t}\n\tif sm.QPS != -1 {\n\t\tret.RpsFractional = sm.QPS\n\t}\n\tif sm.EPS != -1 {\n\t\tret.Eps = sm.EPS\n\t}\n\treturn ret\n}", 
"func (info *joinPlanningInfo) makeCoreSpec() execinfrapb.ProcessorCoreUnion {\n\tvar core execinfrapb.ProcessorCoreUnion\n\tif len(info.leftMergeOrd.Columns) != len(info.rightMergeOrd.Columns) {\n\t\tpanic(errors.AssertionFailedf(\n\t\t\t\"unexpectedly different merge join ordering lengths: left %d, right %d\",\n\t\t\tlen(info.leftMergeOrd.Columns), len(info.rightMergeOrd.Columns),\n\t\t))\n\t}\n\tif len(info.leftMergeOrd.Columns) == 0 {\n\t\t// There is no required ordering on the columns, so we plan a hash join.\n\t\tcore.HashJoiner = &execinfrapb.HashJoinerSpec{\n\t\t\tLeftEqColumns: info.leftEqCols,\n\t\t\tRightEqColumns: info.rightEqCols,\n\t\t\tOnExpr: info.onExpr,\n\t\t\tType: info.joinType,\n\t\t\tLeftEqColumnsAreKey: info.leftEqColsAreKey,\n\t\t\tRightEqColumnsAreKey: info.rightEqColsAreKey,\n\t\t}\n\t} else {\n\t\tcore.MergeJoiner = &execinfrapb.MergeJoinerSpec{\n\t\t\tLeftOrdering: info.leftMergeOrd,\n\t\t\tRightOrdering: info.rightMergeOrd,\n\t\t\tOnExpr: info.onExpr,\n\t\t\tType: info.joinType,\n\t\t\tLeftEqColumnsAreKey: info.leftEqColsAreKey,\n\t\t\tRightEqColumnsAreKey: info.rightEqColsAreKey,\n\t\t}\n\t}\n\treturn core\n}", "func (r *Reconciler) createHighPlan(ctx context.Context, apiObject k8sutil.APIObject,\n\tcurrentPlan api.Plan, spec api.DeploymentSpec,\n\tstatus api.DeploymentStatus,\n\tbuilderCtx PlanBuilderContext) (api.Plan, api.BackOff, bool) {\n\tif !currentPlan.IsEmpty() {\n\t\t// Plan already exists, complete that first\n\t\treturn currentPlan, nil, false\n\t}\n\n\tq := recoverPlanAppender(r.log, newPlanAppender(NewWithPlanBuilder(ctx, apiObject, spec, status, builderCtx), status.BackOff, currentPlan).\n\t\tApplyIfEmpty(r.updateMemberPodTemplateSpec).\n\t\tApplyIfEmpty(r.updateMemberPhasePlan).\n\t\tApplyIfEmpty(r.createCleanOutPlan).\n\t\tApplyIfEmpty(r.syncMemberStatus).\n\t\tApplyIfEmpty(r.createSyncPlan).\n\t\tApplyIfEmpty(r.updateMemberUpdateConditionsPlan).\n\t\tApplyIfEmpty(r.updateMemberRotationConditionsPlan).\n\t\tApplyIfEmpty(r.createMemberRecreationConditionsPlan).\n\t\tApplyIfEmpty(r.createRotateServerStoragePVCPendingResizeConditionPlan).\n\t\tApplyIfEmpty(r.createChangeMemberArchPlan).\n\t\tApplyIfEmpty(r.createRotateServerStorageResizePlanRuntime).\n\t\tApplyIfEmpty(r.createTopologyMemberUpdatePlan).\n\t\tApplyIfEmptyWithBackOff(LicenseCheck, 30*time.Second, r.updateClusterLicense).\n\t\tApplyIfEmpty(r.createTopologyMemberConditionPlan).\n\t\tApplyIfEmpty(r.updateMemberConditionTypeMemberVolumeUnschedulableCondition).\n\t\tApplyIfEmpty(r.createRebalancerCheckPlanCore).\n\t\tApplyIfEmpty(r.createMemberFailedRestoreHighPlan).\n\t\tApplyIfEmpty(r.scaleDownCandidate).\n\t\tApplyIfEmpty(r.volumeMemberReplacement).\n\t\tApplyWithBackOff(BackOffCheck, time.Minute, r.emptyPlanBuilder)).\n\t\tApplyIfEmptyWithBackOff(TimezoneCheck, time.Minute, r.createTimezoneUpdatePlan).\n\t\tApply(r.createBackupInProgressConditionPlan). // Discover backups always\n\t\tApply(r.createMaintenanceConditionPlan). // Discover maintenance always\n\t\tApply(r.cleanupConditions) // Cleanup Conditions\n\n\treturn q.Plan(), q.BackOff(), true\n}" ]
[ "0.76700133", "0.6786117", "0.5106095", "0.505037", "0.49529737", "0.4917524", "0.48303846", "0.48108968", "0.48050052", "0.4734925", "0.47245955", "0.46765545", "0.46355996", "0.4632689", "0.46127805", "0.46126866", "0.45519337", "0.45240414", "0.451157", "0.44827083", "0.44729096", "0.4462632", "0.4458878", "0.4451075", "0.44407713", "0.44176495", "0.43490982", "0.43421218", "0.43370733", "0.4319516", "0.4313903", "0.43094665", "0.4305938", "0.43057343", "0.42996818", "0.42943", "0.4293905", "0.4282629", "0.42808223", "0.42778072", "0.42758515", "0.4275364", "0.42726547", "0.4270535", "0.42639685", "0.42614934", "0.42595357", "0.42583722", "0.42489123", "0.42476785", "0.42463863", "0.42459846", "0.42419413", "0.42358768", "0.42349142", "0.42343011", "0.4232223", "0.42234617", "0.42225343", "0.422092", "0.4219085", "0.42189175", "0.4215891", "0.42142224", "0.4210459", "0.42094272", "0.42076492", "0.42014235", "0.41838047", "0.4168215", "0.41636667", "0.4163528", "0.4161774", "0.41617063", "0.41614875", "0.41596526", "0.41521102", "0.41514745", "0.4150283", "0.41484505", "0.41460007", "0.41377446", "0.41339236", "0.41266146", "0.41254944", "0.4122092", "0.41199028", "0.41198793", "0.41182652", "0.41161385", "0.41130492", "0.41129482", "0.41106576", "0.41106576", "0.4106567", "0.40995297", "0.40960097", "0.40948302", "0.4088674", "0.40800774" ]
0.78592646
0
NewOffChainReporting2Spec initializes a new OffChainReporting2Spec from a job.OCR2OracleSpec
func NewOffChainReporting2Spec(spec *job.OCR2OracleSpec) *OffChainReporting2Spec { return &OffChainReporting2Spec{ ContractID: spec.ContractID, Relay: spec.Relay, RelayConfig: spec.RelayConfig, P2PV2Bootstrappers: spec.P2PV2Bootstrappers, OCRKeyBundleID: spec.OCRKeyBundleID, TransmitterID: spec.TransmitterID, BlockchainTimeout: spec.BlockchainTimeout, ContractConfigTrackerPollInterval: spec.ContractConfigTrackerPollInterval, ContractConfigConfirmations: spec.ContractConfigConfirmations, CreatedAt: spec.CreatedAt, UpdatedAt: spec.UpdatedAt, CollectTelemetry: spec.CaptureEATelemetry, } }
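A minimal usage sketch for the function above (not part of the dataset record): it assumes the surrounding job package exposes the OCR2 spec as a *job.OCR2OracleSpec field named OCR2OracleSpec on job.Job, which is an assumption about the caller rather than something shown in the snippet.

// presentOCR2Job is a hypothetical helper: it guards against non-OCR2 jobs
// and hands the embedded spec to the presenter constructor above.
func presentOCR2Job(jb job.Job) *OffChainReporting2Spec {
	if jb.OCR2OracleSpec == nil {
		return nil // not an OCR2 job; nothing to present
	}
	return NewOffChainReporting2Spec(jb.OCR2OracleSpec)
}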
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func NewOffChainReportingSpec(spec *job.OCROracleSpec) *OffChainReportingSpec {\n\treturn &OffChainReportingSpec{\n\t\tContractAddress: spec.ContractAddress,\n\t\tP2PBootstrapPeers: spec.P2PBootstrapPeers,\n\t\tP2PV2Bootstrappers: spec.P2PV2Bootstrappers,\n\t\tIsBootstrapPeer: spec.IsBootstrapPeer,\n\t\tEncryptedOCRKeyBundleID: spec.EncryptedOCRKeyBundleID,\n\t\tTransmitterAddress: spec.TransmitterAddress,\n\t\tObservationTimeout: spec.ObservationTimeout,\n\t\tObservationTimeoutEnv: spec.ObservationTimeoutEnv,\n\t\tBlockchainTimeout: spec.BlockchainTimeout,\n\t\tBlockchainTimeoutEnv: spec.BlockchainTimeoutEnv,\n\t\tContractConfigTrackerSubscribeInterval: spec.ContractConfigTrackerSubscribeInterval,\n\t\tContractConfigTrackerSubscribeIntervalEnv: spec.ContractConfigTrackerSubscribeIntervalEnv,\n\t\tContractConfigTrackerPollInterval: spec.ContractConfigTrackerPollInterval,\n\t\tContractConfigTrackerPollIntervalEnv: spec.ContractConfigTrackerPollIntervalEnv,\n\t\tContractConfigConfirmations: spec.ContractConfigConfirmations,\n\t\tContractConfigConfirmationsEnv: spec.ContractConfigConfirmationsEnv,\n\t\tCreatedAt: spec.CreatedAt,\n\t\tUpdatedAt: spec.UpdatedAt,\n\t\tEVMChainID: spec.EVMChainID,\n\t\tDatabaseTimeout: spec.DatabaseTimeout,\n\t\tDatabaseTimeoutEnv: spec.DatabaseTimeoutEnv,\n\t\tObservationGracePeriod: spec.ObservationGracePeriod,\n\t\tObservationGracePeriodEnv: spec.ObservationGracePeriodEnv,\n\t\tContractTransmitterTransmitTimeout: spec.ContractTransmitterTransmitTimeout,\n\t\tContractTransmitterTransmitTimeoutEnv: spec.ContractTransmitterTransmitTimeoutEnv,\n\t\tCollectTelemetry: spec.CaptureEATelemetry,\n\t}\n}", "func NewOffChainReportingSpec(spec *job.OffchainReportingOracleSpec) *OffChainReportingSpec {\n\treturn &OffChainReportingSpec{\n\t\tContractAddress: spec.ContractAddress,\n\t\tP2PPeerID: spec.P2PPeerID,\n\t\tP2PBootstrapPeers: spec.P2PBootstrapPeers,\n\t\tIsBootstrapPeer: spec.IsBootstrapPeer,\n\t\tEncryptedOCRKeyBundleID: spec.EncryptedOCRKeyBundleID,\n\t\tTransmitterAddress: spec.TransmitterAddress,\n\t\tObservationTimeout: spec.ObservationTimeout,\n\t\tBlockchainTimeout: spec.BlockchainTimeout,\n\t\tContractConfigTrackerSubscribeInterval: spec.ContractConfigTrackerSubscribeInterval,\n\t\tContractConfigTrackerPollInterval: spec.ContractConfigTrackerPollInterval,\n\t\tContractConfigConfirmations: spec.ContractConfigConfirmations,\n\t\tCreatedAt: spec.CreatedAt,\n\t\tUpdatedAt: spec.UpdatedAt,\n\t}\n}", "func newE2ESetup(msg *e2e.SetupReq) *colibri_mgmt.E2ESetup {\n\tallocTrail := make([]uint8, len(msg.AllocationTrail))\n\tfor i := range msg.AllocationTrail {\n\t\tallocTrail[i] = uint8(msg.AllocationTrail[i])\n\t}\n\treturn &colibri_mgmt.E2ESetup{\n\t\tBase: newE2EBase(&msg.Request),\n\t\tSegmentRsvs: newSegmentIDs(msg.SegmentRsvs),\n\t\tSegmentRsvASCount: msg.SegmentRsvASCount,\n\t\tRequestedBW: uint8(msg.RequestedBW),\n\t\tAllocationTrail: allocTrail,\n\t}\n}", "func NewSpec(details *SpecDetails) *Spec {\n\treturn &Spec{\n\t\tDetails: details,\n\t\tServices: NewServiceList(),\n\t\tStatus: SpecWaiting,\n\t}\n}", "func newOtlpReceiver(cfg *Config, set receiver.CreateSettings) (*otlpReceiver, error) {\n\tr := &otlpReceiver{\n\t\tcfg: cfg,\n\t\tsettings: set,\n\t}\n\tif cfg.HTTP != nil {\n\t\tr.httpMux = http.NewServeMux()\n\t}\n\n\tvar err error\n\tr.obsrepGRPC, err = obsreport.NewReceiver(obsreport.ReceiverSettings{\n\t\tReceiverID: set.ID,\n\t\tTransport: \"grpc\",\n\t\tReceiverCreateSettings: set,\n\t})\n\tif err != nil {\n\t\treturn nil, 
err\n\t}\n\tr.obsrepHTTP, err = obsreport.NewReceiver(obsreport.ReceiverSettings{\n\t\tReceiverID: set.ID,\n\t\tTransport: \"http\",\n\t\tReceiverCreateSettings: set,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn r, nil\n}", "func NewSpec(api *gmail.Service, db *db.DB) (*Spec, error) {\n\tlog.SetLevel(log.DebugLevel)\n\tlog.Info(\"starting new spec\")\n\n\tbytes, err := ioutil.ReadFile(\"./spec.yaml\")\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to read file: %v\", err)\n\t}\n\n\tspec := &Spec{\n\t\tapi: api,\n\t\tdb: db,\n\t}\n\n\terr = yaml.Unmarshal(bytes, spec)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to unmarshal: %v\", err)\n\t}\n\n\treturn spec, nil\n}", "func newReceiver(\n\tparams receiver.CreateSettings,\n\tconfig *Config,\n\tnextConsumer consumer.Traces,\n) (receiver.Traces, error) {\n\t// build the response message\n\tdefaultResponse := &splunksapm.PostSpansResponse{}\n\tdefaultResponseBytes, err := defaultResponse.Marshal()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to marshal default response body for %v receiver: %w\", params.ID, err)\n\t}\n\ttransport := \"http\"\n\tif config.TLSSetting != nil {\n\t\ttransport = \"https\"\n\t}\n\tobsrecv, err := obsreport.NewReceiver(obsreport.ReceiverSettings{\n\t\tReceiverID: params.ID,\n\t\tTransport: transport,\n\t\tReceiverCreateSettings: params,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &sapmReceiver{\n\t\tsettings: params.TelemetrySettings,\n\t\tconfig: config,\n\t\tnextConsumer: nextConsumer,\n\t\tdefaultResponse: defaultResponseBytes,\n\t\tobsrecv: obsrecv,\n\t}, nil\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\tapiConfig := swagger.NewConfiguration()\n\t//TODO Make it configurable\n\tapiConfig.BasePath = \"http://localhost:5000\"\n\n\treturn &ReconcileNotebookJob{\n\t\tClient: mgr.GetClient(),\n\t\tscheme: mgr.GetScheme(),\n\t\trecorder: mgr.GetRecorder(\"notebookjob-controller\"),\n\t\tapiClient: swagger.NewAPIClient(apiConfig),\n\t}\n}", "func NewCoherenceInternalSpec(cluster *CoherenceCluster, role *CoherenceRole) *CoherenceInternalSpec {\n\tout := CoherenceInternalSpec{}\n\n\tout.FullnameOverride = role.Name\n\tout.Cluster = cluster.Name\n\tout.ServiceAccountName = cluster.Spec.ServiceAccountName\n\tout.AutomountServiceAccountToken = cluster.Spec.AutomountServiceAccountToken\n\tout.ImagePullSecrets = cluster.Spec.ImagePullSecrets\n\tout.WKA = cluster.GetWkaServiceName()\n\tout.OperatorRequestTimeout = cluster.Spec.OperatorRequestTimeout\n\n\tout.CoherenceRoleSpec = CoherenceRoleSpec{}\n\trole.Spec.DeepCopyInto(&out.CoherenceRoleSpec)\n\n\treturn &out\n}", "func newBatchSpecExecutionResetter(s *store.Store, observationContext *observation.Context, metrics batchChangesMetrics) *dbworker.Resetter {\n\tworkerStore := NewExecutorStore(s, observationContext)\n\n\toptions := dbworker.ResetterOptions{\n\t\tName: \"batch_spec_executor_resetter\",\n\t\tInterval: 1 * time.Minute,\n\t\tMetrics: metrics.executionResetterMetrics,\n\t}\n\n\tresetter := dbworker.NewResetter(workerStore, options)\n\treturn resetter\n}", "func New() Generator {\n\tspec := rspec.Spec{\n\t\tVersion: rspec.Version,\n\t\tPlatform: rspec.Platform{\n\t\t\tOS: runtime.GOOS,\n\t\t\tArch: runtime.GOARCH,\n\t\t},\n\t\tRoot: rspec.Root{\n\t\t\tPath: \"\",\n\t\t\tReadonly: false,\n\t\t},\n\t\tProcess: rspec.Process{\n\t\t\tTerminal: false,\n\t\t\tUser: rspec.User{},\n\t\t\tArgs: []string{\n\t\t\t\t\"sh\",\n\t\t\t},\n\t\t\tEnv: 
[]string{\n\t\t\t\t\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\n\t\t\t\t\"TERM=xterm\",\n\t\t\t},\n\t\t\tCwd: \"/\",\n\t\t\tCapabilities: []string{\n\t\t\t\t\"CAP_CHOWN\",\n\t\t\t\t\"CAP_DAC_OVERRIDE\",\n\t\t\t\t\"CAP_FSETID\",\n\t\t\t\t\"CAP_FOWNER\",\n\t\t\t\t\"CAP_MKNOD\",\n\t\t\t\t\"CAP_NET_RAW\",\n\t\t\t\t\"CAP_SETGID\",\n\t\t\t\t\"CAP_SETUID\",\n\t\t\t\t\"CAP_SETFCAP\",\n\t\t\t\t\"CAP_SETPCAP\",\n\t\t\t\t\"CAP_NET_BIND_SERVICE\",\n\t\t\t\t\"CAP_SYS_CHROOT\",\n\t\t\t\t\"CAP_KILL\",\n\t\t\t\t\"CAP_AUDIT_WRITE\",\n\t\t\t},\n\t\t\tRlimits: []rspec.Rlimit{\n\t\t\t\t{\n\t\t\t\t\tType: \"RLIMIT_NOFILE\",\n\t\t\t\t\tHard: uint64(1024),\n\t\t\t\t\tSoft: uint64(1024),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tHostname: \"mrsdalloway\",\n\t\tMounts: []rspec.Mount{\n\t\t\t{\n\t\t\t\tDestination: \"/proc\",\n\t\t\t\tType: \"proc\",\n\t\t\t\tSource: \"proc\",\n\t\t\t\tOptions: nil,\n\t\t\t},\n\t\t\t{\n\t\t\t\tDestination: \"/dev\",\n\t\t\t\tType: \"tmpfs\",\n\t\t\t\tSource: \"tmpfs\",\n\t\t\t\tOptions: []string{\"nosuid\", \"strictatime\", \"mode=755\", \"size=65536k\"},\n\t\t\t},\n\t\t\t{\n\t\t\t\tDestination: \"/dev/pts\",\n\t\t\t\tType: \"devpts\",\n\t\t\t\tSource: \"devpts\",\n\t\t\t\tOptions: []string{\"nosuid\", \"noexec\", \"newinstance\", \"ptmxmode=0666\", \"mode=0620\", \"gid=5\"},\n\t\t\t},\n\t\t\t{\n\t\t\t\tDestination: \"/dev/shm\",\n\t\t\t\tType: \"tmpfs\",\n\t\t\t\tSource: \"shm\",\n\t\t\t\tOptions: []string{\"nosuid\", \"noexec\", \"nodev\", \"mode=1777\", \"size=65536k\"},\n\t\t\t},\n\t\t\t{\n\t\t\t\tDestination: \"/dev/mqueue\",\n\t\t\t\tType: \"mqueue\",\n\t\t\t\tSource: \"mqueue\",\n\t\t\t\tOptions: []string{\"nosuid\", \"noexec\", \"nodev\"},\n\t\t\t},\n\t\t\t{\n\t\t\t\tDestination: \"/sys\",\n\t\t\t\tType: \"sysfs\",\n\t\t\t\tSource: \"sysfs\",\n\t\t\t\tOptions: []string{\"nosuid\", \"noexec\", \"nodev\", \"ro\"},\n\t\t\t},\n\t\t},\n\t\tLinux: &rspec.Linux{\n\t\t\tResources: &rspec.Resources{\n\t\t\t\tDevices: []rspec.DeviceCgroup{\n\t\t\t\t\t{\n\t\t\t\t\t\tAllow: false,\n\t\t\t\t\t\tAccess: strPtr(\"rwm\"),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tNamespaces: []rspec.Namespace{\n\t\t\t\t{\n\t\t\t\t\tType: \"pid\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: \"network\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: \"ipc\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: \"uts\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: \"mount\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tDevices: []rspec.Device{},\n\t\t},\n\t}\n\treturn Generator{\n\t\tspec: &spec,\n\t}\n}", "func createVendorSpec(b *troubleshootv1beta2.SupportBundle) (*troubleshootv1beta2.SupportBundle, error) {\n\tsupportBundle, err := staticspecs.GetVendorSpec()\n\tif err != nil {\n\t\tlogger.Errorf(\"Failed to load vendor support bundle spec: %v\", err)\n\t\treturn nil, err\n\t}\n\n\tif b.Spec.Collectors != nil {\n\t\tsupportBundle.Spec.Collectors = b.DeepCopy().Spec.Collectors\n\t}\n\tif b.Spec.Analyzers != nil {\n\t\tsupportBundle.Spec.Analyzers = b.DeepCopy().Spec.Analyzers\n\t}\n\treturn supportBundle, nil\n}", "func setupDiffReport(r *Report) {\n\tr.format.output = printDiffReport\n\tr.format.changestyles = make(map[string]ChangeStyle)\n\tr.format.changestyles[\"ADD\"] = ChangeStyle{color: \"green\", message: \"has been added:\"}\n\tr.format.changestyles[\"REMOVE\"] = ChangeStyle{color: \"red\", message: \"has been removed:\"}\n\tr.format.changestyles[\"MODIFY\"] = ChangeStyle{color: \"yellow\", message: \"has changed:\"}\n}", "func AlertmanagerSpec() *AlertmanagerSpecApplyConfiguration {\n\treturn 
&AlertmanagerSpecApplyConfiguration{}\n}", "func newPeerAuthenticationWithSpec() *securityv1beta1.PeerAuthentication {\n\tpeerAuthentication := newPeerAuthentication()\n\tpeerAuthentication.Spec.PortLevelMtls = map[uint32]*securityv1beta1apis.PeerAuthentication_MutualTLS{\n\t\ttTargetPort: {\n\t\t\tMode: securityv1beta1apis.PeerAuthentication_MutualTLS_PERMISSIVE,\n\t\t},\n\t}\n\tpeerAuthentication.Spec.Selector = &istiov1beta1apis.WorkloadSelector{\n\t\tMatchLabels: map[string]string{\n\t\t\tapplicationLabelKey: tName,\n\t\t},\n\t}\n\treturn peerAuthentication\n}", "func NewPrintConnector()(*PrintConnector) {\n m := &PrintConnector{\n Entity: *NewEntity(),\n }\n return m\n}", "func makePodSpec(t thanosv1beta1.Receiver) (*corev1.PodSpec, error) {\n\n\tif t.Spec.ReceivePrefix == \"\" {\n\t\tt.Spec.ReceivePrefix = receiverDir\n\t}\n\tif t.Spec.Retention == \"\" {\n\t\tt.Spec.Retention = defaultRetetion\n\t}\n\t// TODO set args to spec\n\tthanosArgs := []string{\n\t\t\"receive\",\n\t\tfmt.Sprintf(\"--tsdb.path=%s\", t.Spec.ReceivePrefix),\n\t\tfmt.Sprintf(\"--tsdb.retention=%s\", t.Spec.Retention),\n\t\tfmt.Sprintf(\"--labels=receive=\\\"%s\\\"\", t.Spec.ReceiveLables),\n\t\tfmt.Sprintf(\"--objstore.config=type: %s\\nconfig:\\n bucket: \\\"%s\\\"\", t.Spec.ObjectStorageType, t.Spec.BucketName),\n\t}\n\tif t.Spec.LogLevel != \"\" && t.Spec.LogLevel != \"info\" {\n\t\tthanosArgs = append(thanosArgs, fmt.Sprintf(\"--log.level=%s\", t.Spec.LogLevel))\n\t}\n\tenv := []corev1.EnvVar{\n\t\t{\n\t\t\tName: \"GOOGLE_APPLICATION_CREDENTIALS\",\n\t\t\tValue: secretsDir + t.Spec.SecretName + \".json\",\n\t\t},\n\t}\n\n\tports := []corev1.ContainerPort{\n\t\t{\n\t\t\tContainerPort: 10902,\n\t\t\tName: \"http\",\n\t\t},\n\t\t{\n\t\t\tContainerPort: 10901,\n\t\t\tName: \"grpc\",\n\t\t},\n\t}\n\n\tif strings.Contains(t.Name, \"receiver\") {\n\t\tports = append(ports, corev1.ContainerPort{\n\t\t\tContainerPort: 19291,\n\t\t\tName: \"receive\",\n\t\t})\n\t}\n\n\t// mount to pod\n\tvolumemounts := []corev1.VolumeMount{\n\t\t{\n\t\t\tName: \"thanos-persistent-storage\",\n\t\t\tMountPath: t.Spec.Retention,\n\t\t},\n\t\t{\n\t\t\tName: \"google-cloud-key\",\n\t\t\tMountPath: secretsDir,\n\t\t},\n\t}\n\n\tcontainers := []corev1.Container{\n\t\t{\n\t\t\tName: \"receiver\",\n\t\t\tImage: *t.Spec.Image,\n\t\t\tArgs: thanosArgs,\n\t\t\tEnv: env,\n\t\t\tPorts: ports,\n\t\t\tVolumeMounts: volumemounts,\n\t\t},\n\t}\n\n\t// Need create json from gcp iam\n\t// https://github.com/orangesys/blueprint/tree/master/prometheus-thanos\n\t// kubectl create secret generic ${SERVICE_ACCOUNT_NAME} --from-file=${SERVICE_ACCOUNT_NAME}.json=${SERVICE_ACCOUNT_NAME}.json\n\t// secret name is thanos-demo-gcs\n\t// TODO setting secret name with spec\n\tvolumes := []corev1.Volume{\n\t\t{\n\t\t\tName: \"google-cloud-key\",\n\t\t\tVolumeSource: corev1.VolumeSource{\n\t\t\t\tSecret: &corev1.SecretVolumeSource{\n\t\t\t\t\tSecretName: t.Spec.SecretName,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\treturn &corev1.PodSpec{\n\t\tTerminationGracePeriodSeconds: &gracePeriodTerm,\n\t\tContainers: containers,\n\t\tVolumes: volumes,\n\t}, nil\n}", "func newWorkBookFromOle2(rs io.ReadSeeker) *WorkBook {\n\tvar wb = &WorkBook{\n\t\trs: rs,\n\t\tref: new(extSheetRef),\n\t\tsheets: make([]*WorkSheet, 0),\n\t\tFormats: make(map[uint16]*Format),\n\t}\n\n\twb.parse(rs)\n\twb.prepare()\n\n\treturn wb\n}", "func NewBootstrapSpec(spec *job.BootstrapSpec) *BootstrapSpec {\n\treturn &BootstrapSpec{\n\t\tContractID: spec.ContractID,\n\t\tRelay: spec.Relay,\n\t\tRelayConfig: 
spec.RelayConfig,\n\t\tBlockchainTimeout: spec.BlockchainTimeout,\n\t\tContractConfigTrackerPollInterval: spec.ContractConfigTrackerPollInterval,\n\t\tContractConfigConfirmations: spec.ContractConfigConfirmations,\n\t\tCreatedAt: spec.CreatedAt,\n\t\tUpdatedAt: spec.UpdatedAt,\n\t}\n}", "func NewWebhookSpec(spec *job.WebhookSpec) *WebhookSpec {\n\treturn &WebhookSpec{\n\t\tCreatedAt: spec.CreatedAt,\n\t\tUpdatedAt: spec.UpdatedAt,\n\t}\n}", "func newDriverV2(options *DriverOptions) *DriverV2 {\n\tklog.Warning(\"Using DriverV2\")\n\tdriver := DriverV2{}\n\tdriver.Name = options.DriverName\n\tdriver.Version = driverVersion\n\tdriver.NodeID = options.NodeID\n\tdriver.VolumeAttachLimit = options.VolumeAttachLimit\n\tdriver.volumeLocks = volumehelper.NewVolumeLocks()\n\tdriver.perfOptimizationEnabled = options.EnablePerfOptimization\n\tdriver.cloudConfigSecretName = options.CloudConfigSecretName\n\tdriver.cloudConfigSecretNamespace = options.CloudConfigSecretNamespace\n\tdriver.customUserAgent = options.CustomUserAgent\n\tdriver.userAgentSuffix = options.UserAgentSuffix\n\tdriver.useCSIProxyGAInterface = options.UseCSIProxyGAInterface\n\tdriver.enableOtelTracing = options.EnableOtelTracing\n\tdriver.ioHandler = azureutils.NewOSIOHandler()\n\tdriver.hostUtil = hostutil.NewHostUtil()\n\n\ttopologyKey = fmt.Sprintf(\"topology.%s/zone\", driver.Name)\n\treturn &driver\n}", "func NewPrometheusRuleSpec() *monitoringv1.PrometheusRuleSpec {\n\trunbookCreator := newRunbookCreator()\n\n\tspec := &monitoringv1.PrometheusRuleSpec{\n\t\tGroups: []monitoringv1.RuleGroup{{\n\t\t\tName: alertRuleGroup,\n\t\t\tRules: []monitoringv1.Rule{\n\t\t\t\tcreateOutOfBandUpdateAlertRule(),\n\t\t\t\tcreateUnsafeModificationAlertRule(),\n\t\t\t\tcreateInstallationNotCompletedAlertRule(),\n\t\t\t\tcreateRequestCPUCoresRule(),\n\t\t\t\tcreateOperatorHealthStatusRule(),\n\t\t\t\tcreateSingleStackIPv6AlertRule(),\n\t\t\t},\n\t\t}},\n\t}\n\n\tfor _, rule := range spec.Groups[0].Rules {\n\t\tif rule.Alert != \"\" {\n\t\t\trule.Annotations[\"runbook_url\"] = runbookCreator.getURL(rule.Alert)\n\t\t\trule.Labels[partOfAlertLabelKey] = partOfAlertLabelValue\n\t\t\trule.Labels[componentAlertLabelKey] = componentAlertLabelValue\n\t\t}\n\t}\n\n\treturn spec\n}", "func NewConnectorStatusDetails()(*ConnectorStatusDetails) {\n m := &ConnectorStatusDetails{\n }\n m.backingStore = ie8677ce2c7e1b4c22e9c3827ecd078d41185424dd9eeb92b7d971ed2d49a392e.BackingStoreFactoryInstance();\n m.SetAdditionalData(make(map[string]any))\n return m\n}", "func newMac2Vnd(mac, vendor string, err error) *Mac2Vnd {\n\tif err != nil {\n\t\treturn &Mac2Vnd{\n\t\t\tError: err,\n\t\t}\n\t}\n\treturn &Mac2Vnd{\n\t\tMac: mac,\n\t\tVendor: vendor,\n\t}\n}", "func newWorker(\n\tm *manager,\n\tthirdComponent *v1alpha1.ThirdComponent,\n\tendpoint v1alpha1.ThirdComponentEndpointStatus) *worker {\n\n\tw := &worker{\n\t\tstopCh: make(chan struct{}, 1), // Buffer so stop() can be non-blocking.\n\t\tprobeManager: m,\n\t\tthirdComponent: thirdComponent,\n\t\tendpoint: endpoint,\n\t}\n\n\tw.spec = thirdComponent.Spec.Probe\n\tw.resultsManager = m.readinessManager\n\tw.initialValue = results.Failure\n\n\tbasicMetricLabels := metrics.Labels{\n\t\t\"endpoint\": string(w.endpoint.Address),\n\t\t\"pod\": w.thirdComponent.Name,\n\t\t\"namespace\": w.thirdComponent.Namespace,\n\t}\n\n\tw.proberResultsSuccessfulMetricLabels = deepCopyPrometheusLabels(basicMetricLabels)\n\tw.proberResultsSuccessfulMetricLabels[\"result\"] = probeResultSuccessful\n\n\tw.proberResultsFailedMetricLabels = 
deepCopyPrometheusLabels(basicMetricLabels)\n\tw.proberResultsFailedMetricLabels[\"result\"] = probeResultFailed\n\n\tw.proberResultsUnknownMetricLabels = deepCopyPrometheusLabels(basicMetricLabels)\n\tw.proberResultsUnknownMetricLabels[\"result\"] = probeResultUnknown\n\n\treturn w\n}", "func New(opts ...Option) (oci.SpecModifier, error) {\n\tb := &builder{}\n\tfor _, opt := range opts {\n\t\topt(b)\n\t}\n\tif b.logger == nil {\n\t\tb.logger = logger.New()\n\t}\n\treturn b.build()\n}", "func newResource() *resource.Resource {\n\tr, _ := resource.Merge(\n\t\tresource.Default(),\n\t\tresource.NewWithAttributes(\n\t\t\tsemconv.SchemaURL,\n\t\t\tsemconv.ServiceNameKey.String(\"opentelemetry-server\"),\n\t\t\tsemconv.ServiceVersionKey.String(\"v0.1.0\"),\n\t\t\tattribute.String(\"environment\", \"demo\"),\n\t\t),\n\t)\n\treturn r\n}", "func newReconcilerWithoutAPIClient(mgr manager.Manager, apiClient *swagger.APIClient) reconcile.Reconciler {\n\treturn &ReconcileNotebookJob{\n\t\tClient: mgr.GetClient(),\n\t\tscheme: mgr.GetScheme(),\n\t\trecorder: mgr.GetRecorder(\"notebookjob-controller\"),\n\t\tapiClient: apiClient,\n\t}\n}", "func TestConfigure_NewLeafASN(t *testing.T) {\n\tExpectedASN := \"65000\"\n\n\tMockLeafDeviceAdapter := mock.DeviceAdapter{\n\t\tMockGetInterfaces: func(FabricID uint, DeviceID uint, DeviceIP string) ([]domain.Interface, error) {\n\n\t\t\treturn []domain.Interface{domain.Interface{FabricID: FabricID, DeviceID: DeviceID,\n\t\t\t\tIntType: \"ethernet\", IntName: \"1/11\", Mac: \"M1\", ConfigState: \"up\"}}, nil\n\n\t\t},\n\t\tMockGetLLDPs: func(FabricID uint, DeviceID uint, DeviceIP string) ([]domain.LLDP, error) {\n\n\t\t\treturn []domain.LLDP{domain.LLDP{FabricID: FabricID, DeviceID: DeviceID,\n\t\t\t\tLocalIntType: \"ethernet\", LocalIntName: \"1/11\", LocalIntMac: \"M1\",\n\t\t\t\tRemoteIntType: \"ethernet\", RemoteIntName: \"1/22\", RemoteIntMac: \"M2\"}}, nil\n\n\t\t},\n\t}\n\n\tdatabase.Setup(constants.TESTDBLocation)\n\tdefer cleanupDB(database.GetWorkingInstance())\n\n\tDatabaseRepository := gateway.DatabaseRepository{Database: database.GetWorkingInstance()}\n\tdevUC := usecase.DeviceInteractor{Db: &DatabaseRepository, DeviceAdapterFactory: mock.GetDeviceAdapterFactory(MockLeafDeviceAdapter)}\n\tdevUC.AddFabric(context.Background(), MockFabricName)\n\n\tresp, err := devUC.AddDevices(context.Background(), MockFabricName, []string{MockLeaf1IP}, []string{},\n\t\tUserName, Password, false)\n\tassert.Contains(t, resp, usecase.AddDeviceResponse{FabricName: MockFabricName, FabricID: 1, IPAddress: MockLeaf1IP, Role: usecase.LeafRole})\n\n\tassert.Nil(t, err)\n\tswitchConfig, err := DatabaseRepository.GetSwitchConfigOnDeviceIP(MockFabricName, MockLeaf1IP)\n\t//Verify ASN is the first ASN from the Pool\n\tassert.Equal(t, ExpectedASN, switchConfig.LocalAS)\n\t//Verify ASN is to be created\n\tassert.Equal(t, domain.ConfigCreate, switchConfig.ASConfigType)\n\n}", "func (*FlatConfig) HCL2Spec() map[string]hcldec.Spec {\n\ts := map[string]hcldec.Spec{\n\t\t\"packer_build_name\": &hcldec.AttrSpec{Name: \"packer_build_name\", Type: cty.String, Required: false},\n\t\t\"packer_builder_type\": &hcldec.AttrSpec{Name: \"packer_builder_type\", Type: cty.String, Required: false},\n\t\t\"packer_debug\": &hcldec.AttrSpec{Name: \"packer_debug\", Type: cty.Bool, Required: false},\n\t\t\"packer_force\": &hcldec.AttrSpec{Name: \"packer_force\", Type: cty.Bool, Required: false},\n\t\t\"packer_on_error\": &hcldec.AttrSpec{Name: \"packer_on_error\", Type: cty.String, Required: 
false},\n\t\t\"packer_user_variables\": &hcldec.BlockAttrsSpec{TypeName: \"packer_user_variables\", ElementType: cty.String, Required: false},\n\t\t\"packer_sensitive_variables\": &hcldec.AttrSpec{Name: \"packer_sensitive_variables\", Type: cty.List(cty.String), Required: false},\n\t\t\"access_key\": &hcldec.AttrSpec{Name: \"access_key\", Type: cty.String, Required: false},\n\t\t\"assume_role\": &hcldec.BlockSpec{TypeName: \"assume_role\", Nested: hcldec.ObjectSpec((*common.FlatAssumeRoleConfig)(nil).HCL2Spec())},\n\t\t\"custom_endpoint_ec2\": &hcldec.AttrSpec{Name: \"custom_endpoint_ec2\", Type: cty.String, Required: false},\n\t\t\"shared_credentials_file\": &hcldec.AttrSpec{Name: \"shared_credentials_file\", Type: cty.String, Required: false},\n\t\t\"decode_authorization_messages\": &hcldec.AttrSpec{Name: \"decode_authorization_messages\", Type: cty.Bool, Required: false},\n\t\t\"insecure_skip_tls_verify\": &hcldec.AttrSpec{Name: \"insecure_skip_tls_verify\", Type: cty.Bool, Required: false},\n\t\t\"max_retries\": &hcldec.AttrSpec{Name: \"max_retries\", Type: cty.Number, Required: false},\n\t\t\"mfa_code\": &hcldec.AttrSpec{Name: \"mfa_code\", Type: cty.String, Required: false},\n\t\t\"profile\": &hcldec.AttrSpec{Name: \"profile\", Type: cty.String, Required: false},\n\t\t\"region\": &hcldec.AttrSpec{Name: \"region\", Type: cty.String, Required: false},\n\t\t\"secret_key\": &hcldec.AttrSpec{Name: \"secret_key\", Type: cty.String, Required: false},\n\t\t\"skip_region_validation\": &hcldec.AttrSpec{Name: \"skip_region_validation\", Type: cty.Bool, Required: false},\n\t\t\"skip_metadata_api_check\": &hcldec.AttrSpec{Name: \"skip_metadata_api_check\", Type: cty.Bool, Required: false},\n\t\t\"skip_credential_validation\": &hcldec.AttrSpec{Name: \"skip_credential_validation\", Type: cty.Bool, Required: false},\n\t\t\"token\": &hcldec.AttrSpec{Name: \"token\", Type: cty.String, Required: false},\n\t\t\"vault_aws_engine\": &hcldec.BlockSpec{TypeName: \"vault_aws_engine\", Nested: hcldec.ObjectSpec((*common.FlatVaultAWSEngineOptions)(nil).HCL2Spec())},\n\t\t\"aws_polling\": &hcldec.BlockSpec{TypeName: \"aws_polling\", Nested: hcldec.ObjectSpec((*common.FlatAWSPollingConfig)(nil).HCL2Spec())},\n\t\t\"identifier\": &hcldec.AttrSpec{Name: \"identifier\", Type: cty.String, Required: false},\n\t\t\"keep_releases\": &hcldec.AttrSpec{Name: \"keep_releases\", Type: cty.Number, Required: false},\n\t\t\"keep_days\": &hcldec.AttrSpec{Name: \"keep_days\", Type: cty.Number, Required: false},\n\t\t\"regions\": &hcldec.AttrSpec{Name: \"regions\", Type: cty.List(cty.String), Required: false},\n\t\t\"dry_run\": &hcldec.AttrSpec{Name: \"dry_run\", Type: cty.Bool, Required: false},\n\t}\n\treturn s\n}", "func newPrometheusSpec(name, addr string) cap.SupervisorSpec {\n\treturn cap.NewSupervisorSpec(\n\t\tname,\n\t\t// this function builds an HTTP Server, this functionality requires more\n\t\t// than a goroutine given the only way to stop a http server is to call the\n\t\t// http.Shutdown function on a seperate goroutine\n\t\tfunc() ([]cap.Node, cap.CleanupResourcesFn, error) {\n\t\t\tserver := buildPrometheusHTTPServer(addr)\n\n\t\t\t// CAUTION: The order here matters, we need waitUntilDone to start last so\n\t\t\t// that it can terminate first, if this is not the case the\n\t\t\t// listenAndServeHTTPWorker child will never terminate.\n\t\t\t//\n\t\t\t// DISCLAIMER: The caution above _is not_ a capataz requirement, but a\n\t\t\t// requirement of net/https' API\n\t\t\tnodes := 
[]cap.Node{\n\t\t\t\tlistenAndServeHTTPWorker(server),\n\t\t\t\twaitUntilDoneHTTPWorker(server),\n\t\t\t}\n\n\t\t\tcleanupServer := func() error {\n\t\t\t\treturn server.Close()\n\t\t\t}\n\n\t\t\treturn nodes, cleanupServer, nil\n\t\t},\n\t)\n}", "func createLogicalPlan(spec *operation.Spec) (*Spec, error) {\n\tnodes := make(map[operation.NodeID]Node, len(spec.Operations))\n\tadmin := administration{now: spec.Now}\n\n\tplan := NewPlanSpec()\n\tplan.Resources = spec.Resources\n\tplan.Now = spec.Now\n\n\tv := &fluxSpecVisitor{\n\t\ta: admin,\n\t\tspec: spec,\n\t\tplan: plan,\n\t\tnodes: nodes,\n\t\tyieldNames: make(map[string]struct{}),\n\t}\n\n\tif err := spec.Walk(v.visitOperation); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn v.plan, nil\n}", "func newPrometheusReceiver(set receiver.CreateSettings, cfg *Config, next consumer.Metrics) *pReceiver {\n\tpr := &pReceiver{\n\t\tcfg: cfg,\n\t\tconsumer: next,\n\t\tsettings: set,\n\t\tconfigLoaded: make(chan struct{}),\n\t\ttargetAllocatorStop: make(chan struct{}),\n\t}\n\treturn pr\n}", "func newResourceDelta(\n\ta *resource,\n\tb *resource,\n) *ackcompare.Delta {\n\tdelta := ackcompare.NewDelta()\n\tif (a == nil && b != nil) ||\n\t\t(a != nil && b == nil) {\n\t\tdelta.Add(\"\", a, b)\n\t\treturn delta\n\t}\n\n\tif ackcompare.HasNilDifference(a.ko.Spec.MonitoringScheduleConfig, b.ko.Spec.MonitoringScheduleConfig) {\n\t\tdelta.Add(\"Spec.MonitoringScheduleConfig\", a.ko.Spec.MonitoringScheduleConfig, b.ko.Spec.MonitoringScheduleConfig)\n\t} else if a.ko.Spec.MonitoringScheduleConfig != nil && b.ko.Spec.MonitoringScheduleConfig != nil {\n\t\tif ackcompare.HasNilDifference(a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition) {\n\t\t\tdelta.Add(\"Spec.MonitoringScheduleConfig.MonitoringJobDefinition\", a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition)\n\t\t} else if a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition != nil && b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition != nil {\n\t\t\tif ackcompare.HasNilDifference(a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.BaselineConfig, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.BaselineConfig) {\n\t\t\t\tdelta.Add(\"Spec.MonitoringScheduleConfig.MonitoringJobDefinition.BaselineConfig\", a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.BaselineConfig, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.BaselineConfig)\n\t\t\t} else if a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.BaselineConfig != nil && b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.BaselineConfig != nil {\n\t\t\t\tif ackcompare.HasNilDifference(a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.BaselineConfig.BaseliningJobName, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.BaselineConfig.BaseliningJobName) {\n\t\t\t\t\tdelta.Add(\"Spec.MonitoringScheduleConfig.MonitoringJobDefinition.BaselineConfig.BaseliningJobName\", a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.BaselineConfig.BaseliningJobName, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.BaselineConfig.BaseliningJobName)\n\t\t\t\t} else if a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.BaselineConfig.BaseliningJobName != nil && b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.BaselineConfig.BaseliningJobName != nil {\n\t\t\t\t\tif 
*a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.BaselineConfig.BaseliningJobName != *b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.BaselineConfig.BaseliningJobName {\n\t\t\t\t\t\tdelta.Add(\"Spec.MonitoringScheduleConfig.MonitoringJobDefinition.BaselineConfig.BaseliningJobName\", a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.BaselineConfig.BaseliningJobName, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.BaselineConfig.BaseliningJobName)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif ackcompare.HasNilDifference(a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.BaselineConfig.ConstraintsResource, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.BaselineConfig.ConstraintsResource) {\n\t\t\t\t\tdelta.Add(\"Spec.MonitoringScheduleConfig.MonitoringJobDefinition.BaselineConfig.ConstraintsResource\", a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.BaselineConfig.ConstraintsResource, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.BaselineConfig.ConstraintsResource)\n\t\t\t\t} else if a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.BaselineConfig.ConstraintsResource != nil && b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.BaselineConfig.ConstraintsResource != nil {\n\t\t\t\t\tif ackcompare.HasNilDifference(a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.BaselineConfig.ConstraintsResource.S3URI, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.BaselineConfig.ConstraintsResource.S3URI) {\n\t\t\t\t\t\tdelta.Add(\"Spec.MonitoringScheduleConfig.MonitoringJobDefinition.BaselineConfig.ConstraintsResource.S3URI\", a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.BaselineConfig.ConstraintsResource.S3URI, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.BaselineConfig.ConstraintsResource.S3URI)\n\t\t\t\t\t} else if a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.BaselineConfig.ConstraintsResource.S3URI != nil && b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.BaselineConfig.ConstraintsResource.S3URI != nil {\n\t\t\t\t\t\tif *a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.BaselineConfig.ConstraintsResource.S3URI != *b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.BaselineConfig.ConstraintsResource.S3URI {\n\t\t\t\t\t\t\tdelta.Add(\"Spec.MonitoringScheduleConfig.MonitoringJobDefinition.BaselineConfig.ConstraintsResource.S3URI\", a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.BaselineConfig.ConstraintsResource.S3URI, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.BaselineConfig.ConstraintsResource.S3URI)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif ackcompare.HasNilDifference(a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.BaselineConfig.StatisticsResource, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.BaselineConfig.StatisticsResource) {\n\t\t\t\t\tdelta.Add(\"Spec.MonitoringScheduleConfig.MonitoringJobDefinition.BaselineConfig.StatisticsResource\", a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.BaselineConfig.StatisticsResource, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.BaselineConfig.StatisticsResource)\n\t\t\t\t} else if a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.BaselineConfig.StatisticsResource != nil && b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.BaselineConfig.StatisticsResource != nil {\n\t\t\t\t\tif 
ackcompare.HasNilDifference(a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.BaselineConfig.StatisticsResource.S3URI, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.BaselineConfig.StatisticsResource.S3URI) {\n\t\t\t\t\t\tdelta.Add(\"Spec.MonitoringScheduleConfig.MonitoringJobDefinition.BaselineConfig.StatisticsResource.S3URI\", a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.BaselineConfig.StatisticsResource.S3URI, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.BaselineConfig.StatisticsResource.S3URI)\n\t\t\t\t\t} else if a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.BaselineConfig.StatisticsResource.S3URI != nil && b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.BaselineConfig.StatisticsResource.S3URI != nil {\n\t\t\t\t\t\tif *a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.BaselineConfig.StatisticsResource.S3URI != *b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.BaselineConfig.StatisticsResource.S3URI {\n\t\t\t\t\t\t\tdelta.Add(\"Spec.MonitoringScheduleConfig.MonitoringJobDefinition.BaselineConfig.StatisticsResource.S3URI\", a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.BaselineConfig.StatisticsResource.S3URI, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.BaselineConfig.StatisticsResource.S3URI)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tif ackcompare.HasNilDifference(a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.Environment, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.Environment) {\n\t\t\t\tdelta.Add(\"Spec.MonitoringScheduleConfig.MonitoringJobDefinition.Environment\", a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.Environment, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.Environment)\n\t\t\t} else if a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.Environment != nil && b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.Environment != nil {\n\t\t\t\tif !ackcompare.MapStringStringPEqual(a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.Environment, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.Environment) {\n\t\t\t\t\tdelta.Add(\"Spec.MonitoringScheduleConfig.MonitoringJobDefinition.Environment\", a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.Environment, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.Environment)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif ackcompare.HasNilDifference(a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringAppSpecification, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringAppSpecification) {\n\t\t\t\tdelta.Add(\"Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringAppSpecification\", a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringAppSpecification, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringAppSpecification)\n\t\t\t} else if a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringAppSpecification != nil && b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringAppSpecification != nil {\n\t\t\t\tif !ackcompare.SliceStringPEqual(a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringAppSpecification.ContainerArguments, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringAppSpecification.ContainerArguments) {\n\t\t\t\t\tdelta.Add(\"Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringAppSpecification.ContainerArguments\", 
a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringAppSpecification.ContainerArguments, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringAppSpecification.ContainerArguments)\n\t\t\t\t}\n\t\t\t\tif !ackcompare.SliceStringPEqual(a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringAppSpecification.ContainerEntrypoint, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringAppSpecification.ContainerEntrypoint) {\n\t\t\t\t\tdelta.Add(\"Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringAppSpecification.ContainerEntrypoint\", a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringAppSpecification.ContainerEntrypoint, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringAppSpecification.ContainerEntrypoint)\n\t\t\t\t}\n\t\t\t\tif ackcompare.HasNilDifference(a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringAppSpecification.ImageURI, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringAppSpecification.ImageURI) {\n\t\t\t\t\tdelta.Add(\"Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringAppSpecification.ImageURI\", a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringAppSpecification.ImageURI, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringAppSpecification.ImageURI)\n\t\t\t\t} else if a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringAppSpecification.ImageURI != nil && b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringAppSpecification.ImageURI != nil {\n\t\t\t\t\tif *a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringAppSpecification.ImageURI != *b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringAppSpecification.ImageURI {\n\t\t\t\t\t\tdelta.Add(\"Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringAppSpecification.ImageURI\", a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringAppSpecification.ImageURI, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringAppSpecification.ImageURI)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif ackcompare.HasNilDifference(a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringAppSpecification.PostAnalyticsProcessorSourceURI, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringAppSpecification.PostAnalyticsProcessorSourceURI) {\n\t\t\t\t\tdelta.Add(\"Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringAppSpecification.PostAnalyticsProcessorSourceURI\", a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringAppSpecification.PostAnalyticsProcessorSourceURI, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringAppSpecification.PostAnalyticsProcessorSourceURI)\n\t\t\t\t} else if a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringAppSpecification.PostAnalyticsProcessorSourceURI != nil && b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringAppSpecification.PostAnalyticsProcessorSourceURI != nil {\n\t\t\t\t\tif *a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringAppSpecification.PostAnalyticsProcessorSourceURI != *b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringAppSpecification.PostAnalyticsProcessorSourceURI {\n\t\t\t\t\t\tdelta.Add(\"Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringAppSpecification.PostAnalyticsProcessorSourceURI\", 
a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringAppSpecification.PostAnalyticsProcessorSourceURI, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringAppSpecification.PostAnalyticsProcessorSourceURI)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif ackcompare.HasNilDifference(a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringAppSpecification.RecordPreprocessorSourceURI, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringAppSpecification.RecordPreprocessorSourceURI) {\n\t\t\t\t\tdelta.Add(\"Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringAppSpecification.RecordPreprocessorSourceURI\", a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringAppSpecification.RecordPreprocessorSourceURI, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringAppSpecification.RecordPreprocessorSourceURI)\n\t\t\t\t} else if a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringAppSpecification.RecordPreprocessorSourceURI != nil && b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringAppSpecification.RecordPreprocessorSourceURI != nil {\n\t\t\t\t\tif *a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringAppSpecification.RecordPreprocessorSourceURI != *b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringAppSpecification.RecordPreprocessorSourceURI {\n\t\t\t\t\t\tdelta.Add(\"Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringAppSpecification.RecordPreprocessorSourceURI\", a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringAppSpecification.RecordPreprocessorSourceURI, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringAppSpecification.RecordPreprocessorSourceURI)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tif !reflect.DeepEqual(a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringInputs, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringInputs) {\n\t\t\t\tdelta.Add(\"Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringInputs\", a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringInputs, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringInputs)\n\t\t\t}\n\t\t\tif ackcompare.HasNilDifference(a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringOutputConfig, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringOutputConfig) {\n\t\t\t\tdelta.Add(\"Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringOutputConfig\", a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringOutputConfig, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringOutputConfig)\n\t\t\t} else if a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringOutputConfig != nil && b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringOutputConfig != nil {\n\t\t\t\tif ackcompare.HasNilDifference(a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringOutputConfig.KMSKeyID, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringOutputConfig.KMSKeyID) {\n\t\t\t\t\tdelta.Add(\"Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringOutputConfig.KMSKeyID\", a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringOutputConfig.KMSKeyID, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringOutputConfig.KMSKeyID)\n\t\t\t\t} else if 
a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringOutputConfig.KMSKeyID != nil && b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringOutputConfig.KMSKeyID != nil {\n\t\t\t\t\tif *a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringOutputConfig.KMSKeyID != *b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringOutputConfig.KMSKeyID {\n\t\t\t\t\t\tdelta.Add(\"Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringOutputConfig.KMSKeyID\", a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringOutputConfig.KMSKeyID, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringOutputConfig.KMSKeyID)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif !reflect.DeepEqual(a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringOutputConfig.MonitoringOutputs, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringOutputConfig.MonitoringOutputs) {\n\t\t\t\t\tdelta.Add(\"Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringOutputConfig.MonitoringOutputs\", a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringOutputConfig.MonitoringOutputs, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringOutputConfig.MonitoringOutputs)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif ackcompare.HasNilDifference(a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources) {\n\t\t\t\tdelta.Add(\"Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources\", a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources)\n\t\t\t} else if a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources != nil && b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources != nil {\n\t\t\t\tif ackcompare.HasNilDifference(a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources.ClusterConfig, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources.ClusterConfig) {\n\t\t\t\t\tdelta.Add(\"Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources.ClusterConfig\", a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources.ClusterConfig, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources.ClusterConfig)\n\t\t\t\t} else if a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources.ClusterConfig != nil && b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources.ClusterConfig != nil {\n\t\t\t\t\tif ackcompare.HasNilDifference(a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources.ClusterConfig.InstanceCount, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources.ClusterConfig.InstanceCount) {\n\t\t\t\t\t\tdelta.Add(\"Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources.ClusterConfig.InstanceCount\", a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources.ClusterConfig.InstanceCount, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources.ClusterConfig.InstanceCount)\n\t\t\t\t\t} else if a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources.ClusterConfig.InstanceCount != nil && 
b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources.ClusterConfig.InstanceCount != nil {\n\t\t\t\t\t\tif *a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources.ClusterConfig.InstanceCount != *b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources.ClusterConfig.InstanceCount {\n\t\t\t\t\t\t\tdelta.Add(\"Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources.ClusterConfig.InstanceCount\", a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources.ClusterConfig.InstanceCount, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources.ClusterConfig.InstanceCount)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tif ackcompare.HasNilDifference(a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources.ClusterConfig.InstanceType, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources.ClusterConfig.InstanceType) {\n\t\t\t\t\t\tdelta.Add(\"Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources.ClusterConfig.InstanceType\", a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources.ClusterConfig.InstanceType, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources.ClusterConfig.InstanceType)\n\t\t\t\t\t} else if a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources.ClusterConfig.InstanceType != nil && b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources.ClusterConfig.InstanceType != nil {\n\t\t\t\t\t\tif *a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources.ClusterConfig.InstanceType != *b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources.ClusterConfig.InstanceType {\n\t\t\t\t\t\t\tdelta.Add(\"Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources.ClusterConfig.InstanceType\", a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources.ClusterConfig.InstanceType, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources.ClusterConfig.InstanceType)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tif ackcompare.HasNilDifference(a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources.ClusterConfig.VolumeKMSKeyID, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources.ClusterConfig.VolumeKMSKeyID) {\n\t\t\t\t\t\tdelta.Add(\"Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources.ClusterConfig.VolumeKMSKeyID\", a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources.ClusterConfig.VolumeKMSKeyID, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources.ClusterConfig.VolumeKMSKeyID)\n\t\t\t\t\t} else if a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources.ClusterConfig.VolumeKMSKeyID != nil && b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources.ClusterConfig.VolumeKMSKeyID != nil {\n\t\t\t\t\t\tif *a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources.ClusterConfig.VolumeKMSKeyID != *b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources.ClusterConfig.VolumeKMSKeyID {\n\t\t\t\t\t\t\tdelta.Add(\"Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources.ClusterConfig.VolumeKMSKeyID\", a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources.ClusterConfig.VolumeKMSKeyID, 
b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources.ClusterConfig.VolumeKMSKeyID)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tif ackcompare.HasNilDifference(a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources.ClusterConfig.VolumeSizeInGB, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources.ClusterConfig.VolumeSizeInGB) {\n\t\t\t\t\t\tdelta.Add(\"Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources.ClusterConfig.VolumeSizeInGB\", a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources.ClusterConfig.VolumeSizeInGB, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources.ClusterConfig.VolumeSizeInGB)\n\t\t\t\t\t} else if a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources.ClusterConfig.VolumeSizeInGB != nil && b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources.ClusterConfig.VolumeSizeInGB != nil {\n\t\t\t\t\t\tif *a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources.ClusterConfig.VolumeSizeInGB != *b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources.ClusterConfig.VolumeSizeInGB {\n\t\t\t\t\t\t\tdelta.Add(\"Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources.ClusterConfig.VolumeSizeInGB\", a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources.ClusterConfig.VolumeSizeInGB, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.MonitoringResources.ClusterConfig.VolumeSizeInGB)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tif ackcompare.HasNilDifference(a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.NetworkConfig, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.NetworkConfig) {\n\t\t\t\tdelta.Add(\"Spec.MonitoringScheduleConfig.MonitoringJobDefinition.NetworkConfig\", a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.NetworkConfig, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.NetworkConfig)\n\t\t\t} else if a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.NetworkConfig != nil && b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.NetworkConfig != nil {\n\t\t\t\tif ackcompare.HasNilDifference(a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.NetworkConfig.EnableInterContainerTrafficEncryption, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.NetworkConfig.EnableInterContainerTrafficEncryption) {\n\t\t\t\t\tdelta.Add(\"Spec.MonitoringScheduleConfig.MonitoringJobDefinition.NetworkConfig.EnableInterContainerTrafficEncryption\", a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.NetworkConfig.EnableInterContainerTrafficEncryption, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.NetworkConfig.EnableInterContainerTrafficEncryption)\n\t\t\t\t} else if a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.NetworkConfig.EnableInterContainerTrafficEncryption != nil && b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.NetworkConfig.EnableInterContainerTrafficEncryption != nil {\n\t\t\t\t\tif *a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.NetworkConfig.EnableInterContainerTrafficEncryption != *b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.NetworkConfig.EnableInterContainerTrafficEncryption {\n\t\t\t\t\t\tdelta.Add(\"Spec.MonitoringScheduleConfig.MonitoringJobDefinition.NetworkConfig.EnableInterContainerTrafficEncryption\", 
a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.NetworkConfig.EnableInterContainerTrafficEncryption, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.NetworkConfig.EnableInterContainerTrafficEncryption)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif ackcompare.HasNilDifference(a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.NetworkConfig.EnableNetworkIsolation, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.NetworkConfig.EnableNetworkIsolation) {\n\t\t\t\t\tdelta.Add(\"Spec.MonitoringScheduleConfig.MonitoringJobDefinition.NetworkConfig.EnableNetworkIsolation\", a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.NetworkConfig.EnableNetworkIsolation, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.NetworkConfig.EnableNetworkIsolation)\n\t\t\t\t} else if a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.NetworkConfig.EnableNetworkIsolation != nil && b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.NetworkConfig.EnableNetworkIsolation != nil {\n\t\t\t\t\tif *a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.NetworkConfig.EnableNetworkIsolation != *b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.NetworkConfig.EnableNetworkIsolation {\n\t\t\t\t\t\tdelta.Add(\"Spec.MonitoringScheduleConfig.MonitoringJobDefinition.NetworkConfig.EnableNetworkIsolation\", a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.NetworkConfig.EnableNetworkIsolation, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.NetworkConfig.EnableNetworkIsolation)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif ackcompare.HasNilDifference(a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.NetworkConfig.VPCConfig, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.NetworkConfig.VPCConfig) {\n\t\t\t\t\tdelta.Add(\"Spec.MonitoringScheduleConfig.MonitoringJobDefinition.NetworkConfig.VPCConfig\", a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.NetworkConfig.VPCConfig, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.NetworkConfig.VPCConfig)\n\t\t\t\t} else if a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.NetworkConfig.VPCConfig != nil && b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.NetworkConfig.VPCConfig != nil {\n\t\t\t\t\tif !ackcompare.SliceStringPEqual(a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.NetworkConfig.VPCConfig.SecurityGroupIDs, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.NetworkConfig.VPCConfig.SecurityGroupIDs) {\n\t\t\t\t\t\tdelta.Add(\"Spec.MonitoringScheduleConfig.MonitoringJobDefinition.NetworkConfig.VPCConfig.SecurityGroupIDs\", a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.NetworkConfig.VPCConfig.SecurityGroupIDs, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.NetworkConfig.VPCConfig.SecurityGroupIDs)\n\t\t\t\t\t}\n\t\t\t\t\tif !ackcompare.SliceStringPEqual(a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.NetworkConfig.VPCConfig.Subnets, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.NetworkConfig.VPCConfig.Subnets) {\n\t\t\t\t\t\tdelta.Add(\"Spec.MonitoringScheduleConfig.MonitoringJobDefinition.NetworkConfig.VPCConfig.Subnets\", a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.NetworkConfig.VPCConfig.Subnets, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.NetworkConfig.VPCConfig.Subnets)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tif ackcompare.HasNilDifference(a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.RoleARN, 
b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.RoleARN) {\n\t\t\t\tdelta.Add(\"Spec.MonitoringScheduleConfig.MonitoringJobDefinition.RoleARN\", a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.RoleARN, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.RoleARN)\n\t\t\t} else if a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.RoleARN != nil && b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.RoleARN != nil {\n\t\t\t\tif *a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.RoleARN != *b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.RoleARN {\n\t\t\t\t\tdelta.Add(\"Spec.MonitoringScheduleConfig.MonitoringJobDefinition.RoleARN\", a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.RoleARN, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.RoleARN)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif ackcompare.HasNilDifference(a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.StoppingCondition, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.StoppingCondition) {\n\t\t\t\tdelta.Add(\"Spec.MonitoringScheduleConfig.MonitoringJobDefinition.StoppingCondition\", a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.StoppingCondition, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.StoppingCondition)\n\t\t\t} else if a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.StoppingCondition != nil && b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.StoppingCondition != nil {\n\t\t\t\tif ackcompare.HasNilDifference(a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.StoppingCondition.MaxRuntimeInSeconds, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.StoppingCondition.MaxRuntimeInSeconds) {\n\t\t\t\t\tdelta.Add(\"Spec.MonitoringScheduleConfig.MonitoringJobDefinition.StoppingCondition.MaxRuntimeInSeconds\", a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.StoppingCondition.MaxRuntimeInSeconds, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.StoppingCondition.MaxRuntimeInSeconds)\n\t\t\t\t} else if a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.StoppingCondition.MaxRuntimeInSeconds != nil && b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.StoppingCondition.MaxRuntimeInSeconds != nil {\n\t\t\t\t\tif *a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.StoppingCondition.MaxRuntimeInSeconds != *b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.StoppingCondition.MaxRuntimeInSeconds {\n\t\t\t\t\t\tdelta.Add(\"Spec.MonitoringScheduleConfig.MonitoringJobDefinition.StoppingCondition.MaxRuntimeInSeconds\", a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.StoppingCondition.MaxRuntimeInSeconds, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinition.StoppingCondition.MaxRuntimeInSeconds)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif ackcompare.HasNilDifference(a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinitionName, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinitionName) {\n\t\t\tdelta.Add(\"Spec.MonitoringScheduleConfig.MonitoringJobDefinitionName\", a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinitionName, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinitionName)\n\t\t} else if a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinitionName != nil && b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinitionName != nil {\n\t\t\tif *a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinitionName != *b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinitionName 
{\n\t\t\t\tdelta.Add(\"Spec.MonitoringScheduleConfig.MonitoringJobDefinitionName\", a.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinitionName, b.ko.Spec.MonitoringScheduleConfig.MonitoringJobDefinitionName)\n\t\t\t}\n\t\t}\n\t\tif ackcompare.HasNilDifference(a.ko.Spec.MonitoringScheduleConfig.MonitoringType, b.ko.Spec.MonitoringScheduleConfig.MonitoringType) {\n\t\t\tdelta.Add(\"Spec.MonitoringScheduleConfig.MonitoringType\", a.ko.Spec.MonitoringScheduleConfig.MonitoringType, b.ko.Spec.MonitoringScheduleConfig.MonitoringType)\n\t\t} else if a.ko.Spec.MonitoringScheduleConfig.MonitoringType != nil && b.ko.Spec.MonitoringScheduleConfig.MonitoringType != nil {\n\t\t\tif *a.ko.Spec.MonitoringScheduleConfig.MonitoringType != *b.ko.Spec.MonitoringScheduleConfig.MonitoringType {\n\t\t\t\tdelta.Add(\"Spec.MonitoringScheduleConfig.MonitoringType\", a.ko.Spec.MonitoringScheduleConfig.MonitoringType, b.ko.Spec.MonitoringScheduleConfig.MonitoringType)\n\t\t\t}\n\t\t}\n\t\tif ackcompare.HasNilDifference(a.ko.Spec.MonitoringScheduleConfig.ScheduleConfig, b.ko.Spec.MonitoringScheduleConfig.ScheduleConfig) {\n\t\t\tdelta.Add(\"Spec.MonitoringScheduleConfig.ScheduleConfig\", a.ko.Spec.MonitoringScheduleConfig.ScheduleConfig, b.ko.Spec.MonitoringScheduleConfig.ScheduleConfig)\n\t\t} else if a.ko.Spec.MonitoringScheduleConfig.ScheduleConfig != nil && b.ko.Spec.MonitoringScheduleConfig.ScheduleConfig != nil {\n\t\t\tif ackcompare.HasNilDifference(a.ko.Spec.MonitoringScheduleConfig.ScheduleConfig.ScheduleExpression, b.ko.Spec.MonitoringScheduleConfig.ScheduleConfig.ScheduleExpression) {\n\t\t\t\tdelta.Add(\"Spec.MonitoringScheduleConfig.ScheduleConfig.ScheduleExpression\", a.ko.Spec.MonitoringScheduleConfig.ScheduleConfig.ScheduleExpression, b.ko.Spec.MonitoringScheduleConfig.ScheduleConfig.ScheduleExpression)\n\t\t\t} else if a.ko.Spec.MonitoringScheduleConfig.ScheduleConfig.ScheduleExpression != nil && b.ko.Spec.MonitoringScheduleConfig.ScheduleConfig.ScheduleExpression != nil {\n\t\t\t\tif *a.ko.Spec.MonitoringScheduleConfig.ScheduleConfig.ScheduleExpression != *b.ko.Spec.MonitoringScheduleConfig.ScheduleConfig.ScheduleExpression {\n\t\t\t\t\tdelta.Add(\"Spec.MonitoringScheduleConfig.ScheduleConfig.ScheduleExpression\", a.ko.Spec.MonitoringScheduleConfig.ScheduleConfig.ScheduleExpression, b.ko.Spec.MonitoringScheduleConfig.ScheduleConfig.ScheduleExpression)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tif ackcompare.HasNilDifference(a.ko.Spec.MonitoringScheduleName, b.ko.Spec.MonitoringScheduleName) {\n\t\tdelta.Add(\"Spec.MonitoringScheduleName\", a.ko.Spec.MonitoringScheduleName, b.ko.Spec.MonitoringScheduleName)\n\t} else if a.ko.Spec.MonitoringScheduleName != nil && b.ko.Spec.MonitoringScheduleName != nil {\n\t\tif *a.ko.Spec.MonitoringScheduleName != *b.ko.Spec.MonitoringScheduleName {\n\t\t\tdelta.Add(\"Spec.MonitoringScheduleName\", a.ko.Spec.MonitoringScheduleName, b.ko.Spec.MonitoringScheduleName)\n\t\t}\n\t}\n\n\treturn delta\n}", "func newReportErr(metricsPublisher *metrics.Publisher) *reportErr {\n\treturn &reportErr{\n\t\tmetricsPublisher: metricsPublisher,\n\t}\n}", "func (pool *Workspaces_BigDataPool_Spec) ConvertSpecTo(destination genruntime.ConvertibleSpec) error {\n\tif destination == pool {\n\t\treturn errors.New(\"attempted conversion between unrelated implementations of github.com/Azure/azure-service-operator/v2/pkg/genruntime/ConvertibleSpec\")\n\t}\n\n\treturn destination.ConvertSpecFrom(pool)\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn 
&ReconcileDfJob{client: mgr.GetClient(), scheme: mgr.GetScheme()}\n}", "func BuildJobSpec(pod *podtemplatespec.Builder) *jobspec.Builder {\n\tjobSpecObj := jobspec.NewBuilder().\n\t\tWithPodTemplateSpecBuilder(pod)\n\t_, err := jobSpecObj.Build()\n\tif err != nil {\n\t\tlog.Errorln(err)\n\t}\n\treturn jobSpecObj\n}", "func New(client plugins.Client, cryptor txcrypto.Cryptor) (*AppchainMonitor, error) {\n\tmeta, err := client.GetOutMeta()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"get out interchainCounter from broker contract :%w\", err)\n\t}\n\n\tctx, cancel := context.WithCancel(context.Background())\n\treturn &AppchainMonitor{\n\t\tclient: client,\n\t\tinterchainCounter: meta,\n\t\tcryptor: cryptor,\n\t\trecvCh: make(chan *pb.IBTP, 1024),\n\t\tctx: ctx,\n\t\tcancel: cancel,\n\t}, nil\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &ReconcileApplicationMonitoring{\n\t\tclient: mgr.GetClient(),\n\t\tscheme: mgr.GetScheme(),\n\t\thelper: NewKubeHelper(),\n\t\textraParams: make(map[string]string),\n\t}\n}", "func initProvider() func() {\n\tctx := context.Background()\n\n\tres, err := resource.New(ctx,\n\t\tresource.WithFromEnv(),\n\t\tresource.WithProcess(),\n\t\tresource.WithTelemetrySDK(),\n\t\tresource.WithHost(),\n\t\tresource.WithAttributes(\n\t\t\t// the service name used to display traces in backends\n\t\t\tsemconv.ServiceNameKey.String(\"demo-client\"),\n\t\t),\n\t)\n\thandleErr(err, \"failed to create resource\")\n\n\totelAgentAddr, ok := os.LookupEnv(\"OTEL_EXPORTER_OTLP_ENDPOINT\")\n\tif !ok {\n\t\totelAgentAddr = \"0.0.0.0:4317\"\n\t}\n\n\tmetricExp, err := otlpmetricgrpc.New(\n\t\tctx,\n\t\totlpmetricgrpc.WithInsecure(),\n\t\totlpmetricgrpc.WithEndpoint(otelAgentAddr),\n\t)\n\thandleErr(err, \"Failed to create the collector metric exporter\")\n\n\tmeterProvider := sdkmetric.NewMeterProvider(\n\t\tsdkmetric.WithResource(res),\n\t\tsdkmetric.WithReader(\n\t\t\tsdkmetric.NewPeriodicReader(\n\t\t\t\tmetricExp,\n\t\t\t\tsdkmetric.WithInterval(2*time.Second),\n\t\t\t),\n\t\t),\n\t)\n\totel.SetMeterProvider(meterProvider)\n\n\ttraceClient := otlptracegrpc.NewClient(\n\t\totlptracegrpc.WithInsecure(),\n\t\totlptracegrpc.WithEndpoint(otelAgentAddr),\n\t\totlptracegrpc.WithDialOption(grpc.WithBlock()))\n\tsctx, cancel := context.WithTimeout(ctx, time.Second)\n\tdefer cancel()\n\ttraceExp, err := otlptrace.New(sctx, traceClient)\n\thandleErr(err, \"Failed to create the collector trace exporter\")\n\n\tbsp := sdktrace.NewBatchSpanProcessor(traceExp)\n\ttracerProvider := sdktrace.NewTracerProvider(\n\t\tsdktrace.WithSampler(sdktrace.AlwaysSample()),\n\t\tsdktrace.WithResource(res),\n\t\tsdktrace.WithSpanProcessor(bsp),\n\t)\n\n\t// set global propagator to tracecontext (the default is no-op).\n\totel.SetTextMapPropagator(propagation.NewCompositeTextMapPropagator(propagation.TraceContext{}, propagation.Baggage{}))\n\totel.SetTracerProvider(tracerProvider)\n\n\treturn func() {\n\t\tcxt, cancel := context.WithTimeout(ctx, time.Second)\n\t\tdefer cancel()\n\t\tif err := traceExp.Shutdown(cxt); err != nil {\n\t\t\totel.Handle(err)\n\t\t}\n\t\t// pushes any last exports to the receiver\n\t\tif err := meterProvider.Shutdown(cxt); err != nil {\n\t\t\totel.Handle(err)\n\t\t}\n\t}\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &ReconcileJenkinsInstance{\n\t\tClient: mgr.GetClient(),\n\t\tEventRecorder: mgr.GetRecorder(\"JenkinsInstanceController\"),\n\t\tscheme: mgr.GetScheme(),\n\t}\n}", "func newProfile(ctx context.Context, cfg 
config.KubeSchedulerProfile, r frameworkruntime.Registry, recorderFact RecorderFactory,\n\topts ...frameworkruntime.Option) (framework.Framework, error) {\n\trecorder := recorderFact(cfg.SchedulerName)\n\topts = append(opts, frameworkruntime.WithEventRecorder(recorder))\n\treturn frameworkruntime.NewFramework(ctx, r, &cfg, opts...)\n}", "func New(workernum int) *disp {\n\treturn &disp{\n\t\t//Pipelines: make([]*worker2.Pipeline, pipelinenum),\n\t\tPipelineChan: make(worker2.PipelineChannel),\n\t\tPipelineQueue: make(worker2.PipelineQueue),\n\t\tWorkers: make([]*worker2.Worker, workernum),\n\t\tJobChan: make(worker2.JobChannel),\n\t\tQueue: make(worker2.JobQueue),\n\t}\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &JenkinsReconciler{client: mgr.GetClient(), scheme: mgr.GetScheme()}\n}", "func NewStatusReportRequestWithoutParam() *StatusReportRequest {\n\n return &StatusReportRequest{\n JDCloudRequest: core.JDCloudRequest{\n URL: \"/regions/{regionId}/statusReport\",\n Method: \"POST\",\n Header: nil,\n Version: \"v1\",\n },\n }\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\tlogger := log.WithName(\"newReconciler\")\n\tpodResourcesClient, err := podresourcesclient.NewPodResourcesClient()\n\tif err != nil {\n\t\tlogger.Error(err, \"unable to create podresources client\")\n\t\treturn nil\n\t}\n\treturn &ReconcilePod{client: mgr.GetClient(), scheme: mgr.GetScheme(), podResourcesClient: podResourcesClient}\n}", "func NewFromSpec(spec *rspec.Spec) Generator {\n\treturn Generator{\n\t\tspec: spec,\n\t}\n}", "func NewKeeperSpec(spec *job.KeeperSpec) *KeeperSpec {\n\treturn &KeeperSpec{\n\t\tContractAddress: spec.ContractAddress,\n\t\tFromAddress: spec.FromAddress,\n\t\tCreatedAt: spec.CreatedAt,\n\t\tUpdatedAt: spec.UpdatedAt,\n\t\tEVMChainID: spec.EVMChainID,\n\t}\n}", "func New(opts ...OptionFunc) (*WorkOracle, error) {\n\toptions := &option{\n\t\tmaxRetries: 10,\n\t\tretryDelay: time.Second,\n\t\tlogger: zap.NewNop(),\n\t}\n\n\tfor _, opt := range opts {\n\t\tif err := opt(options); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tif err := options.validate(); err != nil {\n\t\treturn nil, err\n\t}\n\n\tscrypt := options.scrypter\n\tif scrypt == nil {\n\t\tscrypt = &LazyScrypter{init: func() (postrs.Scrypter, error) {\n\t\t\tif options.providerID == nil {\n\t\t\t\treturn nil, errors.New(\"no provider specified\")\n\t\t\t}\n\n\t\t\treturn postrs.NewScrypt(\n\t\t\t\tpostrs.WithProviderID(*options.providerID),\n\t\t\t\tpostrs.WithCommitment(options.commitment),\n\t\t\t\tpostrs.WithScryptN(options.n),\n\t\t\t\tpostrs.WithVRFDifficulty(options.vrfDifficulty),\n\t\t\t\tpostrs.WithLogger(options.logger),\n\t\t\t)\n\t\t}}\n\t}\n\n\treturn &WorkOracle{\n\t\toptions: options,\n\t\tscrypt: scrypt,\n\t}, nil\n}", "func newEchoServerPodSpec(podName string) *api.Pod {\n\tport := 8080\n\tpod := &api.Pod{\n\t\tObjectMeta: api.ObjectMeta{\n\t\t\tName: podName,\n\t\t},\n\t\tSpec: api.PodSpec{\n\t\t\tContainers: []api.Container{\n\t\t\t\t{\n\t\t\t\t\tName: \"echoserver\",\n\t\t\t\t\tImage: \"gcr.io/google_containers/echoserver:1.4\",\n\t\t\t\t\tPorts: []api.ContainerPort{{ContainerPort: int32(port)}},\n\t\t\t\t},\n\t\t\t},\n\t\t\tRestartPolicy: api.RestartPolicyNever,\n\t\t},\n\t}\n\treturn pod\n}", "func CreateRenderedSpec(app apptypes.AppType, sequence int64, kotsKinds *kotsutil.KotsKinds, opts types.TroubleshootOptions) (*troubleshootv1beta2.SupportBundle, error) {\n\tbuiltBundle := kotsKinds.SupportBundle.DeepCopy()\n\tif builtBundle == nil 
{\n\t\tbuiltBundle = &troubleshootv1beta2.SupportBundle{\n\t\t\tTypeMeta: v1.TypeMeta{\n\t\t\t\tKind: \"SupportBundle\",\n\t\t\t\tAPIVersion: \"troubleshoot.sh/v1beta2\",\n\t\t\t},\n\t\t\tObjectMeta: v1.ObjectMeta{\n\t\t\t\tName: \"default-supportbundle\",\n\t\t\t},\n\t\t}\n\n\t\tif kotsKinds.Collector != nil {\n\t\t\tbuiltBundle.Spec.Collectors = kotsKinds.Collector.DeepCopy().Spec.Collectors\n\t\t}\n\t\tif kotsKinds.Analyzer != nil {\n\t\t\tbuiltBundle.Spec.Analyzers = kotsKinds.Analyzer.DeepCopy().Spec.Analyzers\n\t\t}\n\t}\n\n\tclientset, err := k8sutil.GetClientset()\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to get k8s clientset\")\n\t}\n\n\tnamespacesToCollect := []string{}\n\tnamespacesToAnalyze := []string{}\n\n\tisKurl, err := kurl.IsKurl(clientset)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to check if cluster is kurl\")\n\t}\n\n\tif !isKurl {\n\t\t// with cluster access, collect everything, but only analyze application namespaces\n\t\t// with minimal RBAC collect only application namespaces\n\t\tif k8sutil.IsKotsadmClusterScoped(context.TODO(), clientset, util.PodNamespace) {\n\t\t\tnamespacesToAnalyze = append(namespacesToAnalyze, util.PodNamespace)\n\t\t\tnamespacesToAnalyze = append(namespacesToAnalyze, kotsKinds.KotsApplication.Spec.AdditionalNamespaces...)\n\t\t\tveleroNamespace, err := kotssnapshot.DetectVeleroNamespace(context.TODO(), clientset, util.PodNamespace)\n\t\t\tif err != nil {\n\t\t\t\tlogger.Errorf(\"Failed to detect velero namespace for the support bundle: %v\", err)\n\t\t\t} else {\n\t\t\t\tnamespacesToAnalyze = append(namespacesToAnalyze, veleroNamespace)\n\t\t\t}\n\t\t} else {\n\t\t\tnamespacesToCollect = append(namespacesToCollect, util.PodNamespace)\n\t\t\tnamespacesToCollect = append(namespacesToCollect, kotsKinds.KotsApplication.Spec.AdditionalNamespaces...)\n\t\t}\n\t}\n\n\t// split the default kotsadm support bundle into multiple support bundles\n\tvendorSpec, err := createVendorSpec(builtBundle)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to create vendor support bundle spec\")\n\t}\n\n\tclusterSpec, err := createClusterSpecificSpec(app, builtBundle, clientset)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to create cluster specific support bundle spec\")\n\t}\n\n\tdefaultSpec, err := createDefaultSpec(app, builtBundle, opts, namespacesToCollect, namespacesToAnalyze, clientset)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to create defaults support bundle spec\")\n\t}\n\n\tbuiltBundles := map[string]*troubleshootv1beta2.SupportBundle{\n\t\tkotstypes.VendorSpecificSupportBundleSpecKey: vendorSpec, //vendors' application support-bundle spec\n\t\tkotstypes.ClusterSpecificSupportBundleSpecKey: clusterSpec, //cluster-specific support-bundle spec discovered from the cluster\n\t\tkotstypes.DefaultSupportBundleSpecKey: defaultSpec, //default support-bundle spec\n\t}\n\n\tfor key, builtBundle := range builtBundles {\n\t\tconfigMapName := GetSpecName(app.GetSlug()) + \"-\" + key\n\t\terr := createSupportBundleSpecConfigMap(app, sequence, kotsKinds, configMapName, builtBundle, opts, clientset)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"failed to create support bundle configmap\")\n\t\t}\n\t}\n\n\tmergedBundle := mergeSupportBundleSpecs(builtBundles)\n\terr = createSupportBundleSpecSecret(app, sequence, kotsKinds, GetSpecName(app.GetSlug()), mergedBundle, opts, clientset)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to create support bundle 
secret\")\n\t}\n\n\t// Include discovered support bundle specs and multiple kotsadm support bundle specs. Perform this action here so\n\t// as not to add discovered specs to the default support bundle spec secret.\n\t// use cluster specific support bundle spec to add Spec.AfterCollection as default\n\treturn mergedBundle, nil\n}", "func newReceiver(config *Config, nextConsumer consumer.Traces, settings receiver.CreateSettings) (*zipkinReceiver, error) {\n\tif nextConsumer == nil {\n\t\treturn nil, component.ErrNilNextConsumer\n\t}\n\n\ttransports := []string{receiverTransportV1Thrift, receiverTransportV1JSON, receiverTransportV2JSON, receiverTransportV2PROTO}\n\tobsrecvrs := make(map[string]*obsreport.Receiver)\n\tfor _, transport := range transports {\n\t\tobsrecv, err := obsreport.NewReceiver(obsreport.ReceiverSettings{\n\t\t\tReceiverID: settings.ID,\n\t\t\tTransport: transport,\n\t\t\tReceiverCreateSettings: settings,\n\t\t})\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tobsrecvrs[transport] = obsrecv\n\t}\n\n\tzr := &zipkinReceiver{\n\t\tnextConsumer: nextConsumer,\n\t\tconfig: config,\n\t\tv1ThriftUnmarshaler: zipkinv1.NewThriftTracesUnmarshaler(),\n\t\tv1JSONUnmarshaler: zipkinv1.NewJSONTracesUnmarshaler(config.ParseStringTags),\n\t\tjsonUnmarshaler: zipkinv2.NewJSONTracesUnmarshaler(config.ParseStringTags),\n\t\tprotobufUnmarshaler: zipkinv2.NewProtobufTracesUnmarshaler(false, config.ParseStringTags),\n\t\tprotobufDebugUnmarshaler: zipkinv2.NewProtobufTracesUnmarshaler(true, config.ParseStringTags),\n\t\tsettings: settings,\n\t\tobsrecvrs: obsrecvrs,\n\t}\n\treturn zr, nil\n}", "func NewReport(startTime *time.Time, endTime *time.Time) (*Report, error) {\n\terr := DeleteReport(startTime, endTime)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn OpenReport(startTime, endTime)\n}", "func NewBusinessScenarioPlanner()(*BusinessScenarioPlanner) {\n    m := &BusinessScenarioPlanner{\n        Entity: *NewEntity(),\n    }\n    return m\n}", "func (suite *TestManagerSuite) TestManagerCreateWithExisting() {\n\terr := suite.m.UpdateStatus(\"tid001\", job.SuccessStatus.String(), 2000)\n\trequire.NoError(suite.T(), err)\n\n\trp := &scan.Report{\n\t\tDigest: \"d1000\",\n\t\tRegistrationUUID: \"ruuid\",\n\t\tMimeType: v1.MimeTypeNativeReport,\n\t\tTrackID: \"tid002\",\n\t}\n\n\tuuid, err := suite.m.Create(rp)\n\trequire.NoError(suite.T(), err)\n\trequire.NotEmpty(suite.T(), uuid)\n\n\tassert.NotEqual(suite.T(), suite.rpUUID, uuid)\n\tsuite.rpUUID = uuid\n}", "func NewKeeperSpec(spec *job.KeeperSpec) *KeeperSpec {\n\treturn &KeeperSpec{\n\t\tContractAddress: spec.ContractAddress,\n\t\tFromAddress: spec.FromAddress,\n\t\tCreatedAt: spec.CreatedAt,\n\t\tUpdatedAt: spec.UpdatedAt,\n\t}\n}", "func NewReport(author string) *Report {\n\tr := Report{}\n\tr.Date = time.Now().Format(\"20060102-150405\") // set to current time\n\tr.Author = author\n\tr.OS = runtime.GOOS\n\treturn &r\n}", "func (rm *resourceManager) newDescribeRequestPayload(\n\tr *resource,\n) (*svcsdk.DescribeModelBiasJobDefinitionInput, error) {\n\tres := &svcsdk.DescribeModelBiasJobDefinitionInput{}\n\n\tif r.ko.Spec.JobDefinitionName != nil {\n\t\tres.SetJobDefinitionName(*r.ko.Spec.JobDefinitionName)\n\t}\n\n\treturn res, nil\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\n\tserverVersion, err := getServerVersion()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tlog.Info(fmt.Sprintf(\"Kubernetes Version: %s\", serverVersion))\n\n\treturn &ReconcileIBMBlockCSI{\n\t\tclient: mgr.GetClient(),\n\t\tscheme: 
mgr.GetScheme(),\n\t\trecorder: mgr.GetEventRecorderFor(\"controller_ibmblockcsi\"),\n\t\tserverVersion: serverVersion,\n\t}\n}", "func Convert_v1_ControlPlaneSpec_To_v2_ControlPlaneSpec(in *v1.ControlPlaneSpec, out *v2.ControlPlaneSpec, s conversion.Scope) (err error) {\n\tdefer func() {\n\t\tif err != nil {\n\t\t\tlogger.Error(err, fmt.Sprintf(\"unexpected error occurred during ServiceMeshControlPlane v1 to v2 conversion: %v\", err))\n\t\t\tif out.TechPreview == nil {\n\t\t\t\tout.TechPreview = v1.NewHelmValues(make(map[string]interface{}))\n\t\t\t}\n\t\t\tif err2 := out.TechPreview.SetField(TechPreviewErroredMessage, err.Error()); err2 != nil {\n\t\t\t\terr = err2\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif len(in.Istio.GetContent()) > 0 {\n\t\t\t\tif err2 := out.TechPreview.SetField(TechPreviewErroredIstio, in.Istio.DeepCopy().GetContent()); err2 != nil {\n\t\t\t\t\terr = err2\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t\tif len(in.ThreeScale.GetContent()) > 0 {\n\t\t\t\tif err2 := out.TechPreview.SetField(TechPreviewErrored3scale, in.ThreeScale.DeepCopy().GetContent()); err2 != nil {\n\t\t\t\t\terr = err2\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t\t// erase anything that converted successfully\n\t\t\tout.Addons = nil\n\t\t\tout.Cluster = nil\n\t\t\tout.Gateways = nil\n\t\t\tout.General = nil\n\t\t\tout.Policy = nil\n\t\t\tout.Proxy = nil\n\t\t\tout.Runtime = nil\n\t\t\tout.Security = nil\n\t\t\tout.Telemetry = nil\n\t\t\tout.Tracing = nil\n\t\t}\n\t\terr = nil\n\t}()\n\n\tif err := autoConvert_v1_ControlPlaneSpec_To_v2_ControlPlaneSpec(in, out, s); err != nil {\n\t\treturn err\n\t}\n\n\t// we need to parse the version for some version-specific logic below\n\t// if it is not set (during an empty AppliedSpec conversion), we treat like DefaultVersion\n\t// this spares us headaches in a lot of our unit tests. 
Previously this wasn't an issue,\n\t// as we defaulted to v1.0 when no version was set.\n\teffectiveVersion := in.Version\n\tif effectiveVersion == \"\" {\n\t\teffectiveVersion = versions.DefaultVersion.String()\n\t}\n\tversion, versionErr := versions.ParseVersion(effectiveVersion)\n\tif versionErr != nil {\n\t\treturn versionErr\n\t}\n\n\t// legacy Template field\n\tif len(in.Profiles) == 0 && in.Template != \"\" {\n\t\tout.Profiles = []string{in.Template}\n\t}\n\n\t// copy to preserve input\n\tvalues := in.Istio.DeepCopy()\n\tif techPreview, ok, err := values.GetMap(\"techPreview\"); ok {\n\t\tif len(techPreview) > 0 {\n\t\t\tout.TechPreview = v1.NewHelmValues(techPreview).DeepCopy()\n\t\t}\n\t\tdelete(values.GetContent(), \"techPreview\")\n\t} else if err != nil {\n\t\treturn err\n\t}\n\n\tif err := v1ToV2Hacks(in, values); err != nil {\n\t\treturn err\n\t}\n\n\t// Cluster settings\n\tif err := populateClusterConfig(values, out); err != nil {\n\t\treturn err\n\t}\n\n\t// General\n\tif err := populateGeneralConfig(values, out); err != nil {\n\t\treturn err\n\t}\n\n\t// Policy - ensure policy runs before telemetry, as both may use mixer.adapters\n\t// policy won't remove these from values, but telemetry will\n\tif err := populatePolicyConfig(values, out, version); err != nil {\n\t\treturn err\n\t}\n\n\t// Proxy\n\tif err := populateProxyConfig(values, out); err != nil {\n\t\treturn err\n\t}\n\n\t// Security\n\tif err := populateSecurityConfig(values, out, version); err != nil {\n\t\treturn err\n\t}\n\n\t// Telemetry\n\tif err := populateTelemetryConfig(values, out, version); err != nil {\n\t\treturn err\n\t}\n\n\t// Tracing\n\tif err := populateTracingConfig(values, out); err != nil {\n\t\treturn err\n\t}\n\n\t// Gateways\n\tif err := populateGatewaysConfig(values, out); err != nil {\n\t\treturn err\n\t}\n\n\t// Addons\n\tif err := populateAddonsConfig(values, out); err != nil {\n\t\treturn err\n\t}\n\n\t// Runtime\n\tif err := populateControlPlaneRuntimeConfig(values, out); err != nil {\n\t\treturn err\n\t}\n\n\t// remove common mixer settings (used by both telemetry and policy)\n\tvalues.RemoveField(\"global.istioRemote\")\n\n\t// save anything that's left for proper round tripping\n\tif len(values.GetContent()) > 0 {\n\t\tif out.TechPreview == nil {\n\t\t\tout.TechPreview = v1.NewHelmValues(make(map[string]interface{}))\n\t\t}\n\t\tif err := overwriteHelmValues(out.TechPreview.GetContent(), values.GetContent()); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif len(out.TechPreview.GetContent()) == 0 {\n\t\t\tout.TechPreview = nil\n\t\t}\n\t}\n\n\treturn nil\n}", "func New(c Components, o Options) Interface {\n\tif o.ProcessedQueueSize == 0 {\n\t\to.ProcessedQueueSize = 5000\n\t}\n\n\th := &component{\n\t\tComponents: c,\n\t\tPublicNetAddr: o.PublicNetAddr,\n\t\tPrivateNetAddr: o.PrivateNetAddr,\n\t\tPrivateNetAddrAnnounce: o.PrivateNetAddrAnnounce,\n\t\tProcessed: newPQueue(o.ProcessedQueueSize),\n\t}\n\n\t// TODO Make it configurable\n\th.Configuration.CFList = [5]uint32{867100000, 867300000, 867500000, 867700000, 867900000}\n\th.Configuration.NetID = [3]byte{14, 14, 14}\n\t//h.Configuration.RX1DROffset = 0\n\th.Configuration.Rx1DrOffset = 0\n\th.Configuration.RFChain = 0\n\th.Configuration.InvPolarity = true\n\n\tset := make(chan bundle)\n\tbundles := make(chan []bundle)\n\n\th.ChBundles = set\n\tgo h.consumeBundles(bundles)\n\tgo h.consumeSet(bundles, set)\n\n\treturn h\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &ReconcileParameterStore{client: 
mgr.GetClient(), scheme: mgr.GetScheme()}\n}", "func InitFromSpec(spec string) string {\n\terr := Global.ActivateSpec(spec)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"failed to activate logging spec: %s\", err)\n\t}\n\treturn DefaultLevel()\n}", "func newOtlpReceiver(cfg *Config, logger *zap.Logger) *otlpReceiver {\n\tr := &otlpReceiver{\n\t\tcfg: cfg,\n\t\tlogger: logger,\n\t}\n\tif cfg.HTTP != nil {\n\t\tr.httpMux = mux.NewRouter()\n\t}\n\n\treturn r\n}", "func newResourceDelta(\n\ta *resource,\n\tb *resource,\n) *ackcompare.Delta {\n\tdelta := ackcompare.NewDelta()\n\tif (a == nil && b != nil) ||\n\t\t(a != nil && b == nil) {\n\t\tdelta.Add(\"\", a, b)\n\t\treturn delta\n\t}\n\tcustomSetDefaults(a, b)\n\n\tif ackcompare.HasNilDifference(a.ko.Spec.HyperParameterTuningJobConfig, b.ko.Spec.HyperParameterTuningJobConfig) {\n\t\tdelta.Add(\"Spec.HyperParameterTuningJobConfig\", a.ko.Spec.HyperParameterTuningJobConfig, b.ko.Spec.HyperParameterTuningJobConfig)\n\t} else if a.ko.Spec.HyperParameterTuningJobConfig != nil && b.ko.Spec.HyperParameterTuningJobConfig != nil {\n\t\tif ackcompare.HasNilDifference(a.ko.Spec.HyperParameterTuningJobConfig.HyperParameterTuningJobObjective, b.ko.Spec.HyperParameterTuningJobConfig.HyperParameterTuningJobObjective) {\n\t\t\tdelta.Add(\"Spec.HyperParameterTuningJobConfig.HyperParameterTuningJobObjective\", a.ko.Spec.HyperParameterTuningJobConfig.HyperParameterTuningJobObjective, b.ko.Spec.HyperParameterTuningJobConfig.HyperParameterTuningJobObjective)\n\t\t} else if a.ko.Spec.HyperParameterTuningJobConfig.HyperParameterTuningJobObjective != nil && b.ko.Spec.HyperParameterTuningJobConfig.HyperParameterTuningJobObjective != nil {\n\t\t\tif ackcompare.HasNilDifference(a.ko.Spec.HyperParameterTuningJobConfig.HyperParameterTuningJobObjective.MetricName, b.ko.Spec.HyperParameterTuningJobConfig.HyperParameterTuningJobObjective.MetricName) {\n\t\t\t\tdelta.Add(\"Spec.HyperParameterTuningJobConfig.HyperParameterTuningJobObjective.MetricName\", a.ko.Spec.HyperParameterTuningJobConfig.HyperParameterTuningJobObjective.MetricName, b.ko.Spec.HyperParameterTuningJobConfig.HyperParameterTuningJobObjective.MetricName)\n\t\t\t} else if a.ko.Spec.HyperParameterTuningJobConfig.HyperParameterTuningJobObjective.MetricName != nil && b.ko.Spec.HyperParameterTuningJobConfig.HyperParameterTuningJobObjective.MetricName != nil {\n\t\t\t\tif *a.ko.Spec.HyperParameterTuningJobConfig.HyperParameterTuningJobObjective.MetricName != *b.ko.Spec.HyperParameterTuningJobConfig.HyperParameterTuningJobObjective.MetricName {\n\t\t\t\t\tdelta.Add(\"Spec.HyperParameterTuningJobConfig.HyperParameterTuningJobObjective.MetricName\", a.ko.Spec.HyperParameterTuningJobConfig.HyperParameterTuningJobObjective.MetricName, b.ko.Spec.HyperParameterTuningJobConfig.HyperParameterTuningJobObjective.MetricName)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif ackcompare.HasNilDifference(a.ko.Spec.HyperParameterTuningJobConfig.HyperParameterTuningJobObjective.Type, b.ko.Spec.HyperParameterTuningJobConfig.HyperParameterTuningJobObjective.Type) {\n\t\t\t\tdelta.Add(\"Spec.HyperParameterTuningJobConfig.HyperParameterTuningJobObjective.Type\", a.ko.Spec.HyperParameterTuningJobConfig.HyperParameterTuningJobObjective.Type, b.ko.Spec.HyperParameterTuningJobConfig.HyperParameterTuningJobObjective.Type)\n\t\t\t} else if a.ko.Spec.HyperParameterTuningJobConfig.HyperParameterTuningJobObjective.Type != nil && b.ko.Spec.HyperParameterTuningJobConfig.HyperParameterTuningJobObjective.Type != nil {\n\t\t\t\tif 
*a.ko.Spec.HyperParameterTuningJobConfig.HyperParameterTuningJobObjective.Type != *b.ko.Spec.HyperParameterTuningJobConfig.HyperParameterTuningJobObjective.Type {\n\t\t\t\t\tdelta.Add(\"Spec.HyperParameterTuningJobConfig.HyperParameterTuningJobObjective.Type\", a.ko.Spec.HyperParameterTuningJobConfig.HyperParameterTuningJobObjective.Type, b.ko.Spec.HyperParameterTuningJobConfig.HyperParameterTuningJobObjective.Type)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif ackcompare.HasNilDifference(a.ko.Spec.HyperParameterTuningJobConfig.ParameterRanges, b.ko.Spec.HyperParameterTuningJobConfig.ParameterRanges) {\n\t\t\tdelta.Add(\"Spec.HyperParameterTuningJobConfig.ParameterRanges\", a.ko.Spec.HyperParameterTuningJobConfig.ParameterRanges, b.ko.Spec.HyperParameterTuningJobConfig.ParameterRanges)\n\t\t} else if a.ko.Spec.HyperParameterTuningJobConfig.ParameterRanges != nil && b.ko.Spec.HyperParameterTuningJobConfig.ParameterRanges != nil {\n\t\t\tif !reflect.DeepEqual(a.ko.Spec.HyperParameterTuningJobConfig.ParameterRanges.CategoricalParameterRanges, b.ko.Spec.HyperParameterTuningJobConfig.ParameterRanges.CategoricalParameterRanges) {\n\t\t\t\tdelta.Add(\"Spec.HyperParameterTuningJobConfig.ParameterRanges.CategoricalParameterRanges\", a.ko.Spec.HyperParameterTuningJobConfig.ParameterRanges.CategoricalParameterRanges, b.ko.Spec.HyperParameterTuningJobConfig.ParameterRanges.CategoricalParameterRanges)\n\t\t\t}\n\t\t\tif !reflect.DeepEqual(a.ko.Spec.HyperParameterTuningJobConfig.ParameterRanges.ContinuousParameterRanges, b.ko.Spec.HyperParameterTuningJobConfig.ParameterRanges.ContinuousParameterRanges) {\n\t\t\t\tdelta.Add(\"Spec.HyperParameterTuningJobConfig.ParameterRanges.ContinuousParameterRanges\", a.ko.Spec.HyperParameterTuningJobConfig.ParameterRanges.ContinuousParameterRanges, b.ko.Spec.HyperParameterTuningJobConfig.ParameterRanges.ContinuousParameterRanges)\n\t\t\t}\n\t\t\tif !reflect.DeepEqual(a.ko.Spec.HyperParameterTuningJobConfig.ParameterRanges.IntegerParameterRanges, b.ko.Spec.HyperParameterTuningJobConfig.ParameterRanges.IntegerParameterRanges) {\n\t\t\t\tdelta.Add(\"Spec.HyperParameterTuningJobConfig.ParameterRanges.IntegerParameterRanges\", a.ko.Spec.HyperParameterTuningJobConfig.ParameterRanges.IntegerParameterRanges, b.ko.Spec.HyperParameterTuningJobConfig.ParameterRanges.IntegerParameterRanges)\n\t\t\t}\n\t\t}\n\t\tif ackcompare.HasNilDifference(a.ko.Spec.HyperParameterTuningJobConfig.ResourceLimits, b.ko.Spec.HyperParameterTuningJobConfig.ResourceLimits) {\n\t\t\tdelta.Add(\"Spec.HyperParameterTuningJobConfig.ResourceLimits\", a.ko.Spec.HyperParameterTuningJobConfig.ResourceLimits, b.ko.Spec.HyperParameterTuningJobConfig.ResourceLimits)\n\t\t} else if a.ko.Spec.HyperParameterTuningJobConfig.ResourceLimits != nil && b.ko.Spec.HyperParameterTuningJobConfig.ResourceLimits != nil {\n\t\t\tif ackcompare.HasNilDifference(a.ko.Spec.HyperParameterTuningJobConfig.ResourceLimits.MaxNumberOfTrainingJobs, b.ko.Spec.HyperParameterTuningJobConfig.ResourceLimits.MaxNumberOfTrainingJobs) {\n\t\t\t\tdelta.Add(\"Spec.HyperParameterTuningJobConfig.ResourceLimits.MaxNumberOfTrainingJobs\", a.ko.Spec.HyperParameterTuningJobConfig.ResourceLimits.MaxNumberOfTrainingJobs, b.ko.Spec.HyperParameterTuningJobConfig.ResourceLimits.MaxNumberOfTrainingJobs)\n\t\t\t} else if a.ko.Spec.HyperParameterTuningJobConfig.ResourceLimits.MaxNumberOfTrainingJobs != nil && b.ko.Spec.HyperParameterTuningJobConfig.ResourceLimits.MaxNumberOfTrainingJobs != nil {\n\t\t\t\tif 
*a.ko.Spec.HyperParameterTuningJobConfig.ResourceLimits.MaxNumberOfTrainingJobs != *b.ko.Spec.HyperParameterTuningJobConfig.ResourceLimits.MaxNumberOfTrainingJobs {\n\t\t\t\t\tdelta.Add(\"Spec.HyperParameterTuningJobConfig.ResourceLimits.MaxNumberOfTrainingJobs\", a.ko.Spec.HyperParameterTuningJobConfig.ResourceLimits.MaxNumberOfTrainingJobs, b.ko.Spec.HyperParameterTuningJobConfig.ResourceLimits.MaxNumberOfTrainingJobs)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif ackcompare.HasNilDifference(a.ko.Spec.HyperParameterTuningJobConfig.ResourceLimits.MaxParallelTrainingJobs, b.ko.Spec.HyperParameterTuningJobConfig.ResourceLimits.MaxParallelTrainingJobs) {\n\t\t\t\tdelta.Add(\"Spec.HyperParameterTuningJobConfig.ResourceLimits.MaxParallelTrainingJobs\", a.ko.Spec.HyperParameterTuningJobConfig.ResourceLimits.MaxParallelTrainingJobs, b.ko.Spec.HyperParameterTuningJobConfig.ResourceLimits.MaxParallelTrainingJobs)\n\t\t\t} else if a.ko.Spec.HyperParameterTuningJobConfig.ResourceLimits.MaxParallelTrainingJobs != nil && b.ko.Spec.HyperParameterTuningJobConfig.ResourceLimits.MaxParallelTrainingJobs != nil {\n\t\t\t\tif *a.ko.Spec.HyperParameterTuningJobConfig.ResourceLimits.MaxParallelTrainingJobs != *b.ko.Spec.HyperParameterTuningJobConfig.ResourceLimits.MaxParallelTrainingJobs {\n\t\t\t\t\tdelta.Add(\"Spec.HyperParameterTuningJobConfig.ResourceLimits.MaxParallelTrainingJobs\", a.ko.Spec.HyperParameterTuningJobConfig.ResourceLimits.MaxParallelTrainingJobs, b.ko.Spec.HyperParameterTuningJobConfig.ResourceLimits.MaxParallelTrainingJobs)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif ackcompare.HasNilDifference(a.ko.Spec.HyperParameterTuningJobConfig.Strategy, b.ko.Spec.HyperParameterTuningJobConfig.Strategy) {\n\t\t\tdelta.Add(\"Spec.HyperParameterTuningJobConfig.Strategy\", a.ko.Spec.HyperParameterTuningJobConfig.Strategy, b.ko.Spec.HyperParameterTuningJobConfig.Strategy)\n\t\t} else if a.ko.Spec.HyperParameterTuningJobConfig.Strategy != nil && b.ko.Spec.HyperParameterTuningJobConfig.Strategy != nil {\n\t\t\tif *a.ko.Spec.HyperParameterTuningJobConfig.Strategy != *b.ko.Spec.HyperParameterTuningJobConfig.Strategy {\n\t\t\t\tdelta.Add(\"Spec.HyperParameterTuningJobConfig.Strategy\", a.ko.Spec.HyperParameterTuningJobConfig.Strategy, b.ko.Spec.HyperParameterTuningJobConfig.Strategy)\n\t\t\t}\n\t\t}\n\t\tif ackcompare.HasNilDifference(a.ko.Spec.HyperParameterTuningJobConfig.TrainingJobEarlyStoppingType, b.ko.Spec.HyperParameterTuningJobConfig.TrainingJobEarlyStoppingType) {\n\t\t\tdelta.Add(\"Spec.HyperParameterTuningJobConfig.TrainingJobEarlyStoppingType\", a.ko.Spec.HyperParameterTuningJobConfig.TrainingJobEarlyStoppingType, b.ko.Spec.HyperParameterTuningJobConfig.TrainingJobEarlyStoppingType)\n\t\t} else if a.ko.Spec.HyperParameterTuningJobConfig.TrainingJobEarlyStoppingType != nil && b.ko.Spec.HyperParameterTuningJobConfig.TrainingJobEarlyStoppingType != nil {\n\t\t\tif *a.ko.Spec.HyperParameterTuningJobConfig.TrainingJobEarlyStoppingType != *b.ko.Spec.HyperParameterTuningJobConfig.TrainingJobEarlyStoppingType {\n\t\t\t\tdelta.Add(\"Spec.HyperParameterTuningJobConfig.TrainingJobEarlyStoppingType\", a.ko.Spec.HyperParameterTuningJobConfig.TrainingJobEarlyStoppingType, b.ko.Spec.HyperParameterTuningJobConfig.TrainingJobEarlyStoppingType)\n\t\t\t}\n\t\t}\n\t\tif ackcompare.HasNilDifference(a.ko.Spec.HyperParameterTuningJobConfig.TuningJobCompletionCriteria, b.ko.Spec.HyperParameterTuningJobConfig.TuningJobCompletionCriteria) {\n\t\t\tdelta.Add(\"Spec.HyperParameterTuningJobConfig.TuningJobCompletionCriteria\", 
a.ko.Spec.HyperParameterTuningJobConfig.TuningJobCompletionCriteria, b.ko.Spec.HyperParameterTuningJobConfig.TuningJobCompletionCriteria)\n\t\t} else if a.ko.Spec.HyperParameterTuningJobConfig.TuningJobCompletionCriteria != nil && b.ko.Spec.HyperParameterTuningJobConfig.TuningJobCompletionCriteria != nil {\n\t\t\tif ackcompare.HasNilDifference(a.ko.Spec.HyperParameterTuningJobConfig.TuningJobCompletionCriteria.TargetObjectiveMetricValue, b.ko.Spec.HyperParameterTuningJobConfig.TuningJobCompletionCriteria.TargetObjectiveMetricValue) {\n\t\t\t\tdelta.Add(\"Spec.HyperParameterTuningJobConfig.TuningJobCompletionCriteria.TargetObjectiveMetricValue\", a.ko.Spec.HyperParameterTuningJobConfig.TuningJobCompletionCriteria.TargetObjectiveMetricValue, b.ko.Spec.HyperParameterTuningJobConfig.TuningJobCompletionCriteria.TargetObjectiveMetricValue)\n\t\t\t} else if a.ko.Spec.HyperParameterTuningJobConfig.TuningJobCompletionCriteria.TargetObjectiveMetricValue != nil && b.ko.Spec.HyperParameterTuningJobConfig.TuningJobCompletionCriteria.TargetObjectiveMetricValue != nil {\n\t\t\t\tif *a.ko.Spec.HyperParameterTuningJobConfig.TuningJobCompletionCriteria.TargetObjectiveMetricValue != *b.ko.Spec.HyperParameterTuningJobConfig.TuningJobCompletionCriteria.TargetObjectiveMetricValue {\n\t\t\t\t\tdelta.Add(\"Spec.HyperParameterTuningJobConfig.TuningJobCompletionCriteria.TargetObjectiveMetricValue\", a.ko.Spec.HyperParameterTuningJobConfig.TuningJobCompletionCriteria.TargetObjectiveMetricValue, b.ko.Spec.HyperParameterTuningJobConfig.TuningJobCompletionCriteria.TargetObjectiveMetricValue)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tif ackcompare.HasNilDifference(a.ko.Spec.HyperParameterTuningJobName, b.ko.Spec.HyperParameterTuningJobName) {\n\t\tdelta.Add(\"Spec.HyperParameterTuningJobName\", a.ko.Spec.HyperParameterTuningJobName, b.ko.Spec.HyperParameterTuningJobName)\n\t} else if a.ko.Spec.HyperParameterTuningJobName != nil && b.ko.Spec.HyperParameterTuningJobName != nil {\n\t\tif *a.ko.Spec.HyperParameterTuningJobName != *b.ko.Spec.HyperParameterTuningJobName {\n\t\t\tdelta.Add(\"Spec.HyperParameterTuningJobName\", a.ko.Spec.HyperParameterTuningJobName, b.ko.Spec.HyperParameterTuningJobName)\n\t\t}\n\t}\n\tif ackcompare.HasNilDifference(a.ko.Spec.TrainingJobDefinition, b.ko.Spec.TrainingJobDefinition) {\n\t\tdelta.Add(\"Spec.TrainingJobDefinition\", a.ko.Spec.TrainingJobDefinition, b.ko.Spec.TrainingJobDefinition)\n\t} else if a.ko.Spec.TrainingJobDefinition != nil && b.ko.Spec.TrainingJobDefinition != nil {\n\t\tif ackcompare.HasNilDifference(a.ko.Spec.TrainingJobDefinition.AlgorithmSpecification, b.ko.Spec.TrainingJobDefinition.AlgorithmSpecification) {\n\t\t\tdelta.Add(\"Spec.TrainingJobDefinition.AlgorithmSpecification\", a.ko.Spec.TrainingJobDefinition.AlgorithmSpecification, b.ko.Spec.TrainingJobDefinition.AlgorithmSpecification)\n\t\t} else if a.ko.Spec.TrainingJobDefinition.AlgorithmSpecification != nil && b.ko.Spec.TrainingJobDefinition.AlgorithmSpecification != nil {\n\t\t\tif ackcompare.HasNilDifference(a.ko.Spec.TrainingJobDefinition.AlgorithmSpecification.AlgorithmName, b.ko.Spec.TrainingJobDefinition.AlgorithmSpecification.AlgorithmName) {\n\t\t\t\tdelta.Add(\"Spec.TrainingJobDefinition.AlgorithmSpecification.AlgorithmName\", a.ko.Spec.TrainingJobDefinition.AlgorithmSpecification.AlgorithmName, b.ko.Spec.TrainingJobDefinition.AlgorithmSpecification.AlgorithmName)\n\t\t\t} else if a.ko.Spec.TrainingJobDefinition.AlgorithmSpecification.AlgorithmName != nil && 
b.ko.Spec.TrainingJobDefinition.AlgorithmSpecification.AlgorithmName != nil {\n\t\t\t\tif *a.ko.Spec.TrainingJobDefinition.AlgorithmSpecification.AlgorithmName != *b.ko.Spec.TrainingJobDefinition.AlgorithmSpecification.AlgorithmName {\n\t\t\t\t\tdelta.Add(\"Spec.TrainingJobDefinition.AlgorithmSpecification.AlgorithmName\", a.ko.Spec.TrainingJobDefinition.AlgorithmSpecification.AlgorithmName, b.ko.Spec.TrainingJobDefinition.AlgorithmSpecification.AlgorithmName)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif !reflect.DeepEqual(a.ko.Spec.TrainingJobDefinition.AlgorithmSpecification.MetricDefinitions, b.ko.Spec.TrainingJobDefinition.AlgorithmSpecification.MetricDefinitions) {\n\t\t\t\tdelta.Add(\"Spec.TrainingJobDefinition.AlgorithmSpecification.MetricDefinitions\", a.ko.Spec.TrainingJobDefinition.AlgorithmSpecification.MetricDefinitions, b.ko.Spec.TrainingJobDefinition.AlgorithmSpecification.MetricDefinitions)\n\t\t\t}\n\t\t\tif ackcompare.HasNilDifference(a.ko.Spec.TrainingJobDefinition.AlgorithmSpecification.TrainingImage, b.ko.Spec.TrainingJobDefinition.AlgorithmSpecification.TrainingImage) {\n\t\t\t\tdelta.Add(\"Spec.TrainingJobDefinition.AlgorithmSpecification.TrainingImage\", a.ko.Spec.TrainingJobDefinition.AlgorithmSpecification.TrainingImage, b.ko.Spec.TrainingJobDefinition.AlgorithmSpecification.TrainingImage)\n\t\t\t} else if a.ko.Spec.TrainingJobDefinition.AlgorithmSpecification.TrainingImage != nil && b.ko.Spec.TrainingJobDefinition.AlgorithmSpecification.TrainingImage != nil {\n\t\t\t\tif *a.ko.Spec.TrainingJobDefinition.AlgorithmSpecification.TrainingImage != *b.ko.Spec.TrainingJobDefinition.AlgorithmSpecification.TrainingImage {\n\t\t\t\t\tdelta.Add(\"Spec.TrainingJobDefinition.AlgorithmSpecification.TrainingImage\", a.ko.Spec.TrainingJobDefinition.AlgorithmSpecification.TrainingImage, b.ko.Spec.TrainingJobDefinition.AlgorithmSpecification.TrainingImage)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif ackcompare.HasNilDifference(a.ko.Spec.TrainingJobDefinition.AlgorithmSpecification.TrainingInputMode, b.ko.Spec.TrainingJobDefinition.AlgorithmSpecification.TrainingInputMode) {\n\t\t\t\tdelta.Add(\"Spec.TrainingJobDefinition.AlgorithmSpecification.TrainingInputMode\", a.ko.Spec.TrainingJobDefinition.AlgorithmSpecification.TrainingInputMode, b.ko.Spec.TrainingJobDefinition.AlgorithmSpecification.TrainingInputMode)\n\t\t\t} else if a.ko.Spec.TrainingJobDefinition.AlgorithmSpecification.TrainingInputMode != nil && b.ko.Spec.TrainingJobDefinition.AlgorithmSpecification.TrainingInputMode != nil {\n\t\t\t\tif *a.ko.Spec.TrainingJobDefinition.AlgorithmSpecification.TrainingInputMode != *b.ko.Spec.TrainingJobDefinition.AlgorithmSpecification.TrainingInputMode {\n\t\t\t\t\tdelta.Add(\"Spec.TrainingJobDefinition.AlgorithmSpecification.TrainingInputMode\", a.ko.Spec.TrainingJobDefinition.AlgorithmSpecification.TrainingInputMode, b.ko.Spec.TrainingJobDefinition.AlgorithmSpecification.TrainingInputMode)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif ackcompare.HasNilDifference(a.ko.Spec.TrainingJobDefinition.CheckpointConfig, b.ko.Spec.TrainingJobDefinition.CheckpointConfig) {\n\t\t\tdelta.Add(\"Spec.TrainingJobDefinition.CheckpointConfig\", a.ko.Spec.TrainingJobDefinition.CheckpointConfig, b.ko.Spec.TrainingJobDefinition.CheckpointConfig)\n\t\t} else if a.ko.Spec.TrainingJobDefinition.CheckpointConfig != nil && b.ko.Spec.TrainingJobDefinition.CheckpointConfig != nil {\n\t\t\tif ackcompare.HasNilDifference(a.ko.Spec.TrainingJobDefinition.CheckpointConfig.LocalPath, b.ko.Spec.TrainingJobDefinition.CheckpointConfig.LocalPath) 
{\n\t\t\t\tdelta.Add(\"Spec.TrainingJobDefinition.CheckpointConfig.LocalPath\", a.ko.Spec.TrainingJobDefinition.CheckpointConfig.LocalPath, b.ko.Spec.TrainingJobDefinition.CheckpointConfig.LocalPath)\n\t\t\t} else if a.ko.Spec.TrainingJobDefinition.CheckpointConfig.LocalPath != nil && b.ko.Spec.TrainingJobDefinition.CheckpointConfig.LocalPath != nil {\n\t\t\t\tif *a.ko.Spec.TrainingJobDefinition.CheckpointConfig.LocalPath != *b.ko.Spec.TrainingJobDefinition.CheckpointConfig.LocalPath {\n\t\t\t\t\tdelta.Add(\"Spec.TrainingJobDefinition.CheckpointConfig.LocalPath\", a.ko.Spec.TrainingJobDefinition.CheckpointConfig.LocalPath, b.ko.Spec.TrainingJobDefinition.CheckpointConfig.LocalPath)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif ackcompare.HasNilDifference(a.ko.Spec.TrainingJobDefinition.CheckpointConfig.S3URI, b.ko.Spec.TrainingJobDefinition.CheckpointConfig.S3URI) {\n\t\t\t\tdelta.Add(\"Spec.TrainingJobDefinition.CheckpointConfig.S3URI\", a.ko.Spec.TrainingJobDefinition.CheckpointConfig.S3URI, b.ko.Spec.TrainingJobDefinition.CheckpointConfig.S3URI)\n\t\t\t} else if a.ko.Spec.TrainingJobDefinition.CheckpointConfig.S3URI != nil && b.ko.Spec.TrainingJobDefinition.CheckpointConfig.S3URI != nil {\n\t\t\t\tif *a.ko.Spec.TrainingJobDefinition.CheckpointConfig.S3URI != *b.ko.Spec.TrainingJobDefinition.CheckpointConfig.S3URI {\n\t\t\t\t\tdelta.Add(\"Spec.TrainingJobDefinition.CheckpointConfig.S3URI\", a.ko.Spec.TrainingJobDefinition.CheckpointConfig.S3URI, b.ko.Spec.TrainingJobDefinition.CheckpointConfig.S3URI)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif ackcompare.HasNilDifference(a.ko.Spec.TrainingJobDefinition.DefinitionName, b.ko.Spec.TrainingJobDefinition.DefinitionName) {\n\t\t\tdelta.Add(\"Spec.TrainingJobDefinition.DefinitionName\", a.ko.Spec.TrainingJobDefinition.DefinitionName, b.ko.Spec.TrainingJobDefinition.DefinitionName)\n\t\t} else if a.ko.Spec.TrainingJobDefinition.DefinitionName != nil && b.ko.Spec.TrainingJobDefinition.DefinitionName != nil {\n\t\t\tif *a.ko.Spec.TrainingJobDefinition.DefinitionName != *b.ko.Spec.TrainingJobDefinition.DefinitionName {\n\t\t\t\tdelta.Add(\"Spec.TrainingJobDefinition.DefinitionName\", a.ko.Spec.TrainingJobDefinition.DefinitionName, b.ko.Spec.TrainingJobDefinition.DefinitionName)\n\t\t\t}\n\t\t}\n\t\tif ackcompare.HasNilDifference(a.ko.Spec.TrainingJobDefinition.EnableInterContainerTrafficEncryption, b.ko.Spec.TrainingJobDefinition.EnableInterContainerTrafficEncryption) {\n\t\t\tdelta.Add(\"Spec.TrainingJobDefinition.EnableInterContainerTrafficEncryption\", a.ko.Spec.TrainingJobDefinition.EnableInterContainerTrafficEncryption, b.ko.Spec.TrainingJobDefinition.EnableInterContainerTrafficEncryption)\n\t\t} else if a.ko.Spec.TrainingJobDefinition.EnableInterContainerTrafficEncryption != nil && b.ko.Spec.TrainingJobDefinition.EnableInterContainerTrafficEncryption != nil {\n\t\t\tif *a.ko.Spec.TrainingJobDefinition.EnableInterContainerTrafficEncryption != *b.ko.Spec.TrainingJobDefinition.EnableInterContainerTrafficEncryption {\n\t\t\t\tdelta.Add(\"Spec.TrainingJobDefinition.EnableInterContainerTrafficEncryption\", a.ko.Spec.TrainingJobDefinition.EnableInterContainerTrafficEncryption, b.ko.Spec.TrainingJobDefinition.EnableInterContainerTrafficEncryption)\n\t\t\t}\n\t\t}\n\t\tif ackcompare.HasNilDifference(a.ko.Spec.TrainingJobDefinition.EnableManagedSpotTraining, b.ko.Spec.TrainingJobDefinition.EnableManagedSpotTraining) {\n\t\t\tdelta.Add(\"Spec.TrainingJobDefinition.EnableManagedSpotTraining\", a.ko.Spec.TrainingJobDefinition.EnableManagedSpotTraining, 
b.ko.Spec.TrainingJobDefinition.EnableManagedSpotTraining)\n\t\t} else if a.ko.Spec.TrainingJobDefinition.EnableManagedSpotTraining != nil && b.ko.Spec.TrainingJobDefinition.EnableManagedSpotTraining != nil {\n\t\t\tif *a.ko.Spec.TrainingJobDefinition.EnableManagedSpotTraining != *b.ko.Spec.TrainingJobDefinition.EnableManagedSpotTraining {\n\t\t\t\tdelta.Add(\"Spec.TrainingJobDefinition.EnableManagedSpotTraining\", a.ko.Spec.TrainingJobDefinition.EnableManagedSpotTraining, b.ko.Spec.TrainingJobDefinition.EnableManagedSpotTraining)\n\t\t\t}\n\t\t}\n\t\tif ackcompare.HasNilDifference(a.ko.Spec.TrainingJobDefinition.EnableNetworkIsolation, b.ko.Spec.TrainingJobDefinition.EnableNetworkIsolation) {\n\t\t\tdelta.Add(\"Spec.TrainingJobDefinition.EnableNetworkIsolation\", a.ko.Spec.TrainingJobDefinition.EnableNetworkIsolation, b.ko.Spec.TrainingJobDefinition.EnableNetworkIsolation)\n\t\t} else if a.ko.Spec.TrainingJobDefinition.EnableNetworkIsolation != nil && b.ko.Spec.TrainingJobDefinition.EnableNetworkIsolation != nil {\n\t\t\tif *a.ko.Spec.TrainingJobDefinition.EnableNetworkIsolation != *b.ko.Spec.TrainingJobDefinition.EnableNetworkIsolation {\n\t\t\t\tdelta.Add(\"Spec.TrainingJobDefinition.EnableNetworkIsolation\", a.ko.Spec.TrainingJobDefinition.EnableNetworkIsolation, b.ko.Spec.TrainingJobDefinition.EnableNetworkIsolation)\n\t\t\t}\n\t\t}\n\t\tif ackcompare.HasNilDifference(a.ko.Spec.TrainingJobDefinition.HyperParameterRanges, b.ko.Spec.TrainingJobDefinition.HyperParameterRanges) {\n\t\t\tdelta.Add(\"Spec.TrainingJobDefinition.HyperParameterRanges\", a.ko.Spec.TrainingJobDefinition.HyperParameterRanges, b.ko.Spec.TrainingJobDefinition.HyperParameterRanges)\n\t\t} else if a.ko.Spec.TrainingJobDefinition.HyperParameterRanges != nil && b.ko.Spec.TrainingJobDefinition.HyperParameterRanges != nil {\n\t\t\tif !reflect.DeepEqual(a.ko.Spec.TrainingJobDefinition.HyperParameterRanges.CategoricalParameterRanges, b.ko.Spec.TrainingJobDefinition.HyperParameterRanges.CategoricalParameterRanges) {\n\t\t\t\tdelta.Add(\"Spec.TrainingJobDefinition.HyperParameterRanges.CategoricalParameterRanges\", a.ko.Spec.TrainingJobDefinition.HyperParameterRanges.CategoricalParameterRanges, b.ko.Spec.TrainingJobDefinition.HyperParameterRanges.CategoricalParameterRanges)\n\t\t\t}\n\t\t\tif !reflect.DeepEqual(a.ko.Spec.TrainingJobDefinition.HyperParameterRanges.ContinuousParameterRanges, b.ko.Spec.TrainingJobDefinition.HyperParameterRanges.ContinuousParameterRanges) {\n\t\t\t\tdelta.Add(\"Spec.TrainingJobDefinition.HyperParameterRanges.ContinuousParameterRanges\", a.ko.Spec.TrainingJobDefinition.HyperParameterRanges.ContinuousParameterRanges, b.ko.Spec.TrainingJobDefinition.HyperParameterRanges.ContinuousParameterRanges)\n\t\t\t}\n\t\t\tif !reflect.DeepEqual(a.ko.Spec.TrainingJobDefinition.HyperParameterRanges.IntegerParameterRanges, b.ko.Spec.TrainingJobDefinition.HyperParameterRanges.IntegerParameterRanges) {\n\t\t\t\tdelta.Add(\"Spec.TrainingJobDefinition.HyperParameterRanges.IntegerParameterRanges\", a.ko.Spec.TrainingJobDefinition.HyperParameterRanges.IntegerParameterRanges, b.ko.Spec.TrainingJobDefinition.HyperParameterRanges.IntegerParameterRanges)\n\t\t\t}\n\t\t}\n\t\tif !reflect.DeepEqual(a.ko.Spec.TrainingJobDefinition.InputDataConfig, b.ko.Spec.TrainingJobDefinition.InputDataConfig) {\n\t\t\tdelta.Add(\"Spec.TrainingJobDefinition.InputDataConfig\", a.ko.Spec.TrainingJobDefinition.InputDataConfig, b.ko.Spec.TrainingJobDefinition.InputDataConfig)\n\t\t}\n\t\tif 
ackcompare.HasNilDifference(a.ko.Spec.TrainingJobDefinition.OutputDataConfig, b.ko.Spec.TrainingJobDefinition.OutputDataConfig) {\n\t\t\tdelta.Add(\"Spec.TrainingJobDefinition.OutputDataConfig\", a.ko.Spec.TrainingJobDefinition.OutputDataConfig, b.ko.Spec.TrainingJobDefinition.OutputDataConfig)\n\t\t} else if a.ko.Spec.TrainingJobDefinition.OutputDataConfig != nil && b.ko.Spec.TrainingJobDefinition.OutputDataConfig != nil {\n\t\t\tif ackcompare.HasNilDifference(a.ko.Spec.TrainingJobDefinition.OutputDataConfig.KMSKeyID, b.ko.Spec.TrainingJobDefinition.OutputDataConfig.KMSKeyID) {\n\t\t\t\tdelta.Add(\"Spec.TrainingJobDefinition.OutputDataConfig.KMSKeyID\", a.ko.Spec.TrainingJobDefinition.OutputDataConfig.KMSKeyID, b.ko.Spec.TrainingJobDefinition.OutputDataConfig.KMSKeyID)\n\t\t\t} else if a.ko.Spec.TrainingJobDefinition.OutputDataConfig.KMSKeyID != nil && b.ko.Spec.TrainingJobDefinition.OutputDataConfig.KMSKeyID != nil {\n\t\t\t\tif *a.ko.Spec.TrainingJobDefinition.OutputDataConfig.KMSKeyID != *b.ko.Spec.TrainingJobDefinition.OutputDataConfig.KMSKeyID {\n\t\t\t\t\tdelta.Add(\"Spec.TrainingJobDefinition.OutputDataConfig.KMSKeyID\", a.ko.Spec.TrainingJobDefinition.OutputDataConfig.KMSKeyID, b.ko.Spec.TrainingJobDefinition.OutputDataConfig.KMSKeyID)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif ackcompare.HasNilDifference(a.ko.Spec.TrainingJobDefinition.OutputDataConfig.S3OutputPath, b.ko.Spec.TrainingJobDefinition.OutputDataConfig.S3OutputPath) {\n\t\t\t\tdelta.Add(\"Spec.TrainingJobDefinition.OutputDataConfig.S3OutputPath\", a.ko.Spec.TrainingJobDefinition.OutputDataConfig.S3OutputPath, b.ko.Spec.TrainingJobDefinition.OutputDataConfig.S3OutputPath)\n\t\t\t} else if a.ko.Spec.TrainingJobDefinition.OutputDataConfig.S3OutputPath != nil && b.ko.Spec.TrainingJobDefinition.OutputDataConfig.S3OutputPath != nil {\n\t\t\t\tif *a.ko.Spec.TrainingJobDefinition.OutputDataConfig.S3OutputPath != *b.ko.Spec.TrainingJobDefinition.OutputDataConfig.S3OutputPath {\n\t\t\t\t\tdelta.Add(\"Spec.TrainingJobDefinition.OutputDataConfig.S3OutputPath\", a.ko.Spec.TrainingJobDefinition.OutputDataConfig.S3OutputPath, b.ko.Spec.TrainingJobDefinition.OutputDataConfig.S3OutputPath)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif ackcompare.HasNilDifference(a.ko.Spec.TrainingJobDefinition.ResourceConfig, b.ko.Spec.TrainingJobDefinition.ResourceConfig) {\n\t\t\tdelta.Add(\"Spec.TrainingJobDefinition.ResourceConfig\", a.ko.Spec.TrainingJobDefinition.ResourceConfig, b.ko.Spec.TrainingJobDefinition.ResourceConfig)\n\t\t} else if a.ko.Spec.TrainingJobDefinition.ResourceConfig != nil && b.ko.Spec.TrainingJobDefinition.ResourceConfig != nil {\n\t\t\tif ackcompare.HasNilDifference(a.ko.Spec.TrainingJobDefinition.ResourceConfig.InstanceCount, b.ko.Spec.TrainingJobDefinition.ResourceConfig.InstanceCount) {\n\t\t\t\tdelta.Add(\"Spec.TrainingJobDefinition.ResourceConfig.InstanceCount\", a.ko.Spec.TrainingJobDefinition.ResourceConfig.InstanceCount, b.ko.Spec.TrainingJobDefinition.ResourceConfig.InstanceCount)\n\t\t\t} else if a.ko.Spec.TrainingJobDefinition.ResourceConfig.InstanceCount != nil && b.ko.Spec.TrainingJobDefinition.ResourceConfig.InstanceCount != nil {\n\t\t\t\tif *a.ko.Spec.TrainingJobDefinition.ResourceConfig.InstanceCount != *b.ko.Spec.TrainingJobDefinition.ResourceConfig.InstanceCount {\n\t\t\t\t\tdelta.Add(\"Spec.TrainingJobDefinition.ResourceConfig.InstanceCount\", a.ko.Spec.TrainingJobDefinition.ResourceConfig.InstanceCount, b.ko.Spec.TrainingJobDefinition.ResourceConfig.InstanceCount)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif 
ackcompare.HasNilDifference(a.ko.Spec.TrainingJobDefinition.ResourceConfig.InstanceType, b.ko.Spec.TrainingJobDefinition.ResourceConfig.InstanceType) {\n\t\t\t\tdelta.Add(\"Spec.TrainingJobDefinition.ResourceConfig.InstanceType\", a.ko.Spec.TrainingJobDefinition.ResourceConfig.InstanceType, b.ko.Spec.TrainingJobDefinition.ResourceConfig.InstanceType)\n\t\t\t} else if a.ko.Spec.TrainingJobDefinition.ResourceConfig.InstanceType != nil && b.ko.Spec.TrainingJobDefinition.ResourceConfig.InstanceType != nil {\n\t\t\t\tif *a.ko.Spec.TrainingJobDefinition.ResourceConfig.InstanceType != *b.ko.Spec.TrainingJobDefinition.ResourceConfig.InstanceType {\n\t\t\t\t\tdelta.Add(\"Spec.TrainingJobDefinition.ResourceConfig.InstanceType\", a.ko.Spec.TrainingJobDefinition.ResourceConfig.InstanceType, b.ko.Spec.TrainingJobDefinition.ResourceConfig.InstanceType)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif ackcompare.HasNilDifference(a.ko.Spec.TrainingJobDefinition.ResourceConfig.VolumeKMSKeyID, b.ko.Spec.TrainingJobDefinition.ResourceConfig.VolumeKMSKeyID) {\n\t\t\t\tdelta.Add(\"Spec.TrainingJobDefinition.ResourceConfig.VolumeKMSKeyID\", a.ko.Spec.TrainingJobDefinition.ResourceConfig.VolumeKMSKeyID, b.ko.Spec.TrainingJobDefinition.ResourceConfig.VolumeKMSKeyID)\n\t\t\t} else if a.ko.Spec.TrainingJobDefinition.ResourceConfig.VolumeKMSKeyID != nil && b.ko.Spec.TrainingJobDefinition.ResourceConfig.VolumeKMSKeyID != nil {\n\t\t\t\tif *a.ko.Spec.TrainingJobDefinition.ResourceConfig.VolumeKMSKeyID != *b.ko.Spec.TrainingJobDefinition.ResourceConfig.VolumeKMSKeyID {\n\t\t\t\t\tdelta.Add(\"Spec.TrainingJobDefinition.ResourceConfig.VolumeKMSKeyID\", a.ko.Spec.TrainingJobDefinition.ResourceConfig.VolumeKMSKeyID, b.ko.Spec.TrainingJobDefinition.ResourceConfig.VolumeKMSKeyID)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif ackcompare.HasNilDifference(a.ko.Spec.TrainingJobDefinition.ResourceConfig.VolumeSizeInGB, b.ko.Spec.TrainingJobDefinition.ResourceConfig.VolumeSizeInGB) {\n\t\t\t\tdelta.Add(\"Spec.TrainingJobDefinition.ResourceConfig.VolumeSizeInGB\", a.ko.Spec.TrainingJobDefinition.ResourceConfig.VolumeSizeInGB, b.ko.Spec.TrainingJobDefinition.ResourceConfig.VolumeSizeInGB)\n\t\t\t} else if a.ko.Spec.TrainingJobDefinition.ResourceConfig.VolumeSizeInGB != nil && b.ko.Spec.TrainingJobDefinition.ResourceConfig.VolumeSizeInGB != nil {\n\t\t\t\tif *a.ko.Spec.TrainingJobDefinition.ResourceConfig.VolumeSizeInGB != *b.ko.Spec.TrainingJobDefinition.ResourceConfig.VolumeSizeInGB {\n\t\t\t\t\tdelta.Add(\"Spec.TrainingJobDefinition.ResourceConfig.VolumeSizeInGB\", a.ko.Spec.TrainingJobDefinition.ResourceConfig.VolumeSizeInGB, b.ko.Spec.TrainingJobDefinition.ResourceConfig.VolumeSizeInGB)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif ackcompare.HasNilDifference(a.ko.Spec.TrainingJobDefinition.RoleARN, b.ko.Spec.TrainingJobDefinition.RoleARN) {\n\t\t\tdelta.Add(\"Spec.TrainingJobDefinition.RoleARN\", a.ko.Spec.TrainingJobDefinition.RoleARN, b.ko.Spec.TrainingJobDefinition.RoleARN)\n\t\t} else if a.ko.Spec.TrainingJobDefinition.RoleARN != nil && b.ko.Spec.TrainingJobDefinition.RoleARN != nil {\n\t\t\tif *a.ko.Spec.TrainingJobDefinition.RoleARN != *b.ko.Spec.TrainingJobDefinition.RoleARN {\n\t\t\t\tdelta.Add(\"Spec.TrainingJobDefinition.RoleARN\", a.ko.Spec.TrainingJobDefinition.RoleARN, b.ko.Spec.TrainingJobDefinition.RoleARN)\n\t\t\t}\n\t\t}\n\t\tif ackcompare.HasNilDifference(a.ko.Spec.TrainingJobDefinition.StaticHyperParameters, b.ko.Spec.TrainingJobDefinition.StaticHyperParameters) {\n\t\t\tdelta.Add(\"Spec.TrainingJobDefinition.StaticHyperParameters\", 
a.ko.Spec.TrainingJobDefinition.StaticHyperParameters, b.ko.Spec.TrainingJobDefinition.StaticHyperParameters)\n\t\t} else if a.ko.Spec.TrainingJobDefinition.StaticHyperParameters != nil && b.ko.Spec.TrainingJobDefinition.StaticHyperParameters != nil {\n\t\t\tif !ackcompare.MapStringStringPEqual(a.ko.Spec.TrainingJobDefinition.StaticHyperParameters, b.ko.Spec.TrainingJobDefinition.StaticHyperParameters) {\n\t\t\t\tdelta.Add(\"Spec.TrainingJobDefinition.StaticHyperParameters\", a.ko.Spec.TrainingJobDefinition.StaticHyperParameters, b.ko.Spec.TrainingJobDefinition.StaticHyperParameters)\n\t\t\t}\n\t\t}\n\t\tif ackcompare.HasNilDifference(a.ko.Spec.TrainingJobDefinition.StoppingCondition, b.ko.Spec.TrainingJobDefinition.StoppingCondition) {\n\t\t\tdelta.Add(\"Spec.TrainingJobDefinition.StoppingCondition\", a.ko.Spec.TrainingJobDefinition.StoppingCondition, b.ko.Spec.TrainingJobDefinition.StoppingCondition)\n\t\t} else if a.ko.Spec.TrainingJobDefinition.StoppingCondition != nil && b.ko.Spec.TrainingJobDefinition.StoppingCondition != nil {\n\t\t\tif ackcompare.HasNilDifference(a.ko.Spec.TrainingJobDefinition.StoppingCondition.MaxRuntimeInSeconds, b.ko.Spec.TrainingJobDefinition.StoppingCondition.MaxRuntimeInSeconds) {\n\t\t\t\tdelta.Add(\"Spec.TrainingJobDefinition.StoppingCondition.MaxRuntimeInSeconds\", a.ko.Spec.TrainingJobDefinition.StoppingCondition.MaxRuntimeInSeconds, b.ko.Spec.TrainingJobDefinition.StoppingCondition.MaxRuntimeInSeconds)\n\t\t\t} else if a.ko.Spec.TrainingJobDefinition.StoppingCondition.MaxRuntimeInSeconds != nil && b.ko.Spec.TrainingJobDefinition.StoppingCondition.MaxRuntimeInSeconds != nil {\n\t\t\t\tif *a.ko.Spec.TrainingJobDefinition.StoppingCondition.MaxRuntimeInSeconds != *b.ko.Spec.TrainingJobDefinition.StoppingCondition.MaxRuntimeInSeconds {\n\t\t\t\t\tdelta.Add(\"Spec.TrainingJobDefinition.StoppingCondition.MaxRuntimeInSeconds\", a.ko.Spec.TrainingJobDefinition.StoppingCondition.MaxRuntimeInSeconds, b.ko.Spec.TrainingJobDefinition.StoppingCondition.MaxRuntimeInSeconds)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif ackcompare.HasNilDifference(a.ko.Spec.TrainingJobDefinition.StoppingCondition.MaxWaitTimeInSeconds, b.ko.Spec.TrainingJobDefinition.StoppingCondition.MaxWaitTimeInSeconds) {\n\t\t\t\tdelta.Add(\"Spec.TrainingJobDefinition.StoppingCondition.MaxWaitTimeInSeconds\", a.ko.Spec.TrainingJobDefinition.StoppingCondition.MaxWaitTimeInSeconds, b.ko.Spec.TrainingJobDefinition.StoppingCondition.MaxWaitTimeInSeconds)\n\t\t\t} else if a.ko.Spec.TrainingJobDefinition.StoppingCondition.MaxWaitTimeInSeconds != nil && b.ko.Spec.TrainingJobDefinition.StoppingCondition.MaxWaitTimeInSeconds != nil {\n\t\t\t\tif *a.ko.Spec.TrainingJobDefinition.StoppingCondition.MaxWaitTimeInSeconds != *b.ko.Spec.TrainingJobDefinition.StoppingCondition.MaxWaitTimeInSeconds {\n\t\t\t\t\tdelta.Add(\"Spec.TrainingJobDefinition.StoppingCondition.MaxWaitTimeInSeconds\", a.ko.Spec.TrainingJobDefinition.StoppingCondition.MaxWaitTimeInSeconds, b.ko.Spec.TrainingJobDefinition.StoppingCondition.MaxWaitTimeInSeconds)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif ackcompare.HasNilDifference(a.ko.Spec.TrainingJobDefinition.TuningObjective, b.ko.Spec.TrainingJobDefinition.TuningObjective) {\n\t\t\tdelta.Add(\"Spec.TrainingJobDefinition.TuningObjective\", a.ko.Spec.TrainingJobDefinition.TuningObjective, b.ko.Spec.TrainingJobDefinition.TuningObjective)\n\t\t} else if a.ko.Spec.TrainingJobDefinition.TuningObjective != nil && b.ko.Spec.TrainingJobDefinition.TuningObjective != nil {\n\t\t\tif 
ackcompare.HasNilDifference(a.ko.Spec.TrainingJobDefinition.TuningObjective.MetricName, b.ko.Spec.TrainingJobDefinition.TuningObjective.MetricName) {\n\t\t\t\tdelta.Add(\"Spec.TrainingJobDefinition.TuningObjective.MetricName\", a.ko.Spec.TrainingJobDefinition.TuningObjective.MetricName, b.ko.Spec.TrainingJobDefinition.TuningObjective.MetricName)\n\t\t\t} else if a.ko.Spec.TrainingJobDefinition.TuningObjective.MetricName != nil && b.ko.Spec.TrainingJobDefinition.TuningObjective.MetricName != nil {\n\t\t\t\tif *a.ko.Spec.TrainingJobDefinition.TuningObjective.MetricName != *b.ko.Spec.TrainingJobDefinition.TuningObjective.MetricName {\n\t\t\t\t\tdelta.Add(\"Spec.TrainingJobDefinition.TuningObjective.MetricName\", a.ko.Spec.TrainingJobDefinition.TuningObjective.MetricName, b.ko.Spec.TrainingJobDefinition.TuningObjective.MetricName)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif ackcompare.HasNilDifference(a.ko.Spec.TrainingJobDefinition.TuningObjective.Type, b.ko.Spec.TrainingJobDefinition.TuningObjective.Type) {\n\t\t\t\tdelta.Add(\"Spec.TrainingJobDefinition.TuningObjective.Type\", a.ko.Spec.TrainingJobDefinition.TuningObjective.Type, b.ko.Spec.TrainingJobDefinition.TuningObjective.Type)\n\t\t\t} else if a.ko.Spec.TrainingJobDefinition.TuningObjective.Type != nil && b.ko.Spec.TrainingJobDefinition.TuningObjective.Type != nil {\n\t\t\t\tif *a.ko.Spec.TrainingJobDefinition.TuningObjective.Type != *b.ko.Spec.TrainingJobDefinition.TuningObjective.Type {\n\t\t\t\t\tdelta.Add(\"Spec.TrainingJobDefinition.TuningObjective.Type\", a.ko.Spec.TrainingJobDefinition.TuningObjective.Type, b.ko.Spec.TrainingJobDefinition.TuningObjective.Type)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif ackcompare.HasNilDifference(a.ko.Spec.TrainingJobDefinition.VPCConfig, b.ko.Spec.TrainingJobDefinition.VPCConfig) {\n\t\t\tdelta.Add(\"Spec.TrainingJobDefinition.VPCConfig\", a.ko.Spec.TrainingJobDefinition.VPCConfig, b.ko.Spec.TrainingJobDefinition.VPCConfig)\n\t\t} else if a.ko.Spec.TrainingJobDefinition.VPCConfig != nil && b.ko.Spec.TrainingJobDefinition.VPCConfig != nil {\n\t\t\tif !ackcompare.SliceStringPEqual(a.ko.Spec.TrainingJobDefinition.VPCConfig.SecurityGroupIDs, b.ko.Spec.TrainingJobDefinition.VPCConfig.SecurityGroupIDs) {\n\t\t\t\tdelta.Add(\"Spec.TrainingJobDefinition.VPCConfig.SecurityGroupIDs\", a.ko.Spec.TrainingJobDefinition.VPCConfig.SecurityGroupIDs, b.ko.Spec.TrainingJobDefinition.VPCConfig.SecurityGroupIDs)\n\t\t\t}\n\t\t\tif !ackcompare.SliceStringPEqual(a.ko.Spec.TrainingJobDefinition.VPCConfig.Subnets, b.ko.Spec.TrainingJobDefinition.VPCConfig.Subnets) {\n\t\t\t\tdelta.Add(\"Spec.TrainingJobDefinition.VPCConfig.Subnets\", a.ko.Spec.TrainingJobDefinition.VPCConfig.Subnets, b.ko.Spec.TrainingJobDefinition.VPCConfig.Subnets)\n\t\t\t}\n\t\t}\n\t}\n\tif !reflect.DeepEqual(a.ko.Spec.TrainingJobDefinitions, b.ko.Spec.TrainingJobDefinitions) {\n\t\tdelta.Add(\"Spec.TrainingJobDefinitions\", a.ko.Spec.TrainingJobDefinitions, b.ko.Spec.TrainingJobDefinitions)\n\t}\n\tif ackcompare.HasNilDifference(a.ko.Spec.WarmStartConfig, b.ko.Spec.WarmStartConfig) {\n\t\tdelta.Add(\"Spec.WarmStartConfig\", a.ko.Spec.WarmStartConfig, b.ko.Spec.WarmStartConfig)\n\t} else if a.ko.Spec.WarmStartConfig != nil && b.ko.Spec.WarmStartConfig != nil {\n\t\tif !reflect.DeepEqual(a.ko.Spec.WarmStartConfig.ParentHyperParameterTuningJobs, b.ko.Spec.WarmStartConfig.ParentHyperParameterTuningJobs) {\n\t\t\tdelta.Add(\"Spec.WarmStartConfig.ParentHyperParameterTuningJobs\", a.ko.Spec.WarmStartConfig.ParentHyperParameterTuningJobs, 
b.ko.Spec.WarmStartConfig.ParentHyperParameterTuningJobs)\n\t\t}\n\t\tif ackcompare.HasNilDifference(a.ko.Spec.WarmStartConfig.WarmStartType, b.ko.Spec.WarmStartConfig.WarmStartType) {\n\t\t\tdelta.Add(\"Spec.WarmStartConfig.WarmStartType\", a.ko.Spec.WarmStartConfig.WarmStartType, b.ko.Spec.WarmStartConfig.WarmStartType)\n\t\t} else if a.ko.Spec.WarmStartConfig.WarmStartType != nil && b.ko.Spec.WarmStartConfig.WarmStartType != nil {\n\t\t\tif *a.ko.Spec.WarmStartConfig.WarmStartType != *b.ko.Spec.WarmStartConfig.WarmStartType {\n\t\t\t\tdelta.Add(\"Spec.WarmStartConfig.WarmStartType\", a.ko.Spec.WarmStartConfig.WarmStartType, b.ko.Spec.WarmStartConfig.WarmStartType)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn delta\n}", "func New(config *types.Config, logger *logrus.Logger) (*Converter, error) {\n\n\t// Load the OpenAPI spec:\n\tspec, err := openapi2proto.LoadFile(config.SpecPath)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"Unable to load spec (%s)\", config.SpecPath)\n\t}\n\n\t// Make sure the provided spec is really OpenAPI 2.x:\n\tif !strings.HasPrefix(spec.Swagger, \"2\") {\n\t\treturn nil, fmt.Errorf(\"This spec (%s) is not OpenAPI 2.x\", spec.Swagger)\n\t}\n\n\tlogger.WithField(\"title\", spec.Info.Title).WithField(\"version\", spec.Info.Version).Info(\"Ready to convert Swagger / OpenAPI2\")\n\tlogger.WithField(\"description\", spec.Info.Description).Trace(\"Description\")\n\n\t// Return a new *Converter:\n\treturn &Converter{\n\t\tspec: spec,\n\t\tconfig: config,\n\t\tlogger: logger,\n\t\tnestedAdditionalProperties: make(map[string]json.RawMessage),\n\t}, nil\n}", "func (r *ReconcileDescheduler) createJob(descheduler *deschedulerv1alpha1.Descheduler) (*batch.Job, error) {\n\tactiveDeadline := int64(100)\n\tlog.Printf(\"Creating descheduler job\")\n\tjob := &batch.Job{\n\t\tTypeMeta: metav1.TypeMeta{\n\t\t\tKind: \"Job\",\n\t\t\tAPIVersion: batch.SchemeGroupVersion.String(),\n\t\t},\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: descheduler.Name,\n\t\t\tNamespace: descheduler.Namespace,\n\t\t},\n\t\tSpec: batch.JobSpec{\n\t\t\tActiveDeadlineSeconds: &activeDeadline,\n\t\t\tTemplate: v1.PodTemplateSpec{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tName: \"descheduler-job-spec\",\n\t\t\t\t},\n\t\t\t\tSpec: v1.PodSpec{\n\t\t\t\t\tVolumes: []v1.Volume{{\n\t\t\t\t\t\tName: \"policy-volume\",\n\t\t\t\t\t\tVolumeSource: v1.VolumeSource{\n\t\t\t\t\t\t\tConfigMap: &v1.ConfigMapVolumeSource{\n\t\t\t\t\t\t\t\tLocalObjectReference: v1.LocalObjectReference{\n\t\t\t\t\t\t\t\t\tName: descheduler.Name,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tRestartPolicy: \"Never\",\n\t\t\t\t\tContainers: []v1.Container{{\n\t\t\t\t\t\tName: \"openshift-descheduler\",\n\t\t\t\t\t\tImage: \"registry.svc.ci.openshift.org/openshift/origin-v4.0:descheduler\", // TODO: Make this configurable too.\n\t\t\t\t\t\tPorts: []v1.ContainerPort{{ContainerPort: 80}},\n\t\t\t\t\t\tResources: v1.ResourceRequirements{\n\t\t\t\t\t\t\tLimits: v1.ResourceList{\n\t\t\t\t\t\t\t\tv1.ResourceCPU: resource.MustParse(\"100m\"),\n\t\t\t\t\t\t\t\tv1.ResourceMemory: resource.MustParse(\"500Mi\"),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tRequests: v1.ResourceList{\n\t\t\t\t\t\t\t\tv1.ResourceCPU: resource.MustParse(\"100m\"),\n\t\t\t\t\t\t\t\tv1.ResourceMemory: resource.MustParse(\"500Mi\"),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tCommand: []string{\"/bin/descheduler\", \"--policy-config-file\", \"/policy-dir/policy.yaml\"},\n\t\t\t\t\t\tVolumeMounts: 
[]v1.VolumeMount{{\n\t\t\t\t\t\t\tMountPath: \"/policy-dir\",\n\t\t\t\t\t\t\tName: \"policy-volume\",\n\t\t\t\t\t\t}},\n\t\t\t\t\t}},\n\t\t\t\t\tServiceAccountName: \"openshift-descheduler\", // TODO: This is hardcoded as of now, find a way to reference it from rbac.yaml.\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\terr := controllerutil.SetControllerReference(descheduler, job, r.scheme)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Error setting owner references %v\", err)\n\t}\n\treturn job, nil\n}", "func newOOMv2Poller(publisher shim.Publisher) (oomPoller, error) {\n\treturn &watcherV2{\n\t\titemCh: make(chan itemV2),\n\t\tpublisher: publisher,\n\t}, nil\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\tsubscriberMap := make(map[string]appv1alpha1.Subscriber)\n\treturn &ReconcileSubscription{client: mgr.GetClient(), scheme: mgr.GetScheme(), subscriberMap: subscriberMap}\n}", "func New(id string) *Spec {\n\treturn &Spec{ID: id, Target: make(map[string]string)}\n}", "func (a *API) CreateOutlierReport(cfg *OutlierReport) (*OutlierReport, error) {\n\tif cfg == nil {\n\t\treturn nil, fmt.Errorf(\"Invalid outlier report config [nil]\")\n\t}\n\n\tjsonCfg, err := json.Marshal(cfg)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif a.Debug {\n\t\ta.Log.Printf(\"[DEBUG] create outlier report, sending JSON: %s\", string(jsonCfg))\n\t}\n\n\tresult, err := a.Post(config.OutlierReportPrefix, jsonCfg)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treport := &OutlierReport{}\n\tif err := json.Unmarshal(result, report); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn report, nil\n}", "func newAddrBook(cfg config.SwarmConfig, path string, logger log.Log) *addrBook {\n\t//TODO use config for const params.\n\tam := addrBook{\n\t\tlogger: logger,\n\t\tpath: path,\n\t\tpeersFileName: cfg.PeersFile,\n\t\trand: rand.New(rand.NewSource(time.Now().UnixNano())),\n\t\tquit: make(chan struct{}),\n\t}\n\tam.reset()\n\treturn &am\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\tolmClientset, err := olmclient.NewForConfig(mgr.GetConfig())\n\tif err != nil {\n\t\tklog.Error(\"Initialize the OLM client failed: \", err)\n\t\treturn nil\n\t}\n\treturn &ReconcileOperandRequest{\n\t\tclient: mgr.GetClient(),\n\t\trecorder: mgr.GetEventRecorderFor(\"OperandRequest\"),\n\t\tscheme: mgr.GetScheme(),\n\t\tolmClient: olmClientset}\n}", "func (server *FlexibleServer_Spec) ConvertSpecTo(destination genruntime.ConvertibleSpec) error {\n\tif destination == server {\n\t\treturn errors.New(\"attempted conversion between unrelated implementations of github.com/Azure/azure-service-operator/v2/pkg/genruntime/ConvertibleSpec\")\n\t}\n\n\treturn destination.ConvertSpecFrom(server)\n}", "func NewAUDIT(config configuration.CONFIGURATION) *AUDIT_IMPL {\r\n client := new(AUDIT_IMPL)\r\n client.config = config\r\n return client\r\n}", "func testSpec1() *v1.EnvironmentSpec {\n\treturn &v1.EnvironmentSpec{\n\t\tInfra: v1.InfraSpec{\n\t\t\tAZ: v1.AZSpec{\n\t\t\t\tSubscription: []v1.AZSubscription{\n\t\t\t\t\t{Name: \"dummy\", ID: \"12345\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\tSource: v1.SourceSpec{\n\t\t\t\tType: \"local\",\n\t\t\t\tURL: \"../e2e/testdata/terraform\", // relative to dir containing this _test.go file.\n\t\t\t},\n\t\t\tMain: \"main.tf\",\n\t\t},\n\t\tDefaults: v1.ClusterSpec{\n\t\t\tInfra: v1.ClusterInfraSpec{\n\t\t\t\tX: map[string]string{\n\t\t\t\t\t\"overridden\": \"default\",\n\t\t\t\t\t\"notOverridden\": \"default\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tAddons: 
v1.ClusterAddonSpec{\n\t\t\t\tSource: v1.SourceSpec{\n\t\t\t\t\tType: \"local\",\n\t\t\t\t\tURL: \"../e2e/testdata/addons\", // relative to dir containing this _test.go file.\n\t\t\t\t},\n\t\t\t\tJobs: []string{\n\t\t\t\t\t\"cluster/local/minikube/all.yaml\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tClusters: []v1.ClusterSpec{\n\t\t\t{\n\t\t\t\tName: \"cpe\",\n\t\t\t\tInfra: v1.ClusterInfraSpec{\n\t\t\t\t\tX: map[string]string{\n\t\t\t\t\t\t\"overridden\": \"cpe-cluster\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}, {\n\t\t\t\tName: \"second\",\n\t\t\t\tInfra: v1.ClusterInfraSpec{\n\t\t\t\t\tX: map[string]string{\n\t\t\t\t\t\t\"overridden\": \"second-cluster\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}", "func (s *BaseMySqlParserListener) EnterQuerySpecification2(ctx *QuerySpecification2Context) {}", "func PrometheusSpec() *PrometheusSpecApplyConfiguration {\n\treturn &PrometheusSpecApplyConfiguration{}\n}", "func reconcilerCtor() adaptesting.Ctor {\n\treturn func(t *testing.T, ctx context.Context, tr *rt.TableRow, ls *adaptesting.Listers) controller.Reconciler {\n\n\t\tsrcClienset := fakeinjectionclient.Get(ctx)\n\n\t\ta := &adapter{\n\t\t\tlogger: logtesting.TestLogger(t),\n\t\t\tceClient: adaptertest.NewTestClient(),\n\t\t\tsnsCg: snsClientGetterFromContext(ctx),\n\t\t\trouter: &router.Router{},\n\t\t\tstatusPatcher: status.NewPatcher(tComponent,\n\t\t\t\tsrcClienset.SourcesV1alpha1().AWSSNSSources(tNs),\n\t\t\t),\n\t\t}\n\n\t\t// inject adapter into test data so that table tests can perform\n\t\t// assertions on it\n\t\tif tr.OtherTestData == nil {\n\t\t\ttr.OtherTestData = make(map[string]interface{}, 1)\n\t\t}\n\t\ttr.OtherTestData[testAdapterDataKey] = a\n\n\t\tr := &Reconciler{\n\t\t\tadapter: a,\n\t\t}\n\n\t\treturn reconcilerv1alpha1.NewReconciler(ctx, logging.FromContext(ctx),\n\t\t\tsrcClienset, ls.GetAWSSNSSourceLister(),\n\t\t\tcontroller.GetEventRecorder(ctx), r)\n\t}\n}", "func newConfigAuditReports(c *AquasecurityV1alpha1Client, namespace string) *configAuditReports {\n\treturn &configAuditReports{\n\t\tclient: c.RESTClient(),\n\t\tns: namespace,\n\t}\n}", "func Init(hc discovery.HealthCheck, topoServer *topo.Server, serv srvtopo.Server, statsName, cell string, retryCount int, tabletTypesToWait []topodatapb.TabletType) *L2VTGate {\n\tif l2VTGate != nil {\n\t\tlog.Fatalf(\"L2VTGate already initialized\")\n\t}\n\n\ttabletCallErrorCountStatsName := \"\"\n\tif statsName != \"\" {\n\t\ttabletCallErrorCountStatsName = statsName + \"ErrorCount\"\n\t}\n\n\tgw := gateway.GetCreator()(hc, topoServer, serv, cell, retryCount)\n\tif err := gateway.WaitForTablets(gw, tabletTypesToWait); err != nil {\n\t\tlog.Fatalf(\"gateway.WaitForTablets failed: %v\", err)\n\t}\n\n\tl2VTGate = &L2VTGate{\n\t\ttimings: stats.NewMultiTimings(statsName, []string{\"Operation\", \"Keyspace\", \"ShardName\", \"DbType\"}),\n\t\ttabletCallErrorCount: stats.NewMultiCounters(tabletCallErrorCountStatsName, []string{\"Operation\", \"Keyspace\", \"ShardName\", \"DbType\"}),\n\t\tgateway: gw,\n\t}\n\tl2VTGate.QueryService = queryservice.Wrap(\n\t\tgw,\n\t\tfunc(ctx context.Context, target *querypb.Target, conn queryservice.QueryService, name string, inTransaction bool, inner func(context.Context, *querypb.Target, queryservice.QueryService) (error, bool)) (err error) {\n\t\t\tif target != nil {\n\t\t\t\tstartTime, statsKey := l2VTGate.startAction(name, target)\n\t\t\t\tdefer l2VTGate.endAction(startTime, statsKey, &err)\n\t\t\t}\n\t\t\terr, _ = inner(ctx, target, conn)\n\t\t\treturn 
err\n\t\t},\n\t)\n\tservenv.OnRun(func() {\n\t\tfor _, f := range RegisterL2VTGates {\n\t\t\tf(l2VTGate)\n\t\t}\n\t})\n\treturn l2VTGate\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &ReconcileDeploymentConfig{Client: mgr.GetClient(), scheme: mgr.GetScheme()}\n}", "func newReconciler(mgr manager.Manager, opts options.AddOptions) reconcile.Reconciler {\n\tr := &ReconcileTiers{\n\t\tclient: mgr.GetClient(),\n\t\tscheme: mgr.GetScheme(),\n\t\tprovider: opts.DetectedProvider,\n\t\tstatus: status.New(mgr.GetClient(), \"tiers\", opts.KubernetesVersion),\n\t}\n\tr.status.Run(opts.ShutdownContext)\n\treturn r\n}", "func CreatePlanSpec(spec *PlanSpec) *plan.Spec {\n\treturn createPlanSpec(spec.Nodes, spec.Edges, spec.Resources, spec.Now)\n}", "func newReconciler(mgr manager.Manager, certs *certs.Certs) reconcile.Reconciler {\n\treturn &ReconcileOutgoingPortal{client: mgr.GetClient(), scheme: mgr.GetScheme(), certs: certs}\n}", "func newReconciler(mgr manager.Manager, certs *certs.Certs) reconcile.Reconciler {\n\treturn &ReconcileOutgoingPortal{client: mgr.GetClient(), scheme: mgr.GetScheme(), certs: certs}\n}", "func getResMgr() *fields {\n\tvar resMgr fields\n\tresMgr.KVStore = &db.Backend{\n\t\tClient: &MockResKVClient{},\n\t}\n\tresMgr.PonRsrMgr = &ponrmgr.PONResourceManager{}\n\tranges := make(map[string]interface{})\n\tsharedIdxByType := make(map[string]string)\n\tsharedIdxByType[\"ALLOC_ID\"] = \"ALLOC_ID\"\n\tsharedIdxByType[\"ONU_ID\"] = \"ONU_ID\"\n\tsharedIdxByType[\"GEMPORT_ID\"] = \"GEMPORT_ID\"\n\tsharedIdxByType[\"FLOW_ID\"] = \"FLOW_ID\"\n\tranges[\"ONU_ID\"] = uint32(0)\n\tranges[\"GEMPORT_ID\"] = uint32(0)\n\tranges[\"ALLOC_ID\"] = uint32(0)\n\tranges[\"FLOW_ID\"] = uint32(0)\n\tranges[\"onu_id_shared\"] = uint32(0)\n\tranges[\"alloc_id_shared\"] = uint32(0)\n\tranges[\"gemport_id_shared\"] = uint32(0)\n\tranges[\"flow_id_shared\"] = uint32(0)\n\tresMgr.NumOfPonPorts = 16\n\tresMgr.DevInfo = &openolt.DeviceInfo{PonPorts: 16}\n\tresMgr.PonRsrMgr.DeviceID = \"onu-1\"\n\tresMgr.PonRsrMgr.IntfIDs = []uint32{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}\n\tresMgr.PonRsrMgr.KVStore = &db.Backend{\n\t\tClient: &MockResKVClient{},\n\t}\n\tresMgr.PonRsrMgr.Technology = \"XGS-PON\"\n\tresMgr.PonRsrMgr.PonResourceRanges = ranges\n\tresMgr.PonRsrMgr.SharedIdxByType = sharedIdxByType\n\tresMgr.TechProfileRef = mocks.MockTechProfile{}\n\n\t/*\n\t\ttpMgr, err := tp.NewTechProfile(ctx, resMgr.PonRsrMgr, \"etcd\", \"127.0.0.1\", \"/\")\n\t\tif err != nil {\n\t\t\tlogger.Fatal(ctx, err.Error())\n\t\t}\n\t*/\n\n\treturn &resMgr\n}", "func NewReporter(output chan string, protocol string) Reporter {\n\treturn Reporter{output, protocol}\n}", "func TestConfigure_UseExistingASN(t *testing.T) {\n\tASNOnDevice := \"65001\"\n\tMockLeafDeviceAdapter := mock.DeviceAdapter{\n\t\tMockGetInterfaces: func(FabricID uint, DeviceID uint, DeviceIP string) ([]domain.Interface, error) {\n\n\t\t\treturn []domain.Interface{domain.Interface{FabricID: FabricID, DeviceID: DeviceID,\n\t\t\t\tIntType: \"ethernet\", IntName: \"1/11\", Mac: \"M1\", ConfigState: \"up\"}}, nil\n\n\t\t},\n\t\tMockGetLLDPs: func(FabricID uint, DeviceID uint, DeviceIP string) ([]domain.LLDP, error) {\n\n\t\t\treturn []domain.LLDP{domain.LLDP{FabricID: FabricID, DeviceID: DeviceID,\n\t\t\t\tLocalIntType: \"ethernet\", LocalIntName: \"1/11\", LocalIntMac: \"M1\",\n\t\t\t\tRemoteIntType: \"ethernet\", RemoteIntName: \"1/22\", RemoteIntMac: \"M2\"}}, nil\n\n\t\t},\n\t\tMockGetASN: func(FabricID uint, Device uint, 
DeviceIP string) (string, error) {\n\t\t\treturn ASNOnDevice, nil\n\t\t},\n\t}\n\n\tdatabase.Setup(constants.TESTDBLocation)\n\tdefer cleanupDB(database.GetWorkingInstance())\n\n\tDatabaseRepository := gateway.DatabaseRepository{Database: database.GetWorkingInstance()}\n\tdevUC := usecase.DeviceInteractor{Db: &DatabaseRepository, DeviceAdapterFactory: mock.GetDeviceAdapterFactory(MockLeafDeviceAdapter)}\n\tdevUC.AddFabric(context.Background(), MockFabricName)\n\n\tresp, err := devUC.AddDevices(context.Background(), MockFabricName, []string{MockLeaf1IP}, []string{},\n\t\tUserName, Password, false)\n\tassert.Contains(t, resp, usecase.AddDeviceResponse{FabricName: MockFabricName, FabricID: 1, IPAddress: MockLeaf1IP, Role: usecase.LeafRole})\n\n\tassert.Nil(t, err)\n\n\tswitchConfig, err := DatabaseRepository.GetSwitchConfigOnDeviceIP(MockFabricName, MockLeaf1IP)\n\tassert.Equal(t, ASNOnDevice, switchConfig.LocalAS)\n\t//Verify ASN is already on the Switch, so need to push\n\tassert.Equal(t, domain.ConfigNone, switchConfig.ASConfigType)\n\n\t//Next Call without Device having ASN\n\tMockLeafDeviceAdapterWithoutASN := mock.DeviceAdapter{\n\t\tMockGetInterfaces: func(FabricID uint, DeviceID uint, DeviceIP string) ([]domain.Interface, error) {\n\n\t\t\treturn []domain.Interface{domain.Interface{FabricID: FabricID, DeviceID: DeviceID,\n\t\t\t\tIntType: \"ethernet\", IntName: \"1/11\", Mac: \"M1\", ConfigState: \"up\"}}, nil\n\n\t\t},\n\t\tMockGetLLDPs: func(FabricID uint, DeviceID uint, DeviceIP string) ([]domain.LLDP, error) {\n\n\t\t\treturn []domain.LLDP{domain.LLDP{FabricID: FabricID, DeviceID: DeviceID,\n\t\t\t\tLocalIntType: \"ethernet\", LocalIntName: \"1/11\", LocalIntMac: \"M1\",\n\t\t\t\tRemoteIntType: \"ethernet\", RemoteIntName: \"1/22\", RemoteIntMac: \"M2\"}}, nil\n\n\t\t},\n\t}\n\tdevUC.DeviceAdapterFactory = mock.GetDeviceAdapterFactory(MockLeafDeviceAdapterWithoutASN)\n\tresp, err = devUC.AddDevices(context.Background(), MockFabricName, []string{MockLeaf1IP}, []string{},\n\t\tUserName, Password, false)\n\tassert.Contains(t, resp, usecase.AddDeviceResponse{FabricName: MockFabricName, FabricID: 1, IPAddress: MockLeaf1IP, Role: usecase.LeafRole})\n\n\tassert.Nil(t, err)\n\tswitchConfigSecond, err := DatabaseRepository.GetSwitchConfigOnDeviceIP(MockFabricName, MockLeaf1IP)\n\tassert.Equal(t, ASNOnDevice, switchConfigSecond.LocalAS)\n\t//Verify ASN is to be created\n\tassert.Equal(t, domain.ConfigCreate, switchConfigSecond.ASConfigType)\n}", "func NewReportRoot()(*ReportRoot) {\n m := &ReportRoot{\n Entity: *NewEntity(),\n }\n return m\n}", "func (machine *VirtualMachine_Spec) ConvertSpecTo(destination genruntime.ConvertibleSpec) error {\n\tif destination == machine {\n\t\treturn errors.New(\"attempted conversion between unrelated implementations of github.com/Azure/azure-service-operator/v2/pkg/genruntime/ConvertibleSpec\")\n\t}\n\n\treturn destination.ConvertSpecFrom(machine)\n}", "func newExporter(cfg component.Config, set exporter.CreateSettings) (*baseExporter, error) {\n\toCfg := cfg.(*Config)\n\n\tif oCfg.Endpoint == \"\" {\n\t\treturn nil, errors.New(\"OTLP exporter config requires an Endpoint\")\n\t}\n\n\tuserAgent := fmt.Sprintf(\"%s/%s (%s/%s)\",\n\t\tset.BuildInfo.Description, set.BuildInfo.Version, runtime.GOOS, runtime.GOARCH)\n\n\treturn &baseExporter{config: oCfg, settings: set.TelemetrySettings, userAgent: userAgent}, nil\n}", "func (d Delegate) ServicesForSpec(jobSpec job.Job) (services []job.ServiceCtx, err error) {\n\tspec := jobSpec.BootstrapSpec\n\tif spec == nil 
{\n\t\treturn nil, errors.Errorf(\"Bootstrap.Delegate expects an *job.BootstrapSpec to be present, got %v\", jobSpec)\n\t}\n\n\tocr2Provider, err := d.relayer.NewOCR2Provider(jobSpec.ExternalJobID, &relay.OCR2ProviderArgs{\n\t\tID: spec.ID,\n\t\tContractID: spec.ContractID,\n\t\tTransmitterID: null.String{},\n\t\tRelay: spec.Relay,\n\t\tRelayConfig: spec.RelayConfig,\n\t\tIsBootstrapPeer: true,\n\t})\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"error calling 'relayer.NewOCR2Provider'\")\n\t}\n\tservices = append(services, ocr2Provider)\n\n\tconfigDB := NewDB(d.db.DB, spec.ID, d.lggr)\n\tpeerWrapper := d.peerWrapper\n\tif peerWrapper == nil {\n\t\treturn nil, errors.New(\"cannot setup OCR2 job service, libp2p peer was missing\")\n\t} else if !peerWrapper.IsStarted() {\n\t\treturn nil, errors.New(\"peerWrapper is not started. OCR2 jobs require a started and running peer. Did you forget to specify P2P_LISTEN_PORT?\")\n\t}\n\n\tloggerWith := d.lggr.Named(\"OCR\").With(\n\t\t\"contractID\", spec.ContractID,\n\t\t\"jobName\", jobSpec.Name.ValueOrZero(),\n\t\t\"jobID\", jobSpec.ID,\n\t)\n\tocrLogger := logger.NewOCRWrapper(loggerWith, true, func(msg string) {\n\t\td.lggr.ErrorIf(d.jobORM.RecordError(jobSpec.ID, msg), \"unable to record error\")\n\t})\n\n\tocr2Spec := spec.AsOCR2Spec()\n\tlc := validate.ToLocalConfig(d.cfg, ocr2Spec)\n\tif err = ocr.SanityCheckLocalConfig(lc); err != nil {\n\t\treturn nil, err\n\t}\n\td.lggr.Infow(\"OCR2 job using local config\",\n\t\t\"BlockchainTimeout\", lc.BlockchainTimeout,\n\t\t\"ContractConfigConfirmations\", lc.ContractConfigConfirmations,\n\t\t\"ContractConfigTrackerPollInterval\", lc.ContractConfigTrackerPollInterval,\n\t\t\"ContractTransmitterTransmitTimeout\", lc.ContractTransmitterTransmitTimeout,\n\t\t\"DatabaseTimeout\", lc.DatabaseTimeout,\n\t)\n\ttracker := ocr2Provider.ContractConfigTracker()\n\toffchainConfigDigester := ocr2Provider.OffchainConfigDigester()\n\n\tbootstrapNodeArgs := ocr.BootstrapperArgs{\n\t\tBootstrapperFactory: peerWrapper.Peer2,\n\t\tContractConfigTracker: tracker,\n\t\tDatabase: configDB,\n\t\tLocalConfig: lc,\n\t\tLogger: ocrLogger,\n\t\tOffchainConfigDigester: offchainConfigDigester,\n\t}\n\n\td.lggr.Debugw(\"Launching new bootstrap node\", \"args\", bootstrapNodeArgs)\n\tbootstrapper, err := ocr.NewBootstrapper(bootstrapNodeArgs)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"error calling NewBootstrapNode\")\n\t}\n\tservices = append(services, job.NewServiceAdapter(bootstrapper))\n\n\treturn services, nil\n}", "func constructReportConfiguration(provider IReportConfigProvider) *ReportConfiguration {\n\tfields, err := provider.Provide()\n\tif err != nil {\n\t\tlogger.Logger().WriteToLog(logger.Fatal, \"[ReportConfiguration | constructReportConfiguration] Error while constructing report configuration. 
Error: \", err)\n\t}\n\tconfiguration := &ReportConfiguration{\n\t\tfields: fields,\n\t}\n\treturn configuration\n}", "func TestConfigure_ReserveDifferentASNSecondTime(t *testing.T) {\n\tASNOnDevice := \"65001\"\n\tASNOnDevice2 := \"65002\"\n\n\tMockLeafDeviceAdapter := mock.DeviceAdapter{\n\t\tMockGetInterfaces: func(FabricID uint, DeviceID uint, DeviceIP string) ([]domain.Interface, error) {\n\n\t\t\treturn []domain.Interface{domain.Interface{FabricID: FabricID, DeviceID: DeviceID,\n\t\t\t\tIntType: \"ethernet\", IntName: \"1/11\", Mac: \"M1\", ConfigState: \"up\"}}, nil\n\n\t\t},\n\t\tMockGetLLDPs: func(FabricID uint, DeviceID uint, DeviceIP string) ([]domain.LLDP, error) {\n\n\t\t\treturn []domain.LLDP{domain.LLDP{FabricID: FabricID, DeviceID: DeviceID,\n\t\t\t\tLocalIntType: \"ethernet\", LocalIntName: \"1/11\", LocalIntMac: \"M1\",\n\t\t\t\tRemoteIntType: \"ethernet\", RemoteIntName: \"1/22\", RemoteIntMac: \"M2\"}}, nil\n\n\t\t},\n\t\tMockGetASN: func(FabricID uint, Device uint, DeviceIP string) (string, error) {\n\t\t\treturn ASNOnDevice, nil\n\t\t},\n\t}\n\n\tdatabase.Setup(constants.TESTDBLocation)\n\tdefer cleanupDB(database.GetWorkingInstance())\n\n\tDatabaseRepository := gateway.DatabaseRepository{Database: database.GetWorkingInstance()}\n\tdevUC := usecase.DeviceInteractor{Db: &DatabaseRepository, DeviceAdapterFactory: mock.GetDeviceAdapterFactory(MockLeafDeviceAdapter)}\n\tdevUC.AddFabric(context.Background(), MockFabricName)\n\n\tresp, err := devUC.AddDevices(context.Background(), MockFabricName, []string{MockLeaf1IP}, []string{},\n\t\tUserName, Password, false)\n\tassert.Contains(t, resp, usecase.AddDeviceResponse{FabricName: MockFabricName, FabricID: 1, IPAddress: MockLeaf1IP, Role: usecase.LeafRole})\n\n\tassert.Nil(t, err)\n\n\tswitchConfig, err := DatabaseRepository.GetSwitchConfigOnDeviceIP(MockFabricName, MockLeaf1IP)\n\tassert.Equal(t, ASNOnDevice, switchConfig.LocalAS)\n\t//Verify ASN is to be created\n\tassert.Equal(t, domain.ConfigNone, switchConfig.ASConfigType)\n\n\t//Next Call without Device having ASN\n\tMockLeafDeviceAdapterWithAnotherASN := mock.DeviceAdapter{\n\t\tMockGetInterfaces: func(FabricID uint, DeviceID uint, DeviceIP string) ([]domain.Interface, error) {\n\n\t\t\treturn []domain.Interface{domain.Interface{FabricID: FabricID, DeviceID: DeviceID,\n\t\t\t\tIntType: \"ethernet\", IntName: \"1/11\", Mac: \"M1\", ConfigState: \"up\"}}, nil\n\n\t\t},\n\t\tMockGetLLDPs: func(FabricID uint, DeviceID uint, DeviceIP string) ([]domain.LLDP, error) {\n\n\t\t\treturn []domain.LLDP{domain.LLDP{FabricID: FabricID, DeviceID: DeviceID,\n\t\t\t\tLocalIntType: \"ethernet\", LocalIntName: \"1/11\", LocalIntMac: \"M1\",\n\t\t\t\tRemoteIntType: \"ethernet\", RemoteIntName: \"1/22\", RemoteIntMac: \"M2\"}}, nil\n\n\t\t},\n\t\tMockGetASN: func(FabricID uint, Device uint, DeviceIP string) (string, error) {\n\t\t\treturn ASNOnDevice2, nil\n\t\t},\n\t}\n\tdevUC.DeviceAdapterFactory = mock.GetDeviceAdapterFactory(MockLeafDeviceAdapterWithAnotherASN)\n\n\tresp, err = devUC.AddDevices(context.Background(), MockFabricName, []string{MockLeaf1IP}, []string{},\n\t\tUserName, Password, false)\n\tassert.Contains(t, resp, usecase.AddDeviceResponse{FabricName: MockFabricName, FabricID: 1, IPAddress: MockLeaf1IP, Role: usecase.LeafRole})\n\n\tassert.Nil(t, err)\n\tswitchConfigSecond, err := DatabaseRepository.GetSwitchConfigOnDeviceIP(MockFabricName, MockLeaf1IP)\n\tassert.Equal(t, ASNOnDevice2, switchConfigSecond.LocalAS)\n\t//Since switch already has the ASN no need to push to 
switch\n\tassert.Equal(t, domain.ConfigNone, switchConfigSecond.ASConfigType)\n}", "func (constructor *Constructor) New(resource string, specs *specs.ParameterMap) (codec.Manager, error) {\n\tif specs == nil {\n\t\treturn nil, ErrUndefinedSpecs{}\n\t}\n\n\treturn &Manager{\n\t\tresource: resource,\n\t\tspecs: specs.Property,\n\t}, nil\n}", "func New(divideFactor, compareThreshold int) Diff {\n\tif divideFactor < 2 {\n\t\tdivideFactor = 2\n\t}\n\tif compareThreshold < 1 {\n\t\tcompareThreshold = 1\n\t}\n\td := &diff{\n\t\tdivideFactor: divideFactor,\n\t\tcompareThreshold: compareThreshold,\n\t}\n\td.sl = skiplist.New(d)\n\treturn d\n}", "func newInternalPlanner(\n\topName string,\n\ttxn *kv.Txn,\n\tuser security.SQLUsername,\n\tmemMetrics *MemoryMetrics,\n\texecCfg *ExecutorConfig,\n\tsessionData sessiondatapb.SessionData,\n\topts ...InternalPlannerParamsOption,\n) (*planner, func()) {\n\t// Default parameters which may be override by the supplied options.\n\tparams := &internalPlannerParams{\n\t\t// The table collection used by the internal planner does not rely on the\n\t\t// deprecatedDatabaseCache and there are no subscribers to the\n\t\t// deprecatedDatabaseCache, so we can leave it uninitialized.\n\t\t// Furthermore, we're not concerned about the efficiency of querying tables\n\t\t// with user-defined types, hence the nil hydratedTables.\n\t\tcollection: descs.NewCollection(execCfg.Settings, execCfg.LeaseManager, nil /* hydratedTables */),\n\t}\n\tfor _, opt := range opts {\n\t\topt(params)\n\t}\n\n\t// We need a context that outlives all the uses of the planner (since the\n\t// planner captures it in the EvalCtx, and so does the cleanup function that\n\t// we're going to return. We just create one here instead of asking the caller\n\t// for a ctx with this property. This is really ugly, but the alternative of\n\t// asking the caller for one is hard to explain. 
What we need is better and\n\t// separate interfaces for planning and running plans, which could take\n\t// suitable contexts.\n\tctx := logtags.AddTag(context.Background(), opName, \"\")\n\n\tsd := &sessiondata.SessionData{\n\t\tSessionData: sessionData,\n\t\tSearchPath: sessiondata.DefaultSearchPathForUser(user),\n\t\tSequenceState: sessiondata.NewSequenceState(),\n\t\tLocation: time.UTC,\n\t}\n\tsd.SessionData.Database = \"system\"\n\tsd.SessionData.UserProto = user.EncodeProto()\n\tdataMutator := &sessionDataMutator{\n\t\tdata: sd,\n\t\tdefaults: SessionDefaults(map[string]string{\n\t\t\t\"application_name\": \"crdb-internal\",\n\t\t\t\"database\": \"system\",\n\t\t}),\n\t\tsettings: execCfg.Settings,\n\t\tparamStatusUpdater: &noopParamStatusUpdater{},\n\t\tsetCurTxnReadOnly: func(bool) {},\n\t}\n\n\tvar ts time.Time\n\tif txn != nil {\n\t\treadTimestamp := txn.ReadTimestamp()\n\t\tif readTimestamp.IsEmpty() {\n\t\t\tpanic(\"makeInternalPlanner called with a transaction without timestamps\")\n\t\t}\n\t\tts = readTimestamp.GoTime()\n\t}\n\n\tp := &planner{execCfg: execCfg, alloc: &rowenc.DatumAlloc{}}\n\n\tp.txn = txn\n\tp.stmt = Statement{}\n\tp.cancelChecker = cancelchecker.NewCancelChecker(ctx)\n\tp.isInternalPlanner = true\n\n\tp.semaCtx = tree.MakeSemaContext()\n\tp.semaCtx.SearchPath = sd.SearchPath\n\tp.semaCtx.TypeResolver = p\n\n\tplannerMon := mon.NewUnlimitedMonitor(ctx,\n\t\tfmt.Sprintf(\"internal-planner.%s.%s\", user, opName),\n\t\tmon.MemoryResource,\n\t\tmemMetrics.CurBytesCount, memMetrics.MaxBytesHist,\n\t\tnoteworthyInternalMemoryUsageBytes, execCfg.Settings)\n\n\tp.extendedEvalCtx = internalExtendedEvalCtx(\n\t\tctx, sd, dataMutator, params.collection, txn, ts, ts, execCfg, plannerMon,\n\t)\n\tp.extendedEvalCtx.Planner = p\n\tp.extendedEvalCtx.PrivilegedAccessor = p\n\tp.extendedEvalCtx.SessionAccessor = p\n\tp.extendedEvalCtx.ClientNoticeSender = p\n\tp.extendedEvalCtx.Sequence = p\n\tp.extendedEvalCtx.Tenant = p\n\tp.extendedEvalCtx.JoinTokenCreator = p\n\tp.extendedEvalCtx.ClusterID = execCfg.ClusterID()\n\tp.extendedEvalCtx.ClusterName = execCfg.RPCContext.ClusterName()\n\tp.extendedEvalCtx.NodeID = execCfg.NodeID\n\tp.extendedEvalCtx.Locality = execCfg.Locality\n\n\tp.sessionDataMutator = dataMutator\n\tp.autoCommit = false\n\n\tp.extendedEvalCtx.MemMetrics = memMetrics\n\tp.extendedEvalCtx.ExecCfg = execCfg\n\tp.extendedEvalCtx.Placeholders = &p.semaCtx.Placeholders\n\tp.extendedEvalCtx.Annotations = &p.semaCtx.Annotations\n\tp.extendedEvalCtx.Descs = params.collection\n\n\tp.queryCacheSession.Init()\n\tp.optPlanningCtx.init(p)\n\n\treturn p, func() {\n\t\t// Note that we capture ctx here. This is only valid as long as we create\n\t\t// the context as explained at the top of the method.\n\n\t\t// The collection will accumulate descriptors read during planning as well\n\t\t// as type descriptors read during execution on the local node. Many users\n\t\t// of the internal planner do set the `skipCache` flag on the resolver but\n\t\t// this is not respected by type resolution underneath execution. That\n\t\t// subtle details means that the type descriptor used by execution may be\n\t\t// stale, but that must be okay. 
Correctness concerns aside, we must release\n\t\t// the leases to ensure that we don't leak a descriptor lease.\n\t\tp.Descriptors().ReleaseAll(ctx)\n\n\t\t// Stop the memory monitor.\n\t\tplannerMon.Stop(ctx)\n\t}\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\tgo setupAddressObserver(mgr, C)\n\treturn &ReconcileActiveMQArtemisAddress{client: mgr.GetClient(), scheme: mgr.GetScheme()}\n}" ]
[ "0.74342453", "0.7407351", "0.5452599", "0.52549857", "0.5217595", "0.4960075", "0.48198467", "0.4791364", "0.4668639", "0.46265647", "0.46166524", "0.46087602", "0.46001962", "0.4571639", "0.45662475", "0.4562123", "0.45615572", "0.45457885", "0.4515329", "0.451332", "0.4507463", "0.45063534", "0.45031145", "0.4496286", "0.44665655", "0.44501254", "0.44401747", "0.44224146", "0.44101915", "0.43984616", "0.43856075", "0.43754852", "0.43705395", "0.43549475", "0.43471938", "0.43407828", "0.43336082", "0.43311936", "0.43291995", "0.43270424", "0.43080032", "0.4295871", "0.42955235", "0.42927155", "0.42883673", "0.42843807", "0.42734858", "0.42720208", "0.4267105", "0.42640436", "0.42632574", "0.4262599", "0.42618644", "0.42614892", "0.4261377", "0.42585912", "0.4255168", "0.42494202", "0.42310387", "0.42291102", "0.42287102", "0.42242828", "0.42063862", "0.4203967", "0.42003918", "0.4197897", "0.41975322", "0.41857874", "0.4179423", "0.41731668", "0.4172965", "0.4162836", "0.4161971", "0.41619053", "0.4159253", "0.41567433", "0.41548112", "0.41547593", "0.41543594", "0.4151574", "0.41492593", "0.41379347", "0.41369194", "0.41294616", "0.4126719", "0.4120159", "0.4120159", "0.41188705", "0.41172484", "0.4115991", "0.41151103", "0.4110113", "0.41056433", "0.41008696", "0.4098299", "0.40946212", "0.40935963", "0.4093146", "0.40896434", "0.4088233" ]
0.7729383
0
NewPipelineSpec generates a new PipelineSpec from a pipeline.Spec
func NewPipelineSpec(spec *pipeline.Spec) PipelineSpec {
	return PipelineSpec{
		ID:           spec.ID,
		JobID:        spec.JobID,
		DotDAGSource: spec.DotDagSource,
	}
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func NewPipelineSpec(spec *pipeline.Spec) PipelineSpec {\n\treturn PipelineSpec{\n\t\tID: spec.ID,\n\t\tDotDAGSource: spec.DotDagSource,\n\t}\n}", "func (in *PipelineSpec) DeepCopy() *PipelineSpec {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(PipelineSpec)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *PipelineSpec) DeepCopy() *PipelineSpec {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(PipelineSpec)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *PipelineSpec) DeepCopy() *PipelineSpec {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(PipelineSpec)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func NewSpec(details *SpecDetails) *Spec {\n\treturn &Spec{\n\t\tDetails: details,\n\t\tServices: NewServiceList(),\n\t\tStatus: SpecWaiting,\n\t}\n}", "func NewSpec(yamlConfig string) (*Spec, error) {\n\ts := &Spec{\n\t\tyamlConfig: yamlConfig,\n\t}\n\n\tmeta := &MetaSpec{}\n\terr := yaml.Unmarshal([]byte(yamlConfig), meta)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unmarshal failed: %v\", err)\n\t}\n\tvr := v.Validate(meta, []byte(yamlConfig))\n\tif !vr.Valid() {\n\t\treturn nil, fmt.Errorf(\"validate metadata failed: \\n%s\", vr)\n\t}\n\n\trootObject, exists := objectRegistry[meta.Kind]\n\tif !exists {\n\t\treturn nil, fmt.Errorf(\"kind %s not found\", meta.Kind)\n\t}\n\n\ts.meta, s.objectSpec = meta, rootObject.DefaultSpec()\n\n\terr = yaml.Unmarshal([]byte(yamlConfig), s.objectSpec)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unmarshal failed: %v\", err)\n\t}\n\tvr = v.Validate(s.objectSpec, []byte(yamlConfig))\n\tif !vr.Valid() {\n\t\treturn nil, fmt.Errorf(\"validate spec failed: \\n%s\", vr)\n\t}\n\n\treturn s, nil\n}", "func NewSpec(api *gmail.Service, db *db.DB) (*Spec, error) {\n\tlog.SetLevel(log.DebugLevel)\n\tlog.Info(\"starting new spec\")\n\n\tbytes, err := ioutil.ReadFile(\"./spec.yaml\")\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to read file: %v\", err)\n\t}\n\n\tspec := &Spec{\n\t\tapi: api,\n\t\tdb: db,\n\t}\n\n\terr = yaml.Unmarshal(bytes, spec)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to unmarshal: %v\", err)\n\t}\n\n\treturn spec, nil\n}", "func NewFromSpec(spec *rspec.Spec) Generator {\n\treturn Generator{\n\t\tspec: spec,\n\t}\n}", "func NewSpecFactory() *SpecFactory {\n\treturn &SpecFactory{}\n}", "func New() Generator {\n\tspec := rspec.Spec{\n\t\tVersion: rspec.Version,\n\t\tPlatform: rspec.Platform{\n\t\t\tOS: runtime.GOOS,\n\t\t\tArch: runtime.GOARCH,\n\t\t},\n\t\tRoot: rspec.Root{\n\t\t\tPath: \"\",\n\t\t\tReadonly: false,\n\t\t},\n\t\tProcess: rspec.Process{\n\t\t\tTerminal: false,\n\t\t\tUser: rspec.User{},\n\t\t\tArgs: []string{\n\t\t\t\t\"sh\",\n\t\t\t},\n\t\t\tEnv: []string{\n\t\t\t\t\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\n\t\t\t\t\"TERM=xterm\",\n\t\t\t},\n\t\t\tCwd: \"/\",\n\t\t\tCapabilities: []string{\n\t\t\t\t\"CAP_CHOWN\",\n\t\t\t\t\"CAP_DAC_OVERRIDE\",\n\t\t\t\t\"CAP_FSETID\",\n\t\t\t\t\"CAP_FOWNER\",\n\t\t\t\t\"CAP_MKNOD\",\n\t\t\t\t\"CAP_NET_RAW\",\n\t\t\t\t\"CAP_SETGID\",\n\t\t\t\t\"CAP_SETUID\",\n\t\t\t\t\"CAP_SETFCAP\",\n\t\t\t\t\"CAP_SETPCAP\",\n\t\t\t\t\"CAP_NET_BIND_SERVICE\",\n\t\t\t\t\"CAP_SYS_CHROOT\",\n\t\t\t\t\"CAP_KILL\",\n\t\t\t\t\"CAP_AUDIT_WRITE\",\n\t\t\t},\n\t\t\tRlimits: []rspec.Rlimit{\n\t\t\t\t{\n\t\t\t\t\tType: \"RLIMIT_NOFILE\",\n\t\t\t\t\tHard: uint64(1024),\n\t\t\t\t\tSoft: uint64(1024),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tHostname: \"mrsdalloway\",\n\t\tMounts: []rspec.Mount{\n\t\t\t{\n\t\t\t\tDestination: 
\"/proc\",\n\t\t\t\tType: \"proc\",\n\t\t\t\tSource: \"proc\",\n\t\t\t\tOptions: nil,\n\t\t\t},\n\t\t\t{\n\t\t\t\tDestination: \"/dev\",\n\t\t\t\tType: \"tmpfs\",\n\t\t\t\tSource: \"tmpfs\",\n\t\t\t\tOptions: []string{\"nosuid\", \"strictatime\", \"mode=755\", \"size=65536k\"},\n\t\t\t},\n\t\t\t{\n\t\t\t\tDestination: \"/dev/pts\",\n\t\t\t\tType: \"devpts\",\n\t\t\t\tSource: \"devpts\",\n\t\t\t\tOptions: []string{\"nosuid\", \"noexec\", \"newinstance\", \"ptmxmode=0666\", \"mode=0620\", \"gid=5\"},\n\t\t\t},\n\t\t\t{\n\t\t\t\tDestination: \"/dev/shm\",\n\t\t\t\tType: \"tmpfs\",\n\t\t\t\tSource: \"shm\",\n\t\t\t\tOptions: []string{\"nosuid\", \"noexec\", \"nodev\", \"mode=1777\", \"size=65536k\"},\n\t\t\t},\n\t\t\t{\n\t\t\t\tDestination: \"/dev/mqueue\",\n\t\t\t\tType: \"mqueue\",\n\t\t\t\tSource: \"mqueue\",\n\t\t\t\tOptions: []string{\"nosuid\", \"noexec\", \"nodev\"},\n\t\t\t},\n\t\t\t{\n\t\t\t\tDestination: \"/sys\",\n\t\t\t\tType: \"sysfs\",\n\t\t\t\tSource: \"sysfs\",\n\t\t\t\tOptions: []string{\"nosuid\", \"noexec\", \"nodev\", \"ro\"},\n\t\t\t},\n\t\t},\n\t\tLinux: &rspec.Linux{\n\t\t\tResources: &rspec.Resources{\n\t\t\t\tDevices: []rspec.DeviceCgroup{\n\t\t\t\t\t{\n\t\t\t\t\t\tAllow: false,\n\t\t\t\t\t\tAccess: strPtr(\"rwm\"),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tNamespaces: []rspec.Namespace{\n\t\t\t\t{\n\t\t\t\t\tType: \"pid\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: \"network\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: \"ipc\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: \"uts\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: \"mount\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tDevices: []rspec.Device{},\n\t\t},\n\t}\n\treturn Generator{\n\t\tspec: &spec,\n\t}\n}", "func NewPipeline(ctx *pulumi.Context,\n\tname string, args *PipelineArgs, opts ...pulumi.ResourceOption) (*Pipeline, error) {\n\tif args == nil {\n\t\treturn nil, errors.New(\"missing one or more required arguments\")\n\t}\n\n\tif args.BootstrapConfiguration == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'BootstrapConfiguration'\")\n\t}\n\tif args.PipelineType == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'PipelineType'\")\n\t}\n\tif args.ResourceGroupName == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'ResourceGroupName'\")\n\t}\n\taliases := pulumi.Aliases([]pulumi.Alias{\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:devops/v20200713preview:Pipeline\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:devops:Pipeline\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:devops:Pipeline\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:devops/v20190701preview:Pipeline\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:devops/v20190701preview:Pipeline\"),\n\t\t},\n\t})\n\topts = append(opts, aliases)\n\tvar resource Pipeline\n\terr := ctx.RegisterResource(\"azure-native:devops/v20200713preview:Pipeline\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func NewSpecGenerator(arg string, rootfs bool) *SpecGenerator {\n\tcsc := ContainerStorageConfig{}\n\tif rootfs {\n\t\tcsc.Rootfs = arg\n\t\t// check if rootfs should use overlay\n\t\tlastColonIndex := strings.LastIndex(csc.Rootfs, \":\")\n\t\tif lastColonIndex != -1 {\n\t\t\tlastPart := csc.Rootfs[lastColonIndex+1:]\n\t\t\tif lastPart == \"O\" {\n\t\t\t\tcsc.RootfsOverlay = true\n\t\t\t\tcsc.Rootfs = csc.Rootfs[:lastColonIndex]\n\t\t\t} else if lastPart == \"idmap\" || strings.HasPrefix(lastPart, 
\"idmap=\") {\n\t\t\t\tcsc.RootfsMapping = &lastPart\n\t\t\t\tcsc.Rootfs = csc.Rootfs[:lastColonIndex]\n\t\t\t}\n\t\t}\n\t} else {\n\t\tcsc.Image = arg\n\t}\n\treturn &SpecGenerator{\n\t\tContainerStorageConfig: csc,\n\t}\n}", "func InstallNewPipeline(ctx context.Context, driver ProtocolDriver, exporterOpts ...ExporterOption) (*Exporter,\n\t*sdktrace.TracerProvider, *basic.Controller, error) {\n\n\texp, tp, cntr, err := NewExportPipeline(ctx, driver, exporterOpts...)\n\tif err != nil {\n\t\treturn nil, nil, nil, err\n\t}\n\n\totel.SetTracerProvider(tp)\n\terr = cntr.Start(ctx)\n\tif err != nil {\n\t\treturn nil, nil, nil, err\n\t}\n\n\treturn exp, tp, cntr, err\n}", "func CreatePipeline(codecName string, pipelineStr string, clockRate float32) *Pipeline {\n\t// Generate C String from Input\n\tpipelineStrUnsafe := C.CString(pipelineStr)\n\tdefer C.free(unsafe.Pointer(pipelineStrUnsafe))\n\n\t// Lock Pipelines\n\tpipelinesLock.Lock()\n\tdefer pipelinesLock.Unlock()\n\n\t// Create new Pipeline\n\tpipeline := &Pipeline{\n\t\tPipeline: C.gstreamer_create_pipeline(pipelineStrUnsafe),\n\t\tid: utils.RandSeq(5),\n\t\tcodecName: codecName,\n\t\tclockRate: clockRate,\n\t}\n\tpipeline.outputTracks = []*webrtc.Track{}\n\t// Add new Pipeline\n\tpipelines[pipeline.id] = pipeline\n\treturn pipeline\n}", "func newBuildPipeline(t gaia.PipelineType) BuildPipeline {\n\tvar bP BuildPipeline\n\n\t// Create build pipeline for given pipeline type\n\tswitch t {\n\tcase gaia.PTypeGolang:\n\t\tbP = &BuildPipelineGolang{\n\t\t\tType: t,\n\t\t}\n\tcase gaia.PTypeJava:\n\t\tbP = &BuildPipelineJava{\n\t\t\tType: t,\n\t\t}\n\tcase gaia.PTypePython:\n\t\tbP = &BuildPipelinePython{\n\t\t\tType: t,\n\t\t}\n\tcase gaia.PTypeCpp:\n\t\tbP = &BuildPipelineCpp{\n\t\t\tType: t,\n\t\t}\n\tcase gaia.PTypeRuby:\n\t\tbP = &BuildPipelineRuby{\n\t\t\tType: t,\n\t\t}\n\tcase gaia.PTypeNodeJS:\n\t\tbP = &BuildPipelineNodeJS{\n\t\t\tType: t,\n\t\t}\n\t}\n\n\treturn bP\n}", "func testPipeline() *Pipeline {\n\treturn &Pipeline{\n\t\tID: sql.NullInt64{Int64: 1, Valid: true},\n\t\tRepoID: sql.NullInt64{Int64: 1, Valid: true},\n\t\tCommit: sql.NullString{String: \"48afb5bdc41ad69bf22588491333f7cf71135163\", Valid: true},\n\t\tFlavor: sql.NullString{String: \"large\", Valid: true},\n\t\tPlatform: sql.NullString{String: \"docker\", Valid: true},\n\t\tRef: sql.NullString{String: \"refs/heads/master\", Valid: true},\n\t\tType: sql.NullString{String: constants.PipelineTypeYAML, Valid: true},\n\t\tVersion: sql.NullString{String: \"1\", Valid: true},\n\t\tExternalSecrets: sql.NullBool{Bool: false, Valid: true},\n\t\tInternalSecrets: sql.NullBool{Bool: false, Valid: true},\n\t\tServices: sql.NullBool{Bool: true, Valid: true},\n\t\tStages: sql.NullBool{Bool: false, Valid: true},\n\t\tSteps: sql.NullBool{Bool: true, Valid: true},\n\t\tTemplates: sql.NullBool{Bool: false, Valid: true},\n\t\tData: testPipelineData(),\n\t}\n}", "func (xdcrf *XDCRFactory) NewPipeline(topic string, progress_recorder common.PipelineProgressRecorder) (common.Pipeline, error) {\n\tspec, err := xdcrf.repl_spec_svc.ReplicationSpec(topic)\n\tif err != nil {\n\t\txdcrf.logger.Errorf(\"Failed to get replication specification for pipeline %v, err=%v\\n\", topic, err)\n\t\treturn nil, err\n\t}\n\txdcrf.logger.Debugf(\"replication specification = %v\\n\", spec)\n\n\tlogger_ctx := log.CopyCtx(xdcrf.default_logger_ctx)\n\tlogger_ctx.SetLogLevel(spec.Settings.LogLevel)\n\n\ttargetClusterRef, err := xdcrf.remote_cluster_svc.RemoteClusterByUuid(spec.TargetClusterUUID, false)\n\tif err != 
nil {\n\t\txdcrf.logger.Errorf(\"Error getting remote cluster with uuid=%v for pipeline %v, err=%v\\n\", spec.TargetClusterUUID, spec.Id, err)\n\t\treturn nil, err\n\t}\n\n\tnozzleType, err := xdcrf.getOutNozzleType(targetClusterRef, spec)\n\tif err != nil {\n\t\txdcrf.logger.Errorf(\"Failed to get the nozzle type for %v, err=%v\\n\", spec.Id, err)\n\t\treturn nil, err\n\t}\n\tisCapiReplication := (nozzleType == base.Capi)\n\n\tconnStr, err := xdcrf.remote_cluster_svc.GetConnectionStringForRemoteCluster(targetClusterRef, isCapiReplication)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tusername, password, httpAuthMech, certificate, sanInCertificate, clientCertificate, clientKey, err := targetClusterRef.MyCredentials()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ttargetBucketInfo, err := xdcrf.utils.GetBucketInfo(connStr, spec.TargetBucketName, username, password, httpAuthMech, certificate, sanInCertificate, clientCertificate, clientKey, xdcrf.logger)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tisTargetES := xdcrf.utils.CheckWhetherClusterIsESBasedOnBucketInfo(targetBucketInfo)\n\n\tconflictResolutionType, err := xdcrf.utils.GetConflictResolutionTypeFromBucketInfo(spec.TargetBucketName, targetBucketInfo)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// sourceCRMode is the conflict resolution mode to use when resolving conflicts for big documents at source side\n\t// capi replication always uses rev id based conflict resolution\n\tsourceCRMode := base.CRMode_RevId\n\tif !isCapiReplication {\n\t\t// for xmem replication, sourceCRMode is LWW if and only if target bucket is LWW enabled, so as to ensure that source side conflict\n\t\t// resolution and target side conflict resolution yield consistent results\n\t\tsourceCRMode = base.GetCRModeFromConflictResolutionTypeSetting(conflictResolutionType)\n\t}\n\n\txdcrf.logger.Infof(\"%v sourceCRMode=%v httpAuthMech=%v isCapiReplication=%v isTargetES=%v\\n\", topic, sourceCRMode, httpAuthMech, isCapiReplication, isTargetES)\n\n\t/**\n\t * Construct the Source nozzles\n\t * sourceNozzles - a map of DCPNozzleID -> *DCPNozzle\n\t * kv_vb_map - Map of SourceKVNode -> list of vbucket#'s that it's responsible for\n\t */\n\tsourceNozzles, kv_vb_map, err := xdcrf.constructSourceNozzles(spec, topic, isCapiReplication, logger_ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif len(sourceNozzles) == 0 {\n\t\t// no pipeline is constructed if there is no source nozzle\n\t\treturn nil, base.ErrorNoSourceNozzle\n\t}\n\n\tprogress_recorder(fmt.Sprintf(\"%v source nozzles have been constructed\", len(sourceNozzles)))\n\n\txdcrf.logger.Infof(\"%v kv_vb_map=%v\\n\", topic, kv_vb_map)\n\t/**\n\t * Construct the outgoing (Destination) nozzles\n\t * 1. outNozzles - map of ID -> actual nozzle\n\t * 2. vbNozzleMap - map of VBucket# -> nozzle to be used (to be used by router)\n\t * 3. kvVBMap - map of remote KVNodes -> vbucket# responsible for per node\n\t */\n\toutNozzles, vbNozzleMap, target_kv_vb_map, targetUserName, targetPassword, targetClusterVersion, err :=\n\t\txdcrf.constructOutgoingNozzles(spec, kv_vb_map, sourceCRMode, targetBucketInfo, targetClusterRef, isCapiReplication, isTargetES, logger_ctx)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tprogress_recorder(fmt.Sprintf(\"%v target nozzles have been constructed\", len(outNozzles)))\n\n\t// TODO construct queue parts. This will affect vbMap in router. 
may need an additional outNozzle -> downStreamPart/queue map in constructRouter\n\n\t// construct routers to be able to connect the nozzles\n\tfor _, sourceNozzle := range sourceNozzles {\n\t\tvblist := sourceNozzle.(*parts.DcpNozzle).GetVBList()\n\t\tdownStreamParts := make(map[string]common.Part)\n\t\tfor _, vb := range vblist {\n\t\t\ttargetNozzleId, ok := vbNozzleMap[vb]\n\t\t\tif !ok {\n\t\t\t\treturn nil, fmt.Errorf(\"Error constructing pipeline %v since there is no target nozzle for vb=%v\", topic, vb)\n\t\t\t}\n\n\t\t\toutNozzle, ok := outNozzles[targetNozzleId]\n\t\t\tif !ok {\n\t\t\t\treturn nil, fmt.Errorf(\"%v There is no corresponding target nozzle for vb=%v, targetNozzleId=%v\", topic, vb, targetNozzleId)\n\t\t\t}\n\t\t\tdownStreamParts[targetNozzleId] = outNozzle\n\t\t}\n\n\t\t// Construct a router - each Source nozzle has a router.\n\t\trouter, err := xdcrf.constructRouter(sourceNozzle.Id(), spec, downStreamParts, vbNozzleMap, sourceCRMode, logger_ctx)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tsourceNozzle.SetConnector(router)\n\t}\n\tprogress_recorder(\"Source nozzles have been wired to target nozzles\")\n\n\t// construct and initializes the pipeline\n\tpipeline := pp.NewPipelineWithSettingConstructor(topic, sourceNozzles, outNozzles, spec, targetClusterRef,\n\t\txdcrf.ConstructSettingsForPart, xdcrf.ConstructSettingsForConnector, xdcrf.ConstructSSLPortMap, xdcrf.ConstructUpdateSettingsForPart,\n\t\txdcrf.ConstructUpdateSettingsForConnector, xdcrf.SetStartSeqno, xdcrf.CheckpointBeforeStop, logger_ctx)\n\n\t// These listeners are the driving factors of the pipeline\n\txdcrf.registerAsyncListenersOnSources(pipeline, logger_ctx)\n\txdcrf.registerAsyncListenersOnTargets(pipeline, logger_ctx)\n\n\t// initialize component event listener map in pipeline\n\tpp.GetAllAsyncComponentEventListeners(pipeline)\n\n\t// Create PipelineContext\n\tif pipelineContext, err := pctx.NewWithSettingConstructor(pipeline, xdcrf.ConstructSettingsForService, xdcrf.ConstructUpdateSettingsForService, logger_ctx); err != nil {\n\n\t\treturn nil, err\n\t} else {\n\t\t//register services to the pipeline context, so when pipeline context starts as part of the pipeline starting, these services will start as well\n\t\tpipeline.SetRuntimeContext(pipelineContext)\n\t\terr = xdcrf.registerServices(pipeline, logger_ctx, kv_vb_map, targetUserName, targetPassword, spec.TargetBucketName, target_kv_vb_map, targetClusterRef, targetClusterVersion, isCapiReplication, isTargetES)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tprogress_recorder(\"Pipeline has been constructed\")\n\n\txdcrf.logger.Infof(\"Pipeline %v has been constructed\", topic)\n\treturn pipeline, nil\n}", "func NewPodSpec(serviceAccountName string, containers []core.Container, volumes []core.Volume, nodeSelector map[string]string) core.PodSpec {\n\treturn core.PodSpec{\n\t\tContainers: containers,\n\t\tServiceAccountName: serviceAccountName,\n\t\tVolumes: volumes,\n\t\tNodeSelector: nodeSelector,\n\t}\n}", "func NewPipeline(transformer Transformer, classifier Classifier, filterer Filterer, encoder Encoder, compressor Compressor, storer Storer) *Pipeline {\n\tp := &Pipeline{\n\t\tTransformer: transformer,\n\t\tClassifier: classifier,\n\t\tFilterer: filterer,\n\t\tEncoder: encoder,\n\t\tCompressor: compressor,\n\t\tStorer: storer,\n\t}\n\tstorer.SetPipeline(p)\n\treturn p\n}", "func makePodSpecPatch(\n\tcontainer *pipelinespec.PipelineDeploymentConfig_PipelineContainerSpec,\n\tcomponentSpec 
*pipelinespec.ComponentSpec,\n\texecutorInput *pipelinespec.ExecutorInput,\n\texecutionID int64,\n\tpipelineName string,\n\trunID string,\n) (string, error) {\n\texecutorInputJSON, err := protojson.Marshal(executorInput)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"failed to make podSpecPatch: %w\", err)\n\t}\n\tcomponentJSON, err := protojson.Marshal(componentSpec)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"failed to make podSpecPatch: %w\", err)\n\t}\n\n\tuserCmdArgs := make([]string, 0, len(container.Command)+len(container.Args))\n\tuserCmdArgs = append(userCmdArgs, container.Command...)\n\tuserCmdArgs = append(userCmdArgs, container.Args...)\n\tlauncherCmd := []string{\n\t\t// TODO(Bobgy): workaround argo emissary executor bug, after we upgrade to an argo version with the bug fix, we can remove the following line.\n\t\t// Reference: https://github.com/argoproj/argo-workflows/issues/7406\n\t\t\"/var/run/argo/argoexec\", \"emissary\", \"--\",\n\t\tcomponent.KFPLauncherPath,\n\t\t// TODO(Bobgy): no need to pass pipeline_name and run_id, this info can be fetched via pipeline context and pipeline run context which have been created by root DAG driver.\n\t\t\"--pipeline_name\", pipelineName,\n\t\t\"--run_id\", runID,\n\t\t\"--execution_id\", fmt.Sprintf(\"%v\", executionID),\n\t\t\"--executor_input\", string(executorInputJSON),\n\t\t\"--component_spec\", string(componentJSON),\n\t\t\"--pod_name\",\n\t\tfmt.Sprintf(\"$(%s)\", component.EnvPodName),\n\t\t\"--pod_uid\",\n\t\tfmt.Sprintf(\"$(%s)\", component.EnvPodUID),\n\t\t\"--mlmd_server_address\",\n\t\tfmt.Sprintf(\"$(%s)\", component.EnvMetadataHost),\n\t\t\"--mlmd_server_port\",\n\t\tfmt.Sprintf(\"$(%s)\", component.EnvMetadataPort),\n\t\t\"--\", // separator before user command and args\n\t}\n\tres := k8score.ResourceRequirements{\n\t\tLimits: map[k8score.ResourceName]k8sres.Quantity{},\n\t}\n\tmemoryLimit := container.GetResources().GetMemoryLimit()\n\tif memoryLimit != 0 {\n\t\tq, err := k8sres.ParseQuantity(fmt.Sprintf(\"%vG\", memoryLimit))\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tres.Limits[k8score.ResourceMemory] = q\n\t}\n\tcpuLimit := container.GetResources().GetCpuLimit()\n\tif cpuLimit != 0 {\n\t\tq, err := k8sres.ParseQuantity(fmt.Sprintf(\"%v\", cpuLimit))\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tres.Limits[k8score.ResourceCPU] = q\n\t}\n\taccelerator := container.GetResources().GetAccelerator()\n\tif accelerator != nil {\n\t\treturn \"\", fmt.Errorf(\"accelerator resources are not supported yet: https://github.com/kubeflow/pipelines/issues/7043\")\n\t}\n\tpodSpec := &k8score.PodSpec{\n\t\tContainers: []k8score.Container{{\n\t\t\tName: \"main\", // argo task user container is always called \"main\"\n\t\t\tCommand: launcherCmd,\n\t\t\tArgs: userCmdArgs,\n\t\t\tImage: container.Image,\n\t\t\tResources: res,\n\t\t}},\n\t}\n\tpodSpecPatchBytes, err := json.Marshal(podSpec)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"JSON marshaling pod spec patch: %w\", err)\n\t}\n\treturn string(podSpecPatchBytes), nil\n}", "func NewSpinnakerPipeline(params map[string]interface{}) (*pipeline, error) {\n\n\tpipeline := &pipeline{\n\t\tSchema: \"v2\",\n\t\tTemplate: Template{\n\t\t\tArtifactAccount: \"front50ArtifactCredentials\",\n\t\t\tReference: \"spinnaker://\" + params[\"pipeline_template\"].(string),\n\t\t\tType: \"front50/pipelineTemplate\",\n\t\t},\n\t\tApplication: params[\"spinnaker_application\"].(string),\n\t\tName: params[\"pipeline_name\"].(string),\n\t\tType: 
\"templatedPipeline\",\n\t\tTriggers: make([]interface{}, 0),\n\t\tStages: make([]interface{}, 0),\n\t\tVariables: Variables{\n\t\t\tNamespace: params[\"namespace\"].(string),\n\t\t\tDockerRegistry: params[\"docker_registry\"].(string),\n\t\t\tK8SAccount: params[\"k8s_account\"].(string),\n\t\t\tHelmPackageS3ObjectPath: params[\"helm_package_s3_object_path\"].(string),\n\t\t\tHelmOverrideFileS3ObjectPath: params[\"helm_override_file_s3_object_path\"].(string),\n\t\t\tDockerRegistryOrg: params[\"docker_registry_org\"].(string),\n\t\t\tDockerRepository: params[\"docker_repository\"].(string),\n\t\t\tHalS3Account: params[\"hal_s3_account\"].(string),\n\t\t\tHalDockerRegistryAccount: params[\"hal_docker_registry_account\"].(string),\n\t\t\tDockerImageTag: params[\"docker_image_tag\"].(string),\n\t\t\tSpinnakerApplication: params[\"spinnaker_application\"].(string),\n\t\t},\n\t\tExclude: make([]interface{}, 0),\n\t\tParameterConfig: make([]interface{}, 0),\n\t\tNotifications: make([]interface{}, 0),\n\t}\n\n\treturn pipeline, nil\n}", "func NewPipeline(logger log.Logger, stgs PipelineStages, jobName *string, registerer prometheus.Registerer) (*Pipeline, error) {\n\thist := prometheus.NewHistogramVec(prometheus.HistogramOpts{\n\t\tNamespace: \"logentry\",\n\t\tName: \"pipeline_duration_seconds\",\n\t\tHelp: \"Label and metric extraction pipeline processing time, in seconds\",\n\t\tBuckets: []float64{.000005, .000010, .000025, .000050, .000100, .000250, .000500, .001000, .002500, .005000, .010000, .025000},\n\t}, []string{\"job_name\"})\n\terr := registerer.Register(hist)\n\tif err != nil {\n\t\tif existing, ok := err.(prometheus.AlreadyRegisteredError); ok {\n\t\t\thist = existing.ExistingCollector.(*prometheus.HistogramVec)\n\t\t} else {\n\t\t\t// Same behavior as MustRegister if the error is not for AlreadyRegistered\n\t\t\tpanic(err)\n\t\t}\n\t}\n\n\tst := []Stage{}\n\tfor _, s := range stgs {\n\t\tstage, ok := s.(PipelineStage)\n\t\tif !ok {\n\t\t\treturn nil, errors.Errorf(\"invalid YAML config, \"+\n\t\t\t\t\"make sure each stage of your pipeline is a YAML object (must end with a `:`), check stage `- %s`\", s)\n\t\t}\n\t\tif len(stage) > 1 {\n\t\t\treturn nil, errors.New(\"pipeline stage must contain only one key\")\n\t\t}\n\t\tfor key, config := range stage {\n\t\t\tname, ok := key.(string)\n\t\t\tif !ok {\n\t\t\t\treturn nil, errors.New(\"pipeline stage key must be a string\")\n\t\t\t}\n\t\t\tnewStage, err := New(logger, jobName, name, config, registerer)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.Wrapf(err, \"invalid %s stage config\", name)\n\t\t\t}\n\t\t\tst = append(st, newStage)\n\t\t}\n\t}\n\treturn &Pipeline{\n\t\tlogger: log.With(logger, \"component\", \"pipeline\"),\n\t\tstages: st,\n\t\tjobName: jobName,\n\t\tplDuration: hist,\n\t}, nil\n}", "func NewPipeline(ls ...interface{}) (*Pipe, error) {\n\tvar pipe []interface{}\n\n\tp := &Pipe{\n\t\tls: pipe,\n\t}\n\n\tfor _, f := range ls {\n\t\tif err := p.Add(f); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treturn p, nil\n}", "func createSpec(name string) (*TestSpec, error) {\n\t// File must be a yaml file.\n\tif filepath.Ext(name) != \".yml\" {\n\t\treturn nil, fmt.Errorf(\"Cannot parse non-yaml file: %s\", name)\n\t}\n\n\t// Read testspec yaml file contents.\n\tcontents, err := ioutil.ReadFile(name)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Unable to read yaml test spec: %s. 
Error: %v\", name, err)\n\t}\n\n\t// Unmarshals testspec yaml file contents into struct.\n\ttest := &TestSpec{}\n\tif err = yaml.Unmarshal(contents, &test); err != nil {\n\t\treturn nil, fmt.Errorf(\"Unable to unmarshal yaml test spec: %s. Error: %v\", name, err)\n\t}\n\n\t// Instanstiates cache for templating.\n\ttest.Cache = make(map[string]string)\n\n\t// Assigns default values for commands.\n\tfor i := range test.Commands {\n\t\t// Skip command by removing from command list.\n\t\tif test.Commands[i].Skip == true {\n\t\t\ttest.Commands = append(test.Commands[:i], test.Commands[i+1:]...)\n\t\t}\n\n\t\t// Default commandspec timeout.\n\t\tif test.Commands[i].Timeout == \"\" {\n\t\t\ttest.Commands[i].Timeout = test.CmdTimeout\n\t\t}\n\t}\n\treturn test, nil\n}", "func (in *PipelineHistorySpec) DeepCopy() *PipelineHistorySpec {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(PipelineHistorySpec)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func New(stdin io.Reader, stdout io.Writer, stderr io.Writer) *Pipeline {\n pl := &Pipeline{}\n pl.input = stdin\n pl.output = stdout\n pl.err = stderr\n pl.tasks = []*exec.Cmd{}\n return pl\n}", "func NewPipeline(definitionPath, environmentPath string, environment types.StringMap, ignoredSteps types.StringSet, selectedSteps types.StringSet) (*Pipeline, error) {\n\tp := &Pipeline{}\n\tvar err error\n\t// Load environment\n\tp.Environment, err = NewPipelineEnvironment(environmentPath, environment, ignoredSteps, selectedSteps)\n\tif err != nil {\n\t\t// As environment files are optional, handle if non is accessible\n\t\tif e, ok := err.(*os.PathError); ok && e.Err == syscall.ENOENT {\n\t\t\tlog.Print(\"No environment file is used\")\n\t\t} else {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\t// Load definition\n\tp.Definition, err = NewPipelineDefinition(definitionPath, p.Environment)\n\tp.localRunner = NewLocalRunner(\"pipeline\", os.Stdout, os.Stderr)\n\tp.noopRunner = NewNoopRunner(false)\n\treturn p, err\n}", "func (p *Provider) newVSphereMachineProviderSpec(clusterID string) (*mapi.VSphereMachineProviderSpec, error) {\n\tif clusterID == \"\" {\n\t\treturn nil, fmt.Errorf(\"clusterID is empty\")\n\t}\n\tworkspace, err := p.getWorkspaceFromExistingMachineSet(clusterID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tlog.Printf(\"creating machineset provider spec which targets %s\\n\", workspace.Server)\n\n\t// The template is an image which has been properly sysprepped. 
The image is derived from an environment variable\n\t// defined in the job spec.\n\tvmTemplate := os.Getenv(\"VM_TEMPLATE\")\n\tif vmTemplate == \"\" {\n\t\tvmTemplate = \"windows-golden-images/windows-server-2022-template\"\n\t}\n\n\tlog.Printf(\"creating machineset based on template %s\\n\", vmTemplate)\n\n\treturn &mapi.VSphereMachineProviderSpec{\n\t\tTypeMeta: meta.TypeMeta{\n\t\t\tAPIVersion: \"vsphereprovider.openshift.io/v1beta1\",\n\t\t\tKind: \"VSphereMachineProviderSpec\",\n\t\t},\n\t\tCredentialsSecret: &core.LocalObjectReference{\n\t\t\tName: defaultCredentialsSecretName,\n\t\t},\n\t\tDiskGiB: int32(128),\n\t\tMemoryMiB: int64(16384),\n\t\tNetwork: mapi.NetworkSpec{\n\t\t\tDevices: []mapi.NetworkDeviceSpec{{NetworkName: getNetwork()}},\n\t\t},\n\t\tNumCPUs: int32(4),\n\t\tNumCoresPerSocket: int32(1),\n\t\tTemplate: vmTemplate,\n\t\tWorkspace: workspace,\n\t}, nil\n}", "func NewCreateSpec(s *api.JobUpdateSettings) *stateless.CreateSpec {\n\tu := NewUpdateSpec(s, false)\n\treturn &stateless.CreateSpec{\n\t\tBatchSize: u.BatchSize,\n\t\tMaxInstanceRetries: u.MaxInstanceRetries,\n\t\tMaxTolerableInstanceFailures: u.MaxTolerableInstanceFailures,\n\t\tStartPaused: u.StartPaused,\n\t}\n}", "func (*PipelineSpec) Descriptor() ([]byte, []int) {\n\treturn file_pipelinerun_proto_rawDescGZIP(), []int{2}\n}", "func NewWebhookSpec(spec *job.WebhookSpec) *WebhookSpec {\n\treturn &WebhookSpec{\n\t\tCreatedAt: spec.CreatedAt,\n\t\tUpdatedAt: spec.UpdatedAt,\n\t}\n}", "func New(id string) *Spec {\n\treturn &Spec{ID: id, Target: make(map[string]string)}\n}", "func NewFTDeploySpec(account string, name string, org string, chain string) *FTDeploySpec {\n\treturn &FTDeploySpec{\n\t\tApplicationDeploySpec{\n\t\t\tOrg: org,\n\t\t\tChain: chain,\n\t\t\tName: name,\n\t\t\tAccount: account,\n\t\t\tCoinfigFileName: FTDeployConfigFileName,\n\t\t},\n\t}\n}", "func NewPipeline(ops []OpUnion) Pipeline {\n\treturn Pipeline{Operations: ops}\n}", "func CreatePipeline(codecName string, tracks []*webrtc.Track) *Pipeline {\n\tfmt.Printf(\"In create pipeline\")\n\tpipelineStr := \"\"\n\tswitch codecName {\n\tcase \"VP8\":\n\t\tpipelineStr += \", encoding-name=VP8-DRAFT-IETF-01 ! rtpvp8depay ! decodebin ! autovideosink\"\n\tcase \"Opus\":\n\t\tpipelineStr += \"appsrc name=src ! decodebin ! audioconvert ! audioresample ! audio/x-raw, rate=8000 ! mulawenc ! appsink name=appsink max-buffers=1\"\n\t// case webrtc.VP9:\n\t// \tpipelineStr += \" ! rtpvp9depay ! decodebin ! autovideosink\"\n\t// case webrtc.H264:\n\t// \tpipelineStr += \" ! rtph264depay ! decodebin ! autovideosink\"\n\t// case webrtc.G722:\n\t// \tpipelineStr += \" clock-rate=8000 ! rtpg722depay ! decodebin ! 
autoaudiosink\"\n\tdefault:\n\t\tpanic(\"Unhandled codec \" + codecName)\n\t}\n\n\tpipelineStrUnsafe := C.CString(pipelineStr)\n\tdefer C.free(unsafe.Pointer(pipelineStrUnsafe))\n\treturn &Pipeline{\n\t\tPipeline: C.gstreamer_receive_create_pipeline(pipelineStrUnsafe),\n\t\ttracks: tracks,\n\t}\n}", "func New() *Pipeline {\n\treturn &Pipeline{}\n}", "func NewFromProcedureSpec(data []*Table) *FromProcedureSpec {\n\t// Normalize data before anything can read it\n\tfor _, tbl := range data {\n\t\ttbl.Normalize()\n\t}\n\treturn &FromProcedureSpec{data: data}\n}", "func NewProblemSpec(name string) ProblemSpec {\n\tsteps, _ := NewSteps([]uint64{1})\n\treturn ProblemSpec{\n\t\tName: name,\n\t\tAttrs: map[string]string{},\n\t\tSteps: *steps,\n\t}\n}", "func New(v interface{}) (provider.Provider, error) {\n\ts := Spec{}\n\treturn &s, ioutil.Intermarshal(v, &s)\n}", "func CreatePlanSpec(spec *PlanSpec) *plan.Spec {\n\treturn createPlanSpec(spec.Nodes, spec.Edges, spec.Resources, spec.Now)\n}", "func NewPipeline() *Pipeline {\n\treturn &Pipeline{\n\t\tSerializablePipeline: NewSerializablePipeline(),\n\t}\n}", "func NewPipeline(stages ...Stage) *Pipeline {\n\treturn &Pipeline{stages: stages}\n}", "func NewMockPipelineFrom(i iface.Pipeline) *MockPipeline {\n\treturn &MockPipeline{\n\t\tAddFunc: &PipelineAddFunc{\n\t\t\tdefaultHook: i.Add,\n\t\t},\n\t\tRunFunc: &PipelineRunFunc{\n\t\t\tdefaultHook: i.Run,\n\t\t},\n\t}\n}", "func NewFromString(exp string) *Pipeline {\n\tcmds := ParseCommand(exp)\n\treturn NewPipeline(cmds...)\n}", "func NewBootstrapSpec(spec *job.BootstrapSpec) *BootstrapSpec {\n\treturn &BootstrapSpec{\n\t\tContractID: spec.ContractID,\n\t\tRelay: spec.Relay,\n\t\tRelayConfig: spec.RelayConfig,\n\t\tBlockchainTimeout: spec.BlockchainTimeout,\n\t\tContractConfigTrackerPollInterval: spec.ContractConfigTrackerPollInterval,\n\t\tContractConfigConfirmations: spec.ContractConfigConfirmations,\n\t\tCreatedAt: spec.CreatedAt,\n\t\tUpdatedAt: spec.UpdatedAt,\n\t}\n}", "func NewPipeline() *Pipeline {\n\treturn &Pipeline{\n\t\tmake(chan struct{}),\n\t\tsync.WaitGroup{},\n\t\tsync.Mutex{},\n\t\tnil,\n\t}\n}", "func New(s *session.Session) CodeDeploySpec {\n\treturn CodeDeploySpec{\n\t\tSession: s,\n\t}\n}", "func (in *Spec) DeepCopy() *Spec {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(Spec)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *Spec) DeepCopy() *Spec {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(Spec)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func NewPipeline(factories []component.Factory) *Pipeline {\n\tpipeline := &Pipeline{}\n\n\tfor i, factory := range factories {\n\t\tpool := pool{\n\t\t\tfactory: factory,\n\t\t\tterminate: make(chan struct{}),\n\t\t\tdone: &sync.WaitGroup{},\n\t\t\tworkers: make(chan component.Component, factory.PoolSize()),\n\t\t}\n\n\t\tpool.produce = make(chan component.Message, factory.ChannelSize())\n\t\tpool.output = make(chan component.Message, factory.ChannelSize())\n\t\tif i > 0 {\n\t\t\tpool.input = pipeline.pools[i-1].output\n\t\t}\n\n\t\tfor j := 0; j < factory.PoolSize(); j++ {\n\t\t\tspawnWorker(factory, pool)\n\t\t}\n\n\t\tpipeline.pools = append(pipeline.pools, &pool)\n\t}\n\n\tgo func() {\n\t\tfor msg := range pipeline.pools[len(factories)-1].output {\n\t\t\tmsg.Release()\n\t\t}\n\t}()\n\n\treturn pipeline\n}", "func createPipeline(\n\tconfig Config, mgr types.Manager, logger log.Modular, stats metrics.Type,\n) (*util.ClosablePool, error) {\n\tpool := util.NewClosablePool()\n\n\t// Create our input pipe\n\tinputPipe, err 
:= input.New(config.Input, mgr, logger, stats)\n\tif err != nil {\n\t\tlogger.Errorf(\"Input error (%s): %v\\n\", config.Input.Type, err)\n\t\treturn nil, err\n\t}\n\tpool.Add(1, inputPipe)\n\n\t// Create our benchmarking output pipe\n\toutputPipe := test.NewBenchOutput(\n\t\ttime.Duration(config.ReportPeriodMS)*time.Millisecond, logger, stats,\n\t)\n\tpool.Add(10, outputPipe)\n\n\toutputPipe.StartReceiving(inputPipe.TransactionChan())\n\treturn pool, nil\n}", "func NewMockPipeline() *MockPipeline {\n\treturn &MockPipeline{\n\t\tAddFunc: &PipelineAddFunc{\n\t\t\tdefaultHook: func(string, ...interface{}) {\n\t\t\t\treturn\n\t\t\t},\n\t\t},\n\t\tRunFunc: &PipelineRunFunc{\n\t\t\tdefaultHook: func() (interface{}, error) {\n\t\t\t\treturn nil, nil\n\t\t\t},\n\t\t},\n\t}\n}", "func NewContainerSpec(name string, image string, ) *ContainerSpec {\n\tthis := ContainerSpec{}\n\tthis.Name = name\n\tthis.Image = image\n\treturn &this\n}", "func New() *Pipeline {\n\treturn &Pipeline{\n\t\tconfig: DefaultConfig(),\n\t}\n}", "func TestYAMLPipelineSpec(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip(\"Skipping integration tests in short mode\")\n\t}\n\tc, _ := minikubetestenv.AcquireCluster(t)\n\t// Note that BashCmd dedents all lines below including the YAML (which\n\t// wouldn't parse otherwise)\n\trequire.NoError(t, tu.PachctlBashCmd(t, c, `\n\t\tyes | pachctl delete all\n\t\tpachctl create project P\n\t\tpachctl create repo input --project P\n\t\tpachctl create pipeline -f - <<EOF\n\t\tpipeline:\n\t\t name: first\n\t\t project:\n\t\t name: P\n\t\tinput:\n\t\t pfs:\n\t\t glob: /*\n\t\t repo: input\n\t\t project: P\n\t\ttransform:\n\t\t cmd: [ /bin/bash ]\n\t\t stdin:\n\t\t - \"cp /pfs/input/* /pfs/out\"\n\t\t---\n\t\tpipeline:\n\t\t name: second\n\t\tinput:\n\t\t pfs:\n\t\t glob: /*\n\t\t repo: first\n\t\t project: P\n\t\ttransform:\n\t\t cmd: [ /bin/bash ]\n\t\t stdin:\n\t\t - \"cp /pfs/first/* /pfs/out\"\n\t\tEOF\n\t\tpachctl start commit input@master --project P\n\t\techo foo | pachctl put file input@master:/foo --project P\n\t\techo bar | pachctl put file input@master:/bar --project P\n\t\techo baz | pachctl put file input@master:/baz --project P\n\t\tpachctl finish commit input@master --project P\n\t\tpachctl wait commit second@master\n\t\tpachctl get file second@master:/foo | match foo\n\t\tpachctl get file second@master:/bar | match bar\n\t\tpachctl get file second@master:/baz | match baz\n\t\t`,\n\t).Run())\n}", "func (in *PipelineRunSpec) DeepCopy() *PipelineRunSpec {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(PipelineRunSpec)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func New(ruleGetter ChannelRuleGetter) (*Pipeline, error) {\n\tp := &Pipeline{\n\t\truleGetter: ruleGetter,\n\t}\n\n\tif os.Getenv(\"GF_LIVE_PIPELINE_TRACE\") != \"\" {\n\t\t// Traces for development only at the moment.\n\t\t// Start local Jaeger and then run Grafana with GF_LIVE_PIPELINE_TRACE:\n\t\t// docker run --rm -it --name jaeger -e COLLECTOR_ZIPKIN_HOST_PORT=:9411 -p 5775:5775/udp -p 6831:6831/udp -p 6832:6832/udp -p 5778:5778 -p 16686:16686 -p 14268:14268 -p 14250:14250 -p 9411:9411 jaegertracing/all-in-one:1.26\n\t\t// Then visit http://localhost:16686/ where Jaeger UI is served.\n\t\ttp, err := tracerProvider(\"http://localhost:14268/api/traces\")\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\ttracer := tp.Tracer(\"gf.live.pipeline\")\n\t\tp.tracer = tracer\n\t}\n\n\tif os.Getenv(\"GF_LIVE_PIPELINE_DEV\") != \"\" {\n\t\tgo postTestData() // TODO: temporary for development, remove before 
merge.\n\t}\n\n\treturn p, nil\n}", "func GeneratePVCSpec(quantity resource.Quantity) *corev1.PersistentVolumeClaimSpec {\n\n\tpvcSpec := &corev1.PersistentVolumeClaimSpec{\n\t\tResources: corev1.ResourceRequirements{\n\t\t\tRequests: corev1.ResourceList{\n\t\t\t\tcorev1.ResourceStorage: quantity,\n\t\t\t},\n\t\t},\n\t\tAccessModes: []corev1.PersistentVolumeAccessMode{\n\t\t\tcorev1.ReadWriteOnce,\n\t\t},\n\t}\n\n\treturn pvcSpec\n}", "func (*CreatePipelineRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_genomics_v1alpha2_pipelines_proto_rawDescGZIP(), []int{3}\n}", "func toK8SPodSpec(podSpec *pbpod.PodSpec) *corev1.Pod {\n\t// Create pod template spec and apply configurations to spec.\n\tlabels := make(map[string]string)\n\tfor _, label := range podSpec.GetLabels() {\n\t\tlabels[label.GetKey()] = label.GetValue()\n\t}\n\n\ttermGracePeriod := int64(podSpec.GetKillGracePeriodSeconds())\n\n\tpodTemp := corev1.PodTemplateSpec{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tLabels: labels,\n\t\t},\n\t\tSpec: corev1.PodSpec{\n\t\t\tContainers: toK8SContainerSpecs(podSpec.GetContainers()),\n\t\t\tInitContainers: toK8SContainerSpecs(podSpec.GetInitContainers()),\n\t\t\tRestartPolicy: \"Never\",\n\t\t\tTerminationGracePeriodSeconds: &termGracePeriod,\n\t\t},\n\t}\n\n\t// Bind node and create pod.\n\treturn &corev1.Pod{\n\t\tObjectMeta: podTemp.ObjectMeta,\n\t\tSpec: podTemp.Spec,\n\t}\n}", "func (*PipelineRunSpec) Descriptor() ([]byte, []int) {\n\treturn file_pipelinerun_proto_rawDescGZIP(), []int{1}\n}", "func NewPathSpec(fs *hugofs.Fs, cfg config.Provider) *PathSpec {\n\n\tps := &PathSpec{\n\t\tfs: fs,\n\t\tdisablePathToLower: cfg.GetBool(\"disablePathToLower\"),\n\t\tremovePathAccents: cfg.GetBool(\"removePathAccents\"),\n\t\tuglyURLs: cfg.GetBool(\"uglyURLs\"),\n\t\tcanonifyURLs: cfg.GetBool(\"canonifyURLs\"),\n\t\tmultilingual: cfg.GetBool(\"multilingual\"),\n\t\tdefaultContentLanguageInSubdir: cfg.GetBool(\"defaultContentLanguageInSubdir\"),\n\t\tdefaultContentLanguage: cfg.GetString(\"defaultContentLanguage\"),\n\t\tpaginatePath: cfg.GetString(\"paginatePath\"),\n\t\tbaseURL: cfg.GetString(\"baseURL\"),\n\t\tthemesDir: cfg.GetString(\"themesDir\"),\n\t\tlayoutDir: cfg.GetString(\"layoutDir\"),\n\t\tworkingDir: cfg.GetString(\"workingDir\"),\n\t\tstaticDir: cfg.GetString(\"staticDir\"),\n\t\ttheme: cfg.GetString(\"theme\"),\n\t}\n\n\tif language, ok := cfg.(*Language); ok {\n\t\tps.language = language\n\t}\n\n\treturn ps\n}", "func MakeSpec(\n\tconn, unique string,\n\tneedsUpdate func(db.Specifier, db.Specifier) bool,\n\tnewDBSpec db.Specifier,\n\tnewDBFunc DBMaker,\n\tnewDBError error,\n\tupdateFunc Updater,\n\tupdateErr error,\n) db.Specifier {\n\treturn &Spec{\n\t\tConn: conn,\n\t\tUnique: unique,\n\t\tUpdateNeeded: needsUpdate,\n\t\tNewDBSpec: newDBSpec,\n\t\tNewDBFunc: newDBFunc,\n\t\tNewDBError: newDBError,\n\t\tUpdateFunc: updateFunc,\n\t\tUpdateErr: updateErr,\n\t}\n}", "func NewConstraintSpec() *ConstraintSpec {\n\treturn &ConstraintSpec{\n\t\tIncludes: make([]ConstraintFunctor, 0),\n\t\tIncludeCombiner: And,\n\t\tExcludes: make([]ConstraintFunctor, 0),\n\t\tExcludeCombiner: Or,\n\t\tLimit: 25,\n\t\tPage: 0,\n\t}\n}", "func TestMalformedPipeline(t *testing.T) {\n\tt.Parallel()\n\tc, _ := minikubetestenv.AcquireCluster(t)\n\n\tpipelineName := tu.UniqueString(\"MalformedPipeline\")\n\n\tvar err error\n\t_, err = c.PpsAPIClient.CreatePipeline(c.Ctx(), &pps.CreatePipelineRequest{})\n\trequire.YesError(t, err)\n\trequire.Matches(t, \"request.Pipeline cannot be nil\", 
err.Error())\n\n\t_, err = c.PpsAPIClient.CreatePipeline(c.Ctx(), &pps.CreatePipelineRequest{\n\t\tPipeline: client.NewPipeline(pfs.DefaultProjectName, pipelineName)},\n\t)\n\trequire.YesError(t, err)\n\trequire.Matches(t, \"must specify a transform\", err.Error())\n\n\t_, err = c.PpsAPIClient.CreatePipeline(c.Ctx(), &pps.CreatePipelineRequest{\n\t\tPipeline: client.NewPipeline(pfs.DefaultProjectName, pipelineName),\n\t\tTransform: &pps.Transform{},\n\t\tInput: &pps.Input{},\n\t})\n\trequire.YesError(t, err)\n\trequire.Matches(t, \"no input set\", err.Error())\n\n\t_, err = c.PpsAPIClient.CreatePipeline(c.Ctx(), &pps.CreatePipelineRequest{\n\t\tPipeline: client.NewPipeline(pfs.DefaultProjectName, pipelineName),\n\t\tTransform: &pps.Transform{},\n\t\tService: &pps.Service{},\n\t\tParallelismSpec: &pps.ParallelismSpec{},\n\t})\n\trequire.YesError(t, err)\n\trequire.Matches(t, \"services can only be run with a constant parallelism of 1\", err.Error())\n\n\t_, err = c.PpsAPIClient.CreatePipeline(c.Ctx(), &pps.CreatePipelineRequest{\n\t\tPipeline: client.NewPipeline(pfs.DefaultProjectName, pipelineName),\n\t\tTransform: &pps.Transform{},\n\t\tSpecCommit: &pfs.Commit{},\n\t})\n\trequire.YesError(t, err)\n\trequire.Matches(t, \"cannot resolve commit with no repo\", err.Error())\n\n\t_, err = c.PpsAPIClient.CreatePipeline(c.Ctx(), &pps.CreatePipelineRequest{\n\t\tPipeline: client.NewPipeline(pfs.DefaultProjectName, pipelineName),\n\t\tTransform: &pps.Transform{},\n\t\tSpecCommit: &pfs.Commit{Branch: &pfs.Branch{}},\n\t})\n\trequire.YesError(t, err)\n\trequire.Matches(t, \"cannot resolve commit with no repo\", err.Error())\n\n\tdataRepo := tu.UniqueString(\"TestMalformedPipeline_data\")\n\trequire.NoError(t, c.CreateRepo(pfs.DefaultProjectName, dataRepo))\n\n\tdataCommit := client.NewCommit(pfs.DefaultProjectName, dataRepo, \"master\", \"\")\n\trequire.NoError(t, c.PutFile(dataCommit, \"file\", strings.NewReader(\"foo\"), client.WithAppendPutFile()))\n\n\t_, err = c.PpsAPIClient.CreatePipeline(c.Ctx(), &pps.CreatePipelineRequest{\n\t\tPipeline: client.NewPipeline(pfs.DefaultProjectName, pipelineName),\n\t\tTransform: &pps.Transform{},\n\t\tInput: &pps.Input{Pfs: &pps.PFSInput{}},\n\t})\n\trequire.YesError(t, err)\n\trequire.Matches(t, \"input must specify a name\", err.Error())\n\n\t_, err = c.PpsAPIClient.CreatePipeline(c.Ctx(), &pps.CreatePipelineRequest{\n\t\tPipeline: client.NewPipeline(pfs.DefaultProjectName, pipelineName),\n\t\tTransform: &pps.Transform{},\n\t\tInput: &pps.Input{Pfs: &pps.PFSInput{Name: \"data\"}},\n\t})\n\trequire.YesError(t, err)\n\trequire.Matches(t, \"input must specify a repo\", err.Error())\n\n\t_, err = c.PpsAPIClient.CreatePipeline(c.Ctx(), &pps.CreatePipelineRequest{\n\t\tPipeline: client.NewPipeline(pfs.DefaultProjectName, pipelineName),\n\t\tTransform: &pps.Transform{},\n\t\tInput: &pps.Input{Pfs: &pps.PFSInput{Repo: dataRepo}},\n\t})\n\trequire.YesError(t, err)\n\trequire.Matches(t, \"input must specify a glob\", err.Error())\n\n\t_, err = c.PpsAPIClient.CreatePipeline(c.Ctx(), &pps.CreatePipelineRequest{\n\t\tPipeline: client.NewPipeline(pfs.DefaultProjectName, pipelineName),\n\t\tTransform: &pps.Transform{},\n\t\tInput: client.NewPFSInput(pfs.DefaultProjectName, \"out\", \"/*\"),\n\t})\n\trequire.YesError(t, err)\n\trequire.Matches(t, \"input cannot be named out\", err.Error())\n\n\t_, err = c.PpsAPIClient.CreatePipeline(c.Ctx(), &pps.CreatePipelineRequest{\n\t\tPipeline: client.NewPipeline(pfs.DefaultProjectName, pipelineName),\n\t\tTransform: 
&pps.Transform{},\n\t\tInput: &pps.Input{Pfs: &pps.PFSInput{Name: \"out\", Repo: dataRepo, Glob: \"/*\"}},\n\t})\n\trequire.YesError(t, err)\n\trequire.Matches(t, \"input cannot be named out\", err.Error())\n\n\t_, err = c.PpsAPIClient.CreatePipeline(c.Ctx(), &pps.CreatePipelineRequest{\n\t\tPipeline: client.NewPipeline(pfs.DefaultProjectName, pipelineName),\n\t\tTransform: &pps.Transform{},\n\t\tInput: &pps.Input{Pfs: &pps.PFSInput{Name: \"data\", Repo: \"dne\", Glob: \"/*\"}},\n\t})\n\trequire.YesError(t, err)\n\trequire.Matches(t, \"dne[^ ]* not found\", err.Error())\n\n\t_, err = c.PpsAPIClient.CreatePipeline(c.Ctx(), &pps.CreatePipelineRequest{\n\t\tPipeline: client.NewPipeline(pfs.DefaultProjectName, pipelineName),\n\t\tTransform: &pps.Transform{},\n\t\tInput: client.NewCrossInput(\n\t\t\tclient.NewPFSInput(pfs.DefaultProjectName, \"foo\", \"/*\"),\n\t\t\tclient.NewPFSInput(pfs.DefaultProjectName, \"foo\", \"/*\"),\n\t\t),\n\t})\n\trequire.YesError(t, err)\n\trequire.Matches(t, \"name \\\"foo\\\" was used more than once\", err.Error())\n\n\t_, err = c.PpsAPIClient.CreatePipeline(c.Ctx(), &pps.CreatePipelineRequest{\n\t\tPipeline: client.NewPipeline(pfs.DefaultProjectName, pipelineName),\n\t\tTransform: &pps.Transform{},\n\t\tInput: &pps.Input{Cron: &pps.CronInput{}},\n\t})\n\trequire.YesError(t, err)\n\trequire.Matches(t, \"input must specify a name\", err.Error())\n\n\t_, err = c.PpsAPIClient.CreatePipeline(c.Ctx(), &pps.CreatePipelineRequest{\n\t\tPipeline: client.NewPipeline(pfs.DefaultProjectName, pipelineName),\n\t\tTransform: &pps.Transform{},\n\t\tInput: &pps.Input{Cron: &pps.CronInput{Name: \"cron\"}},\n\t})\n\trequire.YesError(t, err)\n\trequire.Matches(t, \"Empty spec string\", err.Error())\n\n\t_, err = c.PpsAPIClient.CreatePipeline(c.Ctx(), &pps.CreatePipelineRequest{\n\t\tPipeline: client.NewPipeline(pfs.DefaultProjectName, pipelineName),\n\t\tTransform: &pps.Transform{},\n\t\tInput: &pps.Input{Cross: []*pps.Input{}},\n\t})\n\trequire.YesError(t, err)\n\trequire.Matches(t, \"no input set\", err.Error())\n\n\t_, err = c.PpsAPIClient.CreatePipeline(c.Ctx(), &pps.CreatePipelineRequest{\n\t\tPipeline: client.NewPipeline(pfs.DefaultProjectName, pipelineName),\n\t\tTransform: &pps.Transform{},\n\t\tInput: &pps.Input{Union: []*pps.Input{}},\n\t})\n\trequire.YesError(t, err)\n\trequire.Matches(t, \"no input set\", err.Error())\n\n\t_, err = c.PpsAPIClient.CreatePipeline(c.Ctx(), &pps.CreatePipelineRequest{\n\t\tPipeline: client.NewPipeline(pfs.DefaultProjectName, pipelineName),\n\t\tTransform: &pps.Transform{},\n\t\tInput: &pps.Input{Join: []*pps.Input{}},\n\t})\n\trequire.YesError(t, err)\n\trequire.Matches(t, \"no input set\", err.Error())\n}", "func (pm *PipelineManager) newDeployment(pipeline *api.Pipeline) *appsv1.Deployment {\n\tlbls := pipeLabels(pipeline)\n\n\tdeployment := &appsv1.Deployment{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: pipeline.Name,\n\t\t\tNamespace: pipeline.Namespace,\n\t\t\tOwnerReferences: []metav1.OwnerReference{\n\t\t\t\t*metav1.NewControllerRef(pipeline, api.SchemeGroupVersion.WithKind(api.PipelineResourceKind)),\n\t\t\t},\n\t\t\tLabels: lbls,\n\t\t},\n\t\tSpec: appsv1.DeploymentSpec{\n\t\t\tSelector: &metav1.LabelSelector{\n\t\t\t\tMatchLabels: lbls,\n\t\t\t},\n\t\t\tStrategy: appsv1.DeploymentStrategy{\n\t\t\t\tType: appsv1.RecreateDeploymentStrategyType,\n\t\t\t},\n\t\t\tMinReadySeconds: 10,\n\t\t\tTemplate: corev1.PodTemplateSpec{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tLabels: lbls,\n\t\t\t\t},\n\t\t\t\tSpec: 
corev1.PodSpec{\n\t\t\t\t\tRestartPolicy: corev1.RestartPolicyAlways,\n\t\t\t\t\tVolumes: []corev1.Volume{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: \"config\",\n\t\t\t\t\t\t\tVolumeSource: corev1.VolumeSource{\n\t\t\t\t\t\t\t\tConfigMap: &corev1.ConfigMapVolumeSource{\n\t\t\t\t\t\t\t\t\tLocalObjectReference: corev1.LocalObjectReference{Name: pipeline.Name},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tContainers: []corev1.Container{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: \"gravity\",\n\t\t\t\t\t\t\tImage: pipeline.Spec.Image,\n\t\t\t\t\t\t\tCommand: pipeline.Spec.Command,\n\t\t\t\t\t\t\tPorts: []corev1.ContainerPort{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tName: \"http\",\n\t\t\t\t\t\t\t\t\tContainerPort: containerPort,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tLivenessProbe: &corev1.Probe{\n\t\t\t\t\t\t\t\tHandler: corev1.Handler{\n\t\t\t\t\t\t\t\t\tHTTPGet: &corev1.HTTPGetAction{\n\t\t\t\t\t\t\t\t\t\tPort: intstr.FromString(\"http\"),\n\t\t\t\t\t\t\t\t\t\tPath: \"/healthz\",\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\tInitialDelaySeconds: 10,\n\t\t\t\t\t\t\t\tTimeoutSeconds: 5,\n\t\t\t\t\t\t\t\tPeriodSeconds: 10,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tVolumeMounts: []corev1.VolumeMount{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tName: \"config\",\n\t\t\t\t\t\t\t\t\tMountPath: \"/etc/gravity\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tResources: corev1.ResourceRequirements{ //TODO from tps config or metrics\n\t\t\t\t\t\t\t\tRequests: corev1.ResourceList{\n\t\t\t\t\t\t\t\t\t\"cpu\": resource.MustParse(\"100m\"),\n\t\t\t\t\t\t\t\t\t\"memory\": resource.MustParse(\"150M\"),\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tif pipeline.Spec.Paused {\n\t\tdeployment.Spec.Replicas = int32Ptr(0)\n\t} else {\n\t\tdeployment.Spec.Replicas = int32Ptr(1)\n\t}\n\treturn deployment\n}", "func (r *PipelineManifestReader) NextCreatePipelineRequest() (*ppsclient.CreatePipelineRequest, error) {\n\tvar result ppsclient.CreatePipelineRequest\n\tif err := jsonpb.UnmarshalNext(r.decoder, &result); err != nil {\n\t\tif err == io.EOF {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, fmt.Errorf(\"malformed pipeline spec: %s\", err)\n\t}\n\treturn &result, nil\n}", "func NewGemSpecCataloger() *common.GenericCataloger {\n\tglobParsers := map[string]common.ParserFn{\n\t\t\"**/specifications/**/*.gemspec\": parseGemSpecEntries,\n\t}\n\n\treturn common.NewGenericCataloger(nil, globParsers, \"ruby-gemspec-cataloger\")\n}", "func (in *PilotSpec) DeepCopy() *PilotSpec {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(PilotSpec)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func newContainer(rspec *spec.Spec, lockDir string) (*Container, error) {\n\tif rspec == nil {\n\t\treturn nil, errors.Wrapf(ErrInvalidArg, \"must provide a valid runtime spec to create container\")\n\t}\n\n\tctr := new(Container)\n\tctr.config = new(ContainerConfig)\n\tctr.state = new(containerRuntimeInfo)\n\n\tctr.config.ID = stringid.GenerateNonCryptoID()\n\tctr.config.Name = namesgenerator.GetRandomName(0)\n\n\tctr.config.Spec = new(spec.Spec)\n\tdeepcopier.Copy(rspec).To(ctr.config.Spec)\n\tctr.config.CreatedTime = time.Now()\n\n\t// Path our lock file will reside at\n\tlockPath := filepath.Join(lockDir, ctr.config.ID)\n\t// Grab a lockfile at the given path\n\tlock, err := storage.GetLockfile(lockPath)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"error creating lockfile for new container\")\n\t}\n\tctr.lock = lock\n\n\treturn 
ctr, nil\n}", "func (in *SplitterSpec) DeepCopy() *SplitterSpec {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(SplitterSpec)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func PipelineReqFromInfo(pipelineInfo *ppsclient.PipelineInfo) *ppsclient.CreatePipelineRequest {\n\treturn &ppsclient.CreatePipelineRequest{\n\t\tPipeline: pipelineInfo.Pipeline,\n\t\tTransform: pipelineInfo.Transform,\n\t\tParallelismSpec: pipelineInfo.ParallelismSpec,\n\t\tEgress: pipelineInfo.Egress,\n\t\tOutputBranch: pipelineInfo.OutputBranch,\n\t\tScaleDownThreshold: pipelineInfo.ScaleDownThreshold,\n\t\tResourceRequests: pipelineInfo.ResourceRequests,\n\t\tResourceLimits: pipelineInfo.ResourceLimits,\n\t\tInput: pipelineInfo.Input,\n\t\tDescription: pipelineInfo.Description,\n\t\tIncremental: pipelineInfo.Incremental,\n\t\tCacheSize: pipelineInfo.CacheSize,\n\t\tEnableStats: pipelineInfo.EnableStats,\n\t\tBatch: pipelineInfo.Batch,\n\t\tMaxQueueSize: pipelineInfo.MaxQueueSize,\n\t\tService: pipelineInfo.Service,\n\t\tChunkSpec: pipelineInfo.ChunkSpec,\n\t\tDatumTimeout: pipelineInfo.DatumTimeout,\n\t\tJobTimeout: pipelineInfo.JobTimeout,\n\t\tSalt: pipelineInfo.Salt,\n\t}\n}", "func (in *PlexSpec) DeepCopy() *PlexSpec {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(PlexSpec)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func NewTimespec(t time.Time) Timespec {\n\treturn Timespec{t.Unix(), int64(t.Nanosecond())}\n}", "func (in *SshPipeSpec) DeepCopy() *SshPipeSpec {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(SshPipeSpec)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func makePodSpec(t thanosv1beta1.Receiver) (*corev1.PodSpec, error) {\n\n\tif t.Spec.ReceivePrefix == \"\" {\n\t\tt.Spec.ReceivePrefix = receiverDir\n\t}\n\tif t.Spec.Retention == \"\" {\n\t\tt.Spec.Retention = defaultRetetion\n\t}\n\t// TODO set args to spec\n\tthanosArgs := []string{\n\t\t\"receive\",\n\t\tfmt.Sprintf(\"--tsdb.path=%s\", t.Spec.ReceivePrefix),\n\t\tfmt.Sprintf(\"--tsdb.retention=%s\", t.Spec.Retention),\n\t\tfmt.Sprintf(\"--labels=receive=\\\"%s\\\"\", t.Spec.ReceiveLables),\n\t\tfmt.Sprintf(\"--objstore.config=type: %s\\nconfig:\\n bucket: \\\"%s\\\"\", t.Spec.ObjectStorageType, t.Spec.BucketName),\n\t}\n\tif t.Spec.LogLevel != \"\" && t.Spec.LogLevel != \"info\" {\n\t\tthanosArgs = append(thanosArgs, fmt.Sprintf(\"--log.level=%s\", t.Spec.LogLevel))\n\t}\n\tenv := []corev1.EnvVar{\n\t\t{\n\t\t\tName: \"GOOGLE_APPLICATION_CREDENTIALS\",\n\t\t\tValue: secretsDir + t.Spec.SecretName + \".json\",\n\t\t},\n\t}\n\n\tports := []corev1.ContainerPort{\n\t\t{\n\t\t\tContainerPort: 10902,\n\t\t\tName: \"http\",\n\t\t},\n\t\t{\n\t\t\tContainerPort: 10901,\n\t\t\tName: \"grpc\",\n\t\t},\n\t}\n\n\tif strings.Contains(t.Name, \"receiver\") {\n\t\tports = append(ports, corev1.ContainerPort{\n\t\t\tContainerPort: 19291,\n\t\t\tName: \"receive\",\n\t\t})\n\t}\n\n\t// mount to pod\n\tvolumemounts := []corev1.VolumeMount{\n\t\t{\n\t\t\tName: \"thanos-persistent-storage\",\n\t\t\tMountPath: t.Spec.Retention,\n\t\t},\n\t\t{\n\t\t\tName: \"google-cloud-key\",\n\t\t\tMountPath: secretsDir,\n\t\t},\n\t}\n\n\tcontainers := []corev1.Container{\n\t\t{\n\t\t\tName: \"receiver\",\n\t\t\tImage: *t.Spec.Image,\n\t\t\tArgs: thanosArgs,\n\t\t\tEnv: env,\n\t\t\tPorts: ports,\n\t\t\tVolumeMounts: volumemounts,\n\t\t},\n\t}\n\n\t// Need create json from gcp iam\n\t// https://github.com/orangesys/blueprint/tree/master/prometheus-thanos\n\t// kubectl create secret generic ${SERVICE_ACCOUNT_NAME} 
--from-file=${SERVICE_ACCOUNT_NAME}.json=${SERVICE_ACCOUNT_NAME}.json\n\t// secret name is thanos-demo-gcs\n\t// TODO setting secret name with spec\n\tvolumes := []corev1.Volume{\n\t\t{\n\t\t\tName: \"google-cloud-key\",\n\t\t\tVolumeSource: corev1.VolumeSource{\n\t\t\t\tSecret: &corev1.SecretVolumeSource{\n\t\t\t\t\tSecretName: t.Spec.SecretName,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\treturn &corev1.PodSpec{\n\t\tTerminationGracePeriodSeconds: &gracePeriodTerm,\n\t\tContainers: containers,\n\t\tVolumes: volumes,\n\t}, nil\n}", "func (in *PentestingSpec) DeepCopy() *PentestingSpec {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(PentestingSpec)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func MustParseV1Pipeline(t *testing.T, yaml string) *v1.Pipeline {\n\tt.Helper()\n\tvar pipeline v1.Pipeline\n\tyaml = `apiVersion: tekton.dev/v1\nkind: Pipeline\n` + yaml\n\tmustParseYAML(t, yaml, &pipeline)\n\treturn &pipeline\n}", "func NewCmdUpgradePipeline() (*cobra.Command, *Options) {\n\to := &Options{}\n\n\tcmd := &cobra.Command{\n\t\tUse: \"pipeline\",\n\t\tAliases: []string{\"pipelines\"},\n\t\tShort: \"Upgrades the pipelines in the source repositories to the latest version stream and pipeline catalog\",\n\t\tRun: func(cmd *cobra.Command, args []string) {\n\t\t\terr := o.Run()\n\t\t\thelper.CheckErr(err)\n\t\t},\n\t}\n\tcmd.Flags().StringVarP(&o.Dir, \"dir\", \"d\", \".\", \"the directory look for the 'jx-requirements.yml` file\")\n\tcmd.Flags().StringVarP(&o.Filter, \"filter\", \"f\", \"\", \"the text filter to filter out repositories to upgrade\")\n\tcmd.Flags().StringVarP(&o.ConfigFile, \"config\", \"c\", \"\", \"the configuration file to load for the repository configurations. If not specified we look in .jx/gitops/source-repositories.yaml\")\n\tcmd.Flags().StringVarP(&o.Strategy, \"strategy\", \"s\", \"resource-merge\", \"the 'kpt' strategy to use. To see available strategies type 'kpt pkg update --help'. Typical values are: resource-merge, fast-forward, alpha-git-patch, force-delete-replace\")\n\n\tcmd.Flags().StringVar(&o.PullRequestTitle, \"pull-request-title\", \"\", \"the PR title\")\n\tcmd.Flags().StringVar(&o.PullRequestBody, \"pull-request-body\", \"\", \"the PR body\")\n\tcmd.Flags().BoolVarP(&o.AutoMerge, \"auto-merge\", \"\", true, \"should we automatically merge if the PR pipeline is green\")\n\tcmd.Flags().BoolVarP(&o.NoConvert, \"no-convert\", \"\", false, \"disables converting from Kptfile based pipelines to the uses:sourceURI notation for reusing pipelines across repositories\")\n\tcmd.Flags().StringVarP(&o.KptBinary, \"bin\", \"\", \"\", \"the 'kpt' binary name to use. 
If not specified this command will download the jx binary plugin into ~/.jx3/plugins/bin and use that\")\n\n\to.EnvironmentPullRequestOptions.ScmClientFactory.AddFlags(cmd)\n\n\teo := &o.EnvironmentPullRequestOptions\n\tcmd.Flags().StringVarP(&eo.CommitTitle, \"commit-title\", \"\", \"\", \"the commit title\")\n\tcmd.Flags().StringVarP(&eo.CommitMessage, \"commit-message\", \"\", \"\", \"the commit message\")\n\treturn cmd, o\n}", "func (p *Parallel) NewPipeline() *Pipeline {\n\tpipe := NewPipeline()\n\tp.Add(pipe)\n\treturn pipe\n}", "func (g *Gitlab) CreatePipeline(pid, ref string) (*Pipeline, error) {\n\tvar pl Pipeline\n\tdata, err := g.buildAndExecRequest(\n\t\thttp.MethodPost,\n\t\tg.ResourceUrlWithQuery(\n\t\t\tpipelineCreationUrl,\n\t\t\tmap[string]string{\":id\": pid},\n\t\t\tmap[string]string{\"ref\": ref},\n\t\t),\n\t\tnil,\n\t)\n\tif nil != err {\n\t\treturn nil, fmt.Errorf(\"Request create pipeline API error: %v\", err)\n\t}\n\n\tif err := json.Unmarshal(data, &pl); nil != err {\n\t\treturn nil, fmt.Errorf(\"Decode response error: %v\", err)\n\t}\n\n\treturn &pl, nil\n}", "func New() filters.Spec {\n\treturn filter{}\n}", "func createTestSpec(\n\tname string,\n\tproperties ...*PropertyDefinition) TypeDefinition {\n\tspecName := MakeInternalTypeName(pkg, name+SpecSuffix)\n\treturn MakeTypeDefinition(\n\t\tspecName,\n\t\tNewObjectType().WithProperties(properties...))\n}", "func NewPipeWrapper(runDir, expt string) (*PipeWrapper, error) {\n\tpipeFile := filepath.Join(runDir, fmt.Sprintf(\"exporter-%s\", expt))\n\tif err := syscall.Mkfifo(pipeFile, constant.DefaultRootFileMode); err != nil {\n\t\treturn nil, err\n\t}\n\tpipeWraper := PipeWrapper{\n\t\tPipeFile: pipeFile,\n\t}\n\treturn &pipeWraper, nil\n}", "func NewMsgSpec(subjectName string, newMsg func() interface{}) (MsgSpec, error) {\n\tif !SubjectNameRegexp.MatchString(subjectName) {\n\t\treturn nil, fmt.Errorf(\"SubjectName format invalid\")\n\t}\n\n\tif newMsg == nil {\n\t\treturn nil, fmt.Errorf(\"NewMsg is empty\")\n\t}\n\tmsgValue := newMsg()\n\tif msgValue == nil {\n\t\treturn nil, fmt.Errorf(\"NewMsg() returns nil\")\n\t}\n\tmsgType := reflect.TypeOf(msgValue)\n\tif msgType.Kind() != reflect.Ptr {\n\t\treturn nil, fmt.Errorf(\"NewMsg() returns %s which is not a pointer\", msgType.String())\n\t}\n\n\treturn &msgSpec{\n\t\tsubjectName: subjectName,\n\t\tnewMsg: newMsg,\n\t\tmsgType: msgType,\n\t\tmsgValue: msgValue,\n\t}, nil\n}", "func NewPipe(b Buffer) *Pipe {\n\tvar p Pipe\n\tp.b = b\n\tp.c.L = &p.m\n\treturn &p\n}", "func (in *AzurePipelinesPoolSpec) DeepCopy() *AzurePipelinesPoolSpec {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(AzurePipelinesPoolSpec)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func New(stages ...StageRunner) *Pipeline {\n\treturn &Pipeline{\n\t\tstages: stages,\n\t}\n}", "func New(opt ...Option) (p *Pipeline, err error) {\n\toptions := Options{}\n\tfor _, v := range opt {\n\t\tv(&options)\n\t}\n\tp = &Pipeline{\n\t\tOptions: options,\n\t\trunMutex: sync.Mutex{},\n\t\tstatus: STATUS_STOP,\n\t}\n\terr = p.init()\n\treturn\n}", "func MustParseV1beta1Pipeline(t *testing.T, yaml string) *v1beta1.Pipeline {\n\tt.Helper()\n\tvar pipeline v1beta1.Pipeline\n\tyaml = `apiVersion: tekton.dev/v1beta1\nkind: Pipeline\n` + yaml\n\tmustParseYAML(t, yaml, &pipeline)\n\treturn &pipeline\n}", "func NewPipelineDefinition(path string, env *PipelineEnvironment) (*PipelineDefinition, error) {\n\tdir, err := os.Getwd()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefaultPath := filepath.Join(dir, 
GantryDef)\n\tif _, err := os.Stat(defaultPath); path == \"\" && err == nil {\n\t\tpath = defaultPath\n\t}\n\tdefaultPath = filepath.Join(dir, DockerCompose)\n\tif _, err := os.Stat(defaultPath); path == \"\" && err == nil {\n\t\tpath = defaultPath\n\t}\n\tfile, err := os.Open(path)\n\tif err != nil {\n\t\tpipelineLogger.Println(\"Could not open pipeline definition.\")\n\t\treturn nil, err\n\t}\n\tdefer file.Close()\n\n\tdata, err := ioutil.ReadAll(file)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t// Apply environment to yaml\n\tpreproc, err := preprocessor.NewPreprocessor()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdata, err = preproc.Process(data, env)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\td := &PipelineDefinition{}\n\tif err := yaml.Unmarshal(data, d); err != nil {\n\t\treturn d, err\n\t}\n\tif err := d.checkVersion(); err != nil {\n\t\treturn d, err\n\t}\n\t// Update with specific meta if defined\n\tfor name, meta := range env.Steps {\n\t\ts, ok := d.Steps[name]\n\t\tif ok {\n\t\t\tmeta.Type = s.Meta.Type\n\t\t\ts.Meta = meta\n\t\t\tif meta.Type == ServiceTypeStep {\n\t\t\t\ts.Meta.KeepAlive = KeepAliveNo\n\t\t\t}\n\t\t\td.Steps[name] = s\n\t\t} else {\n\t\t\tif meta.Selected {\n\t\t\t\treturn d, fmt.Errorf(\"no such service or step: %s\", name)\n\t\t\t}\n\t\t\tif !meta.Ignore {\n\t\t\t\tlog.Printf(\"ignoring unknown step '%s'\", name)\n\t\t\t}\n\t\t}\n\t}\n\t// Open output files for container logs\n\tfor n, step := range d.Steps {\n\t\tif err = step.Meta.Open(); err != nil {\n\t\t\tpipelineLogger.Printf(\"Error creating log output of %s: %s\", step.ColoredName(), err)\n\t\t}\n\t\td.Steps[n] = step\n\t}\n\treturn d, nil\n}", "func TestPipelineResourceRequest(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip(\"Skipping integration tests in short mode\")\n\t}\n\n\tt.Parallel()\n\tc, ns := minikubetestenv.AcquireCluster(t)\n\t// create repos\n\tdataRepo := tu.UniqueString(\"repo\")\n\tpipelineName := tu.UniqueString(\"pipeline\")\n\trequire.NoError(t, c.CreateRepo(pfs.DefaultProjectName, dataRepo))\n\t// Resources are not yet in client.CreatePipeline() (we may add them later)\n\t_, err := c.PpsAPIClient.CreatePipeline(\n\t\tcontext.Background(),\n\t\t&pps.CreatePipelineRequest{\n\t\t\tPipeline: client.NewPipeline(pfs.DefaultProjectName, pipelineName),\n\t\t\tTransform: &pps.Transform{\n\t\t\t\tCmd: []string{\"cp\", path.Join(\"/pfs\", dataRepo, \"file\"), \"/pfs/out/file\"},\n\t\t\t},\n\t\t\tParallelismSpec: &pps.ParallelismSpec{\n\t\t\t\tConstant: 1,\n\t\t\t},\n\t\t\tResourceRequests: &pps.ResourceSpec{\n\t\t\t\tMemory: \"100M\",\n\t\t\t\tCpu: 0.5,\n\t\t\t\tDisk: \"10M\",\n\t\t\t},\n\t\t\tInput: &pps.Input{\n\t\t\t\tPfs: &pps.PFSInput{\n\t\t\t\t\tRepo: dataRepo,\n\t\t\t\t\tBranch: \"master\",\n\t\t\t\t\tGlob: \"/*\",\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\trequire.NoError(t, err)\n\n\t// Get info about the pipeline pods from k8s & check for resources\n\tpipelineInfo, err := c.InspectPipeline(pfs.DefaultProjectName, pipelineName, false)\n\trequire.NoError(t, err)\n\n\tvar container v1.Container\n\tkubeClient := tu.GetKubeClient(t)\n\trequire.NoError(t, backoff.Retry(func() error {\n\t\tpodList, err := kubeClient.CoreV1().Pods(ns).List(\n\t\t\tcontext.Background(),\n\t\t\tmetav1.ListOptions{\n\t\t\t\tLabelSelector: metav1.FormatLabelSelector(metav1.SetAsLabelSelector(\n\t\t\t\t\tmap[string]string{\n\t\t\t\t\t\t\"app\": \"pipeline\",\n\t\t\t\t\t\t\"pipelineName\": pipelineInfo.Pipeline.Name,\n\t\t\t\t\t\t\"pipelineVersion\": 
fmt.Sprint(pipelineInfo.Version),\n\t\t\t\t\t},\n\t\t\t\t)),\n\t\t\t})\n\t\tif err != nil {\n\t\t\treturn errors.EnsureStack(err) // retry\n\t\t}\n\t\tif len(podList.Items) != 1 || len(podList.Items[0].Spec.Containers) == 0 {\n\t\t\treturn errors.Errorf(\"could not find single container for pipeline %s\", pipelineInfo.Pipeline.Name)\n\t\t}\n\t\tcontainer = podList.Items[0].Spec.Containers[0]\n\t\treturn nil // no more retries\n\t}, backoff.NewTestingBackOff()))\n\t// Make sure a CPU and Memory request are both set\n\tcpu, ok := container.Resources.Requests[v1.ResourceCPU]\n\trequire.True(t, ok)\n\trequire.Equal(t, \"500m\", cpu.String())\n\tmem, ok := container.Resources.Requests[v1.ResourceMemory]\n\trequire.True(t, ok)\n\trequire.Equal(t, \"100M\", mem.String())\n\tdisk, ok := container.Resources.Requests[v1.ResourceEphemeralStorage]\n\trequire.True(t, ok)\n\trequire.Equal(t, \"10M\", disk.String())\n}", "func newComponent() componentsV1alpha1.Component {\n\tvar comp componentsV1alpha1.Component\n\tcomp.Spec = componentsV1alpha1.ComponentSpec{}\n\treturn comp\n}", "func (c *CanaryDeployer) IsNewSpec(cd *flaggerv1.Canary) (bool, error) {\n\ttargetName := cd.Spec.TargetRef.Name\n\tcanary, err := c.kubeClient.AppsV1().Deployments(cd.Namespace).Get(targetName, metav1.GetOptions{})\n\tif err != nil {\n\t\tif errors.IsNotFound(err) {\n\t\t\treturn false, fmt.Errorf(\"deployment %s.%s not found\", targetName, cd.Namespace)\n\t\t}\n\t\treturn false, fmt.Errorf(\"deployment %s.%s query error %v\", targetName, cd.Namespace, err)\n\t}\n\n\tif cd.Status.LastAppliedSpec == \"\" {\n\t\treturn true, nil\n\t}\n\n\tnewSpec := &canary.Spec.Template.Spec\n\toldSpecJson, err := base64.StdEncoding.DecodeString(cd.Status.LastAppliedSpec)\n\tif err != nil {\n\t\treturn false, fmt.Errorf(\"%s.%s decode error %v\", cd.Name, cd.Namespace, err)\n\t}\n\toldSpec := &corev1.PodSpec{}\n\terr = json.Unmarshal(oldSpecJson, oldSpec)\n\tif err != nil {\n\t\treturn false, fmt.Errorf(\"%s.%s unmarshal error %v\", cd.Name, cd.Namespace, err)\n\t}\n\n\tif diff := cmp.Diff(*newSpec, *oldSpec, cmpopts.IgnoreUnexported(resource.Quantity{})); diff != \"\" {\n\t\t//fmt.Println(diff)\n\t\treturn true, nil\n\t}\n\n\treturn false, nil\n}", "func NewFTSpec(account string, name string, org string) *FTSpec {\n\treturn &FTSpec{\n\t\tApplicationSpec{\n\t\t\tOrg: org,\n\t\t\tName: name,\n\t\t\tAccount: account,\n\t\t\tCoinfigFileName: FTConfigFileName,\n\t\t},\n\t}\n}", "func (in *PredictorSpec) DeepCopy() *PredictorSpec {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(PredictorSpec)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func NewPipeline() Pipeline {\n\n\tp := &pipeline{}\n\tp.head = newHandlerContext(p, headHandler{}, nil, nil)\n\tp.tail = newHandlerContext(p, tailHandler{}, nil, nil)\n\n\tp.head.next = p.tail\n\tp.tail.prev = p.head\n\n\t// head + tail\n\tp.size = 2\n\treturn p\n}", "func newPrometheusSpec(name, addr string) cap.SupervisorSpec {\n\treturn cap.NewSupervisorSpec(\n\t\tname,\n\t\t// this function builds an HTTP Server, this functionality requires more\n\t\t// than a goroutine given the only way to stop a http server is to call the\n\t\t// http.Shutdown function on a separate goroutine\n\t\tfunc() ([]cap.Node, cap.CleanupResourcesFn, error) {\n\t\t\tserver := buildPrometheusHTTPServer(addr)\n\n\t\t\t// CAUTION: The order here matters, we need waitUntilDone to start last so\n\t\t\t// that it can terminate first, if this is not the case the\n\t\t\t// listenAndServeHTTPWorker child will never terminate.\n\t\t\t//\n\t\t\t// DISCLAIMER: The caution above _is not_ a capataz requirement, but a\n\t\t\t// requirement of net/http's API\n\t\t\tnodes := []cap.Node{\n\t\t\t\tlistenAndServeHTTPWorker(server),\n\t\t\t\twaitUntilDoneHTTPWorker(server),\n\t\t\t}\n\n\t\t\tcleanupServer := func() error {\n\t\t\t\treturn server.Close()\n\t\t\t}\n\n\t\t\treturn nodes, cleanupServer, nil\n\t\t},\n\t)\n}", "func New(spec data.Spec, dp *data.Points, width, height int) chart.Chart {\n\tseries := []chart.Series{}\n\tmarkers := []chart.GridLine{}\n\tfor _, f := range spec.Fields {\n\t\tvals := dp.Get(f.ID)\n\t\tif f.IsMarker {\n\t\t\tfor i, v := range vals {\n\t\t\t\tif v > 0 {\n\t\t\t\t\tmarkers = append(markers, chart.GridLine{Value: float64(i)})\n\t\t\t\t}\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tseries = append(series, chart.ContinuousSeries{\n\t\t\tName: fmt.Sprintf(\"%s: %s\", f.Name, siValueFormater(vals[len(vals)-1])),\n\t\t\tYValues: vals,\n\t\t})\n\t}\n\treturn newChart(series, markers, width, height)\n}", "func (in *SupplyChainSpec) DeepCopy() *SupplyChainSpec {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(SupplyChainSpec)\n\tin.DeepCopyInto(out)\n\treturn out\n}" ]
[ "0.82939947", "0.7002424", "0.7002424", "0.7002424", "0.6526051", "0.636503", "0.6136273", "0.611253", "0.5635833", "0.56107116", "0.553662", "0.53617257", "0.5338917", "0.531994", "0.5294855", "0.527345", "0.5272532", "0.5264014", "0.52633154", "0.5214412", "0.52054584", "0.51972497", "0.5184368", "0.51786864", "0.5174637", "0.51712924", "0.5147109", "0.5119813", "0.5114196", "0.5104715", "0.51016015", "0.5100341", "0.50572854", "0.50492096", "0.50436175", "0.50380194", "0.5028853", "0.50277716", "0.50201595", "0.50133216", "0.50082093", "0.500004", "0.4995232", "0.49897656", "0.4989556", "0.49804497", "0.49787182", "0.49731857", "0.49731857", "0.49708605", "0.49648988", "0.49591064", "0.49456698", "0.49413204", "0.49044636", "0.4883914", "0.4881523", "0.48748103", "0.4851663", "0.48451763", "0.4843472", "0.48348278", "0.48164693", "0.48139948", "0.4809112", "0.47959122", "0.47945872", "0.47879726", "0.4779457", "0.4772973", "0.47675455", "0.4762111", "0.47555143", "0.4754201", "0.4752841", "0.47518638", "0.47504112", "0.4733188", "0.4717592", "0.47112954", "0.4700328", "0.46945298", "0.46851015", "0.46818763", "0.46597198", "0.4655002", "0.46492627", "0.4626036", "0.46243498", "0.46188697", "0.4618847", "0.46086454", "0.46079907", "0.4602916", "0.45986003", "0.4598111", "0.45932254", "0.45816764", "0.4572973", "0.45695308" ]
0.8225763
1
NewKeeperSpec generates a new KeeperSpec from a job.KeeperSpec
func NewKeeperSpec(spec *job.KeeperSpec) *KeeperSpec {
	return &KeeperSpec{
		ContractAddress: spec.ContractAddress,
		FromAddress:     spec.FromAddress,
		CreatedAt:       spec.CreatedAt,
		UpdatedAt:       spec.UpdatedAt,
		EVMChainID:      spec.EVMChainID,
	}
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func NewKeeperSpec(spec *job.KeeperSpec) *KeeperSpec {\n\treturn &KeeperSpec{\n\t\tContractAddress: spec.ContractAddress,\n\t\tFromAddress: spec.FromAddress,\n\t\tCreatedAt: spec.CreatedAt,\n\t\tUpdatedAt: spec.UpdatedAt,\n\t}\n}", "func NewKeeper(cdc *codec.Codec, key sdk.StoreKey, pk ProtocolKeeper, sk StakingKeeper, ck BankKeeper,\n\tparamSpace params.Subspace) Keeper {\n\treturn Keeper{\n\t\tkey,\n\t\tcdc,\n\t\tpk,\n\t\tsk,\n\t\tck,\n\t\tparamSpace.WithKeyTable(types.ParamKeyTable()),\n\t}\n}", "func NewKeeper(cdc *codec.Codec, ak types.AccountKeeper, key sdk.StoreKey, paramstore types.ParamSubspace) Keeper {\n\tkeeper := Keeper{\n\t\tstoreKey: key,\n\t\tcdc: cdc,\n\t\tak: ak,\n\t\tparamstore: paramstore.WithKeyTable(types.ParamKeyTable()),\n\t}\n\treturn keeper\n}", "func NewKeeper(cdc *wire.Codec, key sdk.StoreKey, vs sdk.ValidatorSet, params params.Getter, codespace sdk.CodespaceType) Keeper {\n\tkeeper := Keeper{\n\t\tstoreKey: key,\n\t\tcdc: cdc,\n\t\tvalidatorSet: vs,\n\t\tparams: params,\n\t\tcodespace: codespace,\n\t}\n\treturn keeper\n}", "func NewKeeper(cdc *wire.Codec, paramsKeeper params.Keeper) Keeper {\n\treturn Keeper{\n\t\tcdc: cdc,\n\t\tparamsKeeper: paramsKeeper,\n\t}\n}", "func NewKeeper(ok oracle.Keeper, mk mint.Keeper, paramspace params.Subspace) Keeper {\n\treturn Keeper{\n\t\tok: ok,\n\t\tmk: mk,\n\t\tparamSpace: paramspace.WithKeyTable(paramKeyTable()),\n\t}\n}", "func NewKeeper(coinKeeper bank.Keeper, cdc *codec.Codec, key sdk.StoreKey, paramspace types.ParamSubspace) Keeper {\n\tkeeper := Keeper{\n\t\tCoinKeeper: coinKeeper,\n\t\tstoreKey: key,\n\t\tcdc: cdc,\n\t\tparamspace: paramspace.WithKeyTable(types.ParamKeyTable()),\n\t}\n\treturn keeper\n}", "func NewKeeper(cdc *codec.Codec, key sdk.StoreKey, subspace sdk.Subspace, maccPerms map[string][]string) Keeper {\n\t// set the addresses\n\tpermAddrs := make(map[string]types.PermissionsForAddress)\n\tfor name, perms := range maccPerms {\n\t\tpermAddrs[name] = types.NewPermissionsForAddress(name, perms)\n\t}\n\n\treturn Keeper{\n\t\tcdc: cdc,\n\t\tstoreKey: key,\n\t\tsubspace: subspace.WithKeyTable(types.ParamKeyTable()),\n\t\tpermAddrs: permAddrs,\n\t}\n}", "func NewKeeper(\n\tcdc *codec.Codec, key sdk.StoreKey,\n\tparamSpace params.Subspace,\n\tbk types.BankKeeperAccountID,\n\tsk types.StakingKeeperAccountID,\n\tsupplyKeeper types.SupplyKeeperAccountID,\n\taccKeeper account.Keeper,\n\tfeeCollectorName string, blacklistedAddrs map[string]bool,\n) Keeper {\n\t// set KeyTable if it has not already been set\n\tif !paramSpace.HasKeyTable() {\n\t\tparamSpace = paramSpace.WithKeyTable(types.ParamKeyTable())\n\t}\n\n\treturn Keeper{\n\t\tstoreKey: key,\n\t\tcdc: cdc,\n\t\tparamSpace: paramSpace,\n\t\tBankKeeper: bk,\n\t\tstakingKeeper: sk,\n\t\tsupplyKeeper: supplyKeeper,\n\t\tAccKeeper: accKeeper,\n\t\tfeeCollectorName: feeCollectorName,\n\t\tblacklistedAddrs: blacklistedAddrs,\n\t\tstartNotDistriTimePoint: time.Time{},\n\t}\n}", "func NewKeeper(cdc *codec.Codec,\n\tkey sdk.StoreKey,\n\tparamspace types.ParamSubspace,\n\tbankKeeper types.BankKeeper,\n\treferralKeeper types.ReferralKeeper,\n\tscheduleKeeper types.ScheduleKeeper,\n\tvpnKeeper types.VPNKeeper,\n\tstorageKeeper types.StorageKeeper,\n\tsupplyKeeper types.SupplyKeeper,\n\tprofileKeeper types.ProfileKeeper,\n) Keeper {\n\tkeeper := Keeper{\n\t\tstoreKey: key,\n\t\tcdc: cdc,\n\t\tparamspace: paramspace.WithKeyTable(types.ParamKeyTable()),\n\t\tbankKeeper: bankKeeper,\n\t\tReferralKeeper: referralKeeper,\n\t\tscheduleKeeper: scheduleKeeper,\n\t\tvpnKeeper: 
vpnKeeper,\n\t\tstorageKeeper: storageKeeper,\n\t\tsupplyKeeper: supplyKeeper,\n\t\tprofileKeeper: profileKeeper,\n\t}\n\treturn keeper\n}", "func NewKeeper(\n\tcdc *codec.Codec, key sdk.StoreKey, ck internal.CUKeeper, tk internal.TokenKeeper, proto func() exported.CUIBCAsset) Keeper {\n\treturn Keeper{\n\t\tkey: key,\n\t\tcdc: cdc,\n\t\tproto: proto,\n\t\tck: ck,\n\t\ttk: tk,\n\t}\n}", "func NewKeeper(cdc *codec.Codec, key sdk.StoreKey, paramstore subspace.Subspace, pfk types.PricefeedKeeper,\n\tak types.AuctionKeeper, sk types.SupplyKeeper, ack types.AccountKeeper, maccs map[string][]string) Keeper {\n\tif !paramstore.HasKeyTable() {\n\t\tparamstore = paramstore.WithKeyTable(types.ParamKeyTable())\n\t}\n\n\treturn Keeper{\n\t\tkey: key,\n\t\tcdc: cdc,\n\t\tparamSubspace: paramstore,\n\t\tpricefeedKeeper: pfk,\n\t\tauctionKeeper: ak,\n\t\tsupplyKeeper: sk,\n\t\taccountKeeper: ack,\n\t\tmaccPerms: maccs,\n\t}\n}", "func NewKeeper(cdc codec.Marshaler, key sdk.StoreKey, channelKeeper transfertypes.ChannelKeeper, scopedKeeper capability.ScopedKeeper, portKeeper types.PortKeeper) Keeper {\n\treturn Keeper{\n\t\tstoreKey: key,\n\t\tcdc: cdc,\n\t\tChannelKeeper: channelKeeper,\n\t\tScopedKeeper: scopedKeeper,\n\t\tPortKeeper: portKeeper,\n\t}\n}", "func NewKeeper(cdc *wire.Codec, key sdk.StoreKey, ck bank.Keeper) Keeper {\n\tkeeper := Keeper{\n\t\tstoreKey: key,\n\t\tcdc: cdc,\n\t\tcoinKeeper: ck,\n\t\t//codespace: codespace,\n\t}\n\treturn keeper\n}", "func NewKeeper(\n\tcdc codec.BinaryMarshaler,\n\tstoreKey sdk.StoreKey,\n\tchannelKeeper types.ChannelKeeper,\n\tscopedKeeper capabilitykeeper.ScopedKeeper,\n) Keeper {\n\treturn Keeper{\n\t\tcdc: cdc,\n\t\tstoreKey: storeKey,\n\t\tchannelKeeper: channelKeeper,\n\t\tscopedKeeper: scopedKeeper,\n\t}\n}", "func NewKeeper(am sdk.AccountMapper) Keeper {\n\treturn Keeper{am: am}\n}", "func NewKeeper(codec *codec.Codec, storeKey sdk.StoreKey,\r\n\taccountKeeper AccountKeeper, bankKeeper BankKeeper, invoiceKeeper InvoiceKeeper, loanKeeper LoanKeeper, supplyKeeper supply.Keeper,\r\n\tparamStore params.Subspace,\r\n\tcodespace sdk.CodespaceType) Keeper {\r\n\treturn Keeper{\r\n\t\tstoreKey: storeKey,\r\n\t\tcodec: codec,\r\n\t\tparamStore: paramStore.WithKeyTable(ParamKeyTable()),\r\n\t\tcodespace: codespace,\r\n\t\tbankKeeper: bankKeeper,\r\n\t\taccountKeeper: accountKeeper,\r\n\t\tinvoiceKeeper: invoiceKeeper,\r\n\t\tloanKeeper: loanKeeper,\r\n\t\tsupplyKeeper: supplyKeeper,\r\n\t}\r\n}", "func NewKeeper(\n\tcdc codec.BinaryMarshaler, storeKey sdk.StoreKey,\n\tparamSpace paramstypes.Subspace, rk RelationshipsKeeper, sk SubspacesKeeper,\n) Keeper {\n\tif !paramSpace.HasKeyTable() {\n\t\tparamSpace = paramSpace.WithKeyTable(types.ParamKeyTable())\n\t}\n\n\treturn Keeper{\n\t\tstoreKey: storeKey,\n\t\tcdc: cdc,\n\t\tparamSubspace: paramSpace,\n\t\trk: rk,\n\t\tsk: sk,\n\t}\n}", "func NewKeeper(\n\tcdc *codec.Codec,\n\tstoreKey sdk.StoreKey,\n\tchannelKeeper types.ChannelKeeper, portKeeper types.PortKeeper,\n\tscopedKeeper capability.ScopedKeeper,\n) Keeper {\n\treturn Keeper{\n\t\tcdc: cdc,\n\t\tstoreKey: storeKey,\n\t\tchannelKeeper: channelKeeper,\n\t\tportKeeper: portKeeper,\n\t\tscopedKeeper: scopedKeeper,\n\t}\n}", "func NewKeeper(cdc *codec.Codec, key sdk.StoreKey, bankKeeper types.BankKeeper) Keeper {\n\tkeeper := Keeper{\n\t\tstoreKey: key,\n\t\tcdc: cdc,\n\t\tbankKeeper: bankKeeper,\n\t}\n\treturn keeper\n}", "func NewKeeper(\n\tcdc codec.BinaryMarshaler,\n\tstoreKey sdk.StoreKey,\n\tparamSpace paramtypes.Subspace,\n\tak 
authkeeper.AccountKeeper,\n\tbk types.BankKeeper,\n) BaseKeeper {\n\n\t// set KeyTable if it has not already been set\n\tif !paramSpace.HasKeyTable() {\n\t\tparamSpace = paramSpace.WithKeyTable(types.ParamKeyTable())\n\t}\n\n\treturn BaseKeeper{\n\t\tsvcTags: metrics.Tags{\n\t\t\t\"svc\": \"oracle_k\",\n\t\t},\n\t\tparamSpace: paramSpace,\n\n\t\tstoreKey: storeKey,\n\t\tcdc: cdc,\n\t\taccountKeeper: ak,\n\t\tbankKeeper: bk,\n\t\tlogger: log.WithField(\"module\", types.ModuleName),\n\t}\n}", "func NewKeeper(paramSpace params.Subspace,\n\tdistrKeeper DistrKeeper, bankKeeper BankKeeper,\n\tfeeCollectionKeeper FeeCollectionKeeper) Keeper {\n\n\treturn Keeper{\n\t\troutes: []InvarRoute{},\n\t\tparamSpace: paramSpace.WithKeyTable(ParamKeyTable()),\n\t\tdistrKeeper: distrKeeper,\n\t\tbankKeeper: bankKeeper,\n\t\tfeeCollectionKeeper: feeCollectionKeeper,\n\t}\n}", "func NewKeeper(cdc *codec.Codec, storeKey sdk.StoreKey, bankKeeper bank.Keeper) Keeper {\n\treturn Keeper{\n\t\tcdc: cdc,\n\t\tstoreKey: storeKey,\n\t\tBankKeeper: bankKeeper,\n\t}\n}", "func NewKeeper(\n\tcdc codec.Codec, key sdk.StoreKey,\n\tchannelKeeper types.ChannelKeeper, portKeeper types.PortKeeper,\n\tbankKeeper bankkeeper.Keeper,\n\tscopedKeeper capabilitykeeper.ScopedKeeper,\n\tpushAction vm.ActionPusher,\n) Keeper {\n\n\treturn Keeper{\n\t\tstoreKey: key,\n\t\tcdc: cdc,\n\t\tbankKeeper: bankKeeper,\n\t\tchannelKeeper: channelKeeper,\n\t\tportKeeper: portKeeper,\n\t\tscopedKeeper: scopedKeeper,\n\t\tPushAction: pushAction,\n\t}\n}", "func NewKeeper(cdc codec.Marshaler, key sdk.StoreKey, wasmKeeper *wasm.Keeper, aiRequestSubspace params.Subspace, stakingKeeper staking.Keeper, bankKeeper bank.Keeper, providerKeeper *provider.Keeper) *Keeper {\n\tif !aiRequestSubspace.HasKeyTable() {\n\t\t// register parameters of the airequest module into the param space\n\t\taiRequestSubspace = aiRequestSubspace.WithKeyTable(types.ParamKeyTable())\n\t}\n\treturn &Keeper{\n\t\tstoreKey: key,\n\t\tcdc: cdc,\n\t\twasmKeeper: wasmKeeper,\n\t\tparamSpace: aiRequestSubspace,\n\t\tstakingKeeper: stakingKeeper,\n\t\tbankKeeper: bankKeeper,\n\t\tproviderKeeper: providerKeeper,\n\t}\n}", "func NewKeeper(coinKeeper bank.Keeper, cdc *codec.Codec, key sdk.StoreKey) Keeper {\n\tkeeper := Keeper{\n\t\tCoinKeeper: coinKeeper,\n\t\tstoreKey: key,\n\t\tcdc: cdc,\n\t}\n\treturn keeper\n}", "func NewKeeper(storeKey sdk.StoreKey, storeCiphertextShares *sdk.KVStoreKey, storeDecryptionShares *sdk.KVStoreKey, cdc *codec.Codec) Keeper {\n\trandmetric := PrometheusMetrics()\n\tt := time.Now().UTC()\n\treturn Keeper{\n\t\tstoreKey: storeKey,\n\t\tgroup: P256,\n\t\tstoreCiphertextSharesKey: storeCiphertextShares,\n\t\tstoreDecryptionSharesKey: storeDecryptionShares,\n\t\tcdc: cdc,\n\t\trandmetric: randmetric,\n\t\tresTime: t,\n\t}\n}", "func NewKeeper(cdc codec.BinaryMarshaler, storeKey sdk.StoreKey, pk postskeeper.Keeper) Keeper {\n\treturn Keeper{\n\t\tcdc: cdc,\n\t\tstoreKey: storeKey,\n\t\tpostKeeper: pk,\n\t}\n}", "func NewKeeper(cdc codec.Marshaler, storeKey, memKey sdk.StoreKey, stakingKeeper types.StakingKeeper, paramspace types.ParamSubspace) *Keeper {\n\n\t// set KeyTable if it has not already been set\n\tif !paramspace.HasKeyTable() {\n\t\tparamspace = paramspace.WithKeyTable(types.ParamKeyTable())\n\t}\n\n\treturn &Keeper{\n\t\tcdc: cdc,\n\t\tstoreKey: storeKey,\n\t\tmemKey: memKey,\n\t\tStakingKeeper: stakingKeeper,\n\t\tparamspace: paramspace,\n\t}\n}", "func NewKeeper(\n\tcdc codec.BinaryMarshaler,\n\tdataKey sdk.StoreKey,\n\tindexKey 
sdk.StoreKey,\n\treferralKeeper types.ReferralKeeper,\n\taccountKeeper types.AccountKeeper,\n\tbankKeeper types.BankKeeper,\n\tparamspace types.ParamSubspace,\n\tfeeCollectorName string,\n) Keeper {\n\tkeeper := Keeper{\n\t\tdataStoreKey: dataKey,\n\t\tindexStoreKey: indexKey,\n\t\tcdc: cdc,\n\t\treferralKeeper: referralKeeper,\n\t\taccountKeeper: accountKeeper,\n\t\tbankKeeper: bankKeeper,\n\t\tparamspace: paramspace.WithKeyTable(types.ParamKeyTable()),\n\t\tfeeCollectorName: feeCollectorName,\n\t}\n\treturn keeper\n}", "func (in *ZookeeperSpec) DeepCopy() *ZookeeperSpec {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(ZookeeperSpec)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func NewKeeper(storeKey sdk.StoreKey, cdc codec.BinaryCodec) Keeper {\n\treturn Keeper{\n\t\tstoreKey: storeKey,\n\t\tcdc: cdc,\n\t\tupgradeHandlers: map[string]types.UpgradeHandler{},\n\t}\n}", "func NewKeeper(coinKeeper bank.Keeper, storeKey sdk.StoreKey, cdc *codec.Codec) Keeper {\n\treturn Keeper{\n\t\tcoinKeeper: coinKeeper,\n\t\tstoreKey: storeKey,\n\t\tcdc: cdc,\n\t}\n}", "func NewKeeper(coinKeeper bank.Keeper, storeKey sdk.StoreKey, cdc *codec.Codec) Keeper {\n\treturn Keeper{\n\t\tcoinKeeper: coinKeeper,\n\t\tstoreKey: storeKey,\n\t\tcdc: cdc,\n\t}\n}", "func NewKeeper(storeKey sdk.StoreKey, cdc codec.BinaryCodec) Keeper {\n\treturn Keeper{cdc: cdc, storeKey: storeKey}\n}", "func (k *Keeper) createLegacyKeeperJob(client cmd.HTTPClient, registryAddr, nodeAddr string) error {\n\trequest, err := json.Marshal(web.CreateJobRequest{\n\t\tTOML: testspecs.GenerateKeeperSpec(testspecs.KeeperSpecParams{\n\t\t\tName: fmt.Sprintf(\"keeper job - registry %s\", registryAddr),\n\t\t\tContractAddress: registryAddr,\n\t\t\tFromAddress: nodeAddr,\n\t\t\tEvmChainID: int(k.cfg.ChainID),\n\t\t}).Toml(),\n\t})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to marshal request: %s\", err)\n\t}\n\n\tresp, err := client.Post(\"/v2/jobs\", bytes.NewReader(request))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to create keeper job: %s\", err)\n\t}\n\tdefer resp.Body.Close()\n\n\tif resp.StatusCode >= 400 {\n\t\tbody, err := io.ReadAll(resp.Body)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed to read error response body: %s\", err)\n\t\t}\n\n\t\treturn fmt.Errorf(\"unable to create keeper job: '%v' [%d]\", string(body), resp.StatusCode)\n\t}\n\n\treturn nil\n}", "func NewKeeper(coinKeeper bank.Keeper, storeKey sdk.StoreKey, cdc *codec.Codec, codespace sdk.CodespaceType) Keeper {\n\treturn Keeper{\n\t\tcoinKeeper: coinKeeper,\n\t\tstoreKey: storeKey,\n\t\tcdc: cdc,\n\t\tcodespace: codespace,\n\t}\n}", "func NewKeeper(storeKey sdk.StoreKey, paramStore params.Subspace, codec *codec.Codec, bankKeeper BankKeeper,\n\taccountKeeper auth.AccountKeeper, supplyKeeper supply.Keeper, cosmosDistKeeper cosmosDist.Keeper) Keeper {\n\n\t// ensure distribution module accounts are set\n\tif addr := supplyKeeper.GetModuleAddress(UserGrowthPoolName); addr == nil {\n\t\tpanic(fmt.Sprintf(\"%s module account has not been set\", UserGrowthPoolName))\n\t}\n\tif addr := supplyKeeper.GetModuleAddress(UserRewardPoolName); addr == nil {\n\t\tpanic(fmt.Sprintf(\"%s module account has not been set\", UserRewardPoolName))\n\t}\n\treturn Keeper{\n\t\tstoreKey,\n\t\tcodec,\n\t\tparamStore.WithKeyTable(ParamKeyTable()),\n\t\tbankKeeper,\n\t\taccountKeeper,\n\t\tsupplyKeeper,\n\t\tcosmosDistKeeper,\n\t}\n}", "func (k *Keeper) createKeeperJob(client cmd.HTTPClient, registryAddr, nodeAddr string) error {\n\tvar err error\n\tif k.cfg.OCR2Keepers 
{\n\t\terr = k.createOCR2KeeperJob(client, registryAddr, nodeAddr)\n\t} else {\n\t\terr = k.createLegacyKeeperJob(client, registryAddr, nodeAddr)\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlog.Println(\"Keeper job has been successfully created in the Chainlink node with address: \", nodeAddr)\n\n\treturn nil\n}", "func (k *Keeper) createKeeperJob(client cmd.HTTPClient, registryAddr, nodeAddr string) error {\n\trequest, err := json.Marshal(web.CreateJobRequest{\n\t\tTOML: testspecs.GenerateKeeperSpec(testspecs.KeeperSpecParams{\n\t\t\tName: fmt.Sprintf(\"keeper job - registry %s\", registryAddr),\n\t\t\tContractAddress: registryAddr,\n\t\t\tFromAddress: nodeAddr,\n\t\t\tEvmChainID: int(k.cfg.ChainID),\n\t\t\tMinIncomingConfirmations: 1,\n\t\t\tObservationSource: keeper.ExpectedObservationSource,\n\t\t}).Toml(),\n\t})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to marshal request: %s\", err)\n\t}\n\n\tresp, err := client.Post(\"/v2/jobs\", bytes.NewReader(request))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to create keeper job: %s\", err)\n\t}\n\tdefer resp.Body.Close()\n\n\tif resp.StatusCode >= 400 {\n\t\tbody, err := ioutil.ReadAll(resp.Body)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed to read error response body: %s\", err)\n\t\t}\n\n\t\treturn fmt.Errorf(\"unable to create keeper job: '%v' [%d]\", string(body), resp.StatusCode)\n\t}\n\tlog.Println(\"Keeper job has been successfully created in the Chainlink node with address: \", nodeAddr)\n\treturn nil\n}", "func (in *BookkeeperSpec) DeepCopy() *BookkeeperSpec {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(BookkeeperSpec)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func NewKeeper(orderKeeper types.OrderKeeper, tokenKeeper types.TokenKeeper, dexKeeper types.DexKeeper, swapKeeper types.SwapKeeper,\n\tfarmKeeper types.FarmKeeper, mintKeeper types.MintKeeper, marketKeeper types.MarketKeeper, cdc *codec.Codec, logger log.Logger, cfg *config.Config) Keeper {\n\tk := Keeper{\n\t\tOrderKeeper: orderKeeper,\n\t\tTokenKeeper: tokenKeeper,\n\t\tmarketKeeper: marketKeeper,\n\t\tdexKeeper: dexKeeper,\n\t\tswapKeeper: swapKeeper,\n\t\tfarmKeeper: farmKeeper,\n\t\tmintKeeper: mintKeeper,\n\t\tcdc: cdc,\n\t\tLogger: logger.With(\"module\", \"backend\"),\n\t\tConfig: cfg,\n\t\twsChan: nil,\n\t}\n\n\tif k.Config.EnableBackend {\n\t\tk.Cache = cache.NewCache()\n\t\torm, err := orm.New(k.Config.LogSQL, &k.Config.OrmEngine, &k.Logger)\n\t\tif err != nil {\n\t\t\tpanic(fmt.Sprintf(\"backend new orm error:%s\", err.Error()))\n\t\t}\n\t\tk.Orm = orm\n\t\tk.stopChan = make(chan struct{})\n\n\t\tif k.Config.EnableMktCompute {\n\t\t\t// websocket channel\n\t\t\tk.wsChan = make(chan types.IWebsocket, types.WebsocketChanCapacity)\n\t\t\tk.ticker3sChan = make(chan types.IWebsocket, types.WebsocketChanCapacity)\n\t\t\tgo generateKline1M(k)\n\t\t\t// init ticker buffer\n\t\t\tts := time.Now().Unix()\n\n\t\t\tk.UpdateTickersBuffer(ts-types.SecondsInADay*14, ts, nil)\n\n\t\t\tgo k.mergeTicker3SecondEvents()\n\n\t\t\t// set observer keeper\n\t\t\tk.swapKeeper.SetObserverKeeper(k)\n\t\t\tk.farmKeeper.SetObserverKeeper(k)\n\t\t}\n\n\t}\n\tlogger.Debug(fmt.Sprintf(\"%+v\", k.Config))\n\treturn k\n}", "func CreateTestKeepers(t *testing.T, consensusNeeded float64, validatorAmounts []int64, extraMaccPerm string) (sdk.Context, keeper.Keeper, bankkeeper.Keeper, authkeeper.AccountKeeper, oraclekeeper.Keeper, simappparams.EncodingConfig, []sdk.ValAddress) {\n\n\tPKs := CreateTestPubKeys(500)\n\tkeyStaking := 
sdk.NewKVStoreKey(stakingtypes.StoreKey)\n\t// TODO: staking.TStoreKey removed in favor of?\n\ttkeyStaking := sdk.NewTransientStoreKey(\"transient_staking\")\n\tkeyAcc := sdk.NewKVStoreKey(authtypes.StoreKey)\n\tkeyParams := sdk.NewKVStoreKey(paramstypes.StoreKey)\n\ttkeyParams := sdk.NewTransientStoreKey(paramstypes.TStoreKey)\n\tkeyBank := sdk.NewKVStoreKey(banktypes.StoreKey)\n\tkeyOracle := sdk.NewKVStoreKey(oracleTypes.StoreKey)\n\tkeyEthBridge := sdk.NewKVStoreKey(types.StoreKey)\n\n\tdb := dbm.NewMemDB()\n\tms := store.NewCommitMultiStore(db)\n\tms.MountStoreWithDB(tkeyStaking, sdk.StoreTypeTransient, nil)\n\tms.MountStoreWithDB(keyStaking, sdk.StoreTypeIAVL, db)\n\tms.MountStoreWithDB(keyAcc, sdk.StoreTypeIAVL, db)\n\tms.MountStoreWithDB(keyParams, sdk.StoreTypeIAVL, db)\n\tms.MountStoreWithDB(tkeyParams, sdk.StoreTypeTransient, db)\n\tms.MountStoreWithDB(keyBank, sdk.StoreTypeIAVL, db)\n\tms.MountStoreWithDB(keyOracle, sdk.StoreTypeIAVL, db)\n\tms.MountStoreWithDB(keyEthBridge, sdk.StoreTypeIAVL, db)\n\terr := ms.LoadLatestVersion()\n\trequire.NoError(t, err)\n\n\tctx := sdk.NewContext(ms, tmproto.Header{ChainID: \"foochainid\"}, false, nil)\n\tctx = ctx.WithConsensusParams(\n\t\t&abci.ConsensusParams{\n\t\t\tValidator: &tmproto.ValidatorParams{\n\t\t\t\tPubKeyTypes: []string{tmtypes.ABCIPubKeyTypeEd25519},\n\t\t\t},\n\t\t},\n\t)\n\tctx = ctx.WithLogger(log.NewNopLogger())\n\tencCfg := MakeTestEncodingConfig()\n\n\tbridgeAccount := authtypes.NewEmptyModuleAccount(types.ModuleName, authtypes.Burner, authtypes.Minter)\n\n\tfeeCollectorAcc := authtypes.NewEmptyModuleAccount(authtypes.FeeCollectorName)\n\tnotBondedPool := authtypes.NewEmptyModuleAccount(stakingtypes.NotBondedPoolName, authtypes.Burner, authtypes.Staking)\n\tbondPool := authtypes.NewEmptyModuleAccount(stakingtypes.BondedPoolName, authtypes.Burner, authtypes.Staking)\n\n\tblacklistedAddrs := make(map[string]bool)\n\tblacklistedAddrs[feeCollectorAcc.GetAddress().String()] = true\n\tblacklistedAddrs[notBondedPool.GetAddress().String()] = true\n\tblacklistedAddrs[bondPool.GetAddress().String()] = true\n\n\tmaccPerms := map[string][]string{\n\t\tauthtypes.FeeCollectorName: nil,\n\t\tstakingtypes.NotBondedPoolName: {authtypes.Burner, authtypes.Staking},\n\t\tstakingtypes.BondedPoolName: {authtypes.Burner, authtypes.Staking},\n\t\ttypes.ModuleName: {authtypes.Burner, authtypes.Minter},\n\t}\n\n\tif extraMaccPerm != \"\" {\n\t\tmaccPerms[extraMaccPerm] = []string{authtypes.Burner, authtypes.Minter}\n\t}\n\n\tparamsKeeper := paramskeeper.NewKeeper(encCfg.Marshaler, encCfg.Amino, keyParams, tkeyParams)\n\n\t//accountKeeper gets maccParams in 0.40, module accounts moved from supplykeeper to authkeeper\n\taccountKeeper := authkeeper.NewAccountKeeper(\n\t\tencCfg.Marshaler, // amino codec\n\t\tkeyAcc, // target store\n\t\tparamsKeeper.Subspace(authtypes.ModuleName),\n\t\tauthtypes.ProtoBaseAccount, // prototype,\n\t\tmaccPerms,\n\t)\n\n\tbankKeeper := bankkeeper.NewBaseKeeper(\n\t\tencCfg.Marshaler,\n\t\tkeyBank,\n\t\taccountKeeper,\n\t\tparamsKeeper.Subspace(banktypes.ModuleName),\n\t\tblacklistedAddrs,\n\t)\n\n\tinitTokens := sdk.TokensFromConsensusPower(10000)\n\ttotalSupply := sdk.NewCoins(sdk.NewCoin(sdk.DefaultBondDenom, initTokens.MulRaw(int64(100))))\n\n\tbankKeeper.SetSupply(ctx, banktypes.NewSupply(totalSupply))\n\n\tstakingKeeper := stakingkeeper.NewKeeper(encCfg.Marshaler, keyStaking, accountKeeper, bankKeeper, paramsKeeper.Subspace(stakingtypes.ModuleName))\n\tstakingKeeper.SetParams(ctx, 
stakingtypes.DefaultParams())\n\toracleKeeper := oraclekeeper.NewKeeper(encCfg.Marshaler, keyOracle, stakingKeeper, consensusNeeded)\n\n\t// set module accounts\n\terr = bankKeeper.AddCoins(ctx, notBondedPool.GetAddress(), totalSupply)\n\trequire.NoError(t, err)\n\n\taccountKeeper.SetModuleAccount(ctx, bridgeAccount)\n\taccountKeeper.SetModuleAccount(ctx, feeCollectorAcc)\n\taccountKeeper.SetModuleAccount(ctx, bondPool)\n\taccountKeeper.SetModuleAccount(ctx, notBondedPool)\n\n\tethbridgeKeeper := keeper.NewKeeper(encCfg.Marshaler, bankKeeper, oracleKeeper, accountKeeper, keyEthBridge)\n\tCethReceiverAccount, _ := sdk.AccAddressFromBech32(TestCethReceiverAddress)\n\tethbridgeKeeper.SetCethReceiverAccount(ctx, CethReceiverAccount)\n\n\t// Setup validators\n\tvalAddrs := make([]sdk.ValAddress, len(validatorAmounts))\n\tfor i, amount := range validatorAmounts {\n\t\tvalPubKey := PKs[i]\n\t\tvalAddr := sdk.ValAddress(valPubKey.Address().Bytes())\n\t\tvalAddrs[i] = valAddr\n\t\tvalTokens := sdk.TokensFromConsensusPower(amount)\n\t\t// test how the validator is set from a purely unbonded pool\n\t\tvalidator, err := stakingtypes.NewValidator(valAddr, valPubKey, stakingtypes.Description{})\n\t\trequire.NoError(t, err)\n\n\t\tvalidator, _ = validator.AddTokensFromDel(valTokens)\n\t\tstakingKeeper.SetValidator(ctx, validator)\n\t\tstakingKeeper.SetValidatorByPowerIndex(ctx, validator)\n\t\t_, err = stakingKeeper.ApplyAndReturnValidatorSetUpdates(ctx)\n\t\tif err != nil {\n\t\t\tpanic(\"Failed to apply validator set updates\")\n\t\t}\n\t}\n\n\toracleKeeper.SetOracleWhiteList(ctx, valAddrs)\n\n\treturn ctx, ethbridgeKeeper, bankKeeper, accountKeeper, oracleKeeper, encCfg, valAddrs\n}", "func CreateTestKeepers(t *testing.T, consensusNeeded float64, validatorAmounts []int64, extraMaccPerm string) (sdk.Context, Keeper, bank.Keeper, supply.Keeper, auth.AccountKeeper, []sdk.ValAddress) {\n\tPKs := CreateTestPubKeys(500)\n\tkeyStaking := sdk.NewKVStoreKey(stakingtypes.StoreKey)\n\ttkeyStaking := sdk.NewTransientStoreKey(stakingtypes.TStoreKey)\n\tkeyAcc := sdk.NewKVStoreKey(auth.StoreKey)\n\tkeyParams := sdk.NewKVStoreKey(params.StoreKey)\n\ttkeyParams := sdk.NewTransientStoreKey(params.TStoreKey)\n\tkeySupply := sdk.NewKVStoreKey(supply.StoreKey)\n\tkeyOracle := sdk.NewKVStoreKey(types.StoreKey)\n\n\tdb := dbm.NewMemDB()\n\tms := store.NewCommitMultiStore(db)\n\tms.MountStoreWithDB(tkeyStaking, sdk.StoreTypeTransient, nil)\n\tms.MountStoreWithDB(keyStaking, sdk.StoreTypeIAVL, db)\n\tms.MountStoreWithDB(keyAcc, sdk.StoreTypeIAVL, db)\n\tms.MountStoreWithDB(keyParams, sdk.StoreTypeIAVL, db)\n\tms.MountStoreWithDB(tkeyParams, sdk.StoreTypeTransient, db)\n\tms.MountStoreWithDB(keySupply, sdk.StoreTypeIAVL, db)\n\tms.MountStoreWithDB(keyOracle, sdk.StoreTypeIAVL, db)\n\terr := ms.LoadLatestVersion()\n\trequire.Nil(t, err)\n\n\tctx := sdk.NewContext(ms, abci.Header{ChainID: \"foochainid\"}, false, nil)\n\tctx = ctx.WithConsensusParams(\n\t\t&abci.ConsensusParams{\n\t\t\tValidator: &abci.ValidatorParams{\n\t\t\t\tPubKeyTypes: []string{tmtypes.ABCIPubKeyTypeEd25519},\n\t\t\t},\n\t\t},\n\t)\n\tctx = ctx.WithLogger(log.NewNopLogger())\n\tcdc := MakeTestCodec()\n\n\tfeeCollectorAcc := supply.NewEmptyModuleAccount(auth.FeeCollectorName)\n\tnotBondedPool := supply.NewEmptyModuleAccount(stakingtypes.NotBondedPoolName, supply.Burner, supply.Staking)\n\tbondPool := supply.NewEmptyModuleAccount(stakingtypes.BondedPoolName, supply.Burner, supply.Staking)\n\n\tblacklistedAddrs := make(map[string]bool)\n\tblacklistedAddrs[feeCollectorAcc.GetAddress().String()] = true\n\tblacklistedAddrs[notBondedPool.GetAddress().String()] = true\n\tblacklistedAddrs[bondPool.GetAddress().String()] = true\n\n\tparamsKeeper := params.NewKeeper(cdc, keyParams, tkeyParams, params.DefaultCodespace)\n\n\taccountKeeper := auth.NewAccountKeeper(\n\t\tcdc, // amino codec\n\t\tkeyAcc, // target store\n\t\tparamsKeeper.Subspace(auth.DefaultParamspace),\n\t\tauth.ProtoBaseAccount, // prototype\n\t)\n\n\tbankKeeper := bank.NewBaseKeeper(\n\t\taccountKeeper,\n\t\tparamsKeeper.Subspace(bank.DefaultParamspace),\n\t\tbank.DefaultCodespace,\n\t\tblacklistedAddrs,\n\t)\n\n\tmaccPerms := map[string][]string{\n\t\tauth.FeeCollectorName: nil,\n\t\tstakingtypes.NotBondedPoolName: {supply.Burner, supply.Staking},\n\t\tstakingtypes.BondedPoolName: {supply.Burner, supply.Staking},\n\t}\n\n\tif extraMaccPerm != \"\" {\n\t\tmaccPerms[extraMaccPerm] = []string{supply.Burner, supply.Minter}\n\t}\n\n\tsupplyKeeper := supply.NewKeeper(cdc, keySupply, accountKeeper, bankKeeper, maccPerms)\n\n\tinitTokens := sdk.TokensFromConsensusPower(10000)\n\ttotalSupply := sdk.NewCoins(sdk.NewCoin(sdk.DefaultBondDenom, initTokens.MulRaw(int64(100))))\n\n\tsupplyKeeper.SetSupply(ctx, supply.NewSupply(totalSupply))\n\n\tstakingKeeper := staking.NewKeeper(cdc, keyStaking, supplyKeeper, paramsKeeper.Subspace(staking.DefaultParamspace), stakingtypes.DefaultCodespace)\n\tstakingKeeper.SetParams(ctx, stakingtypes.DefaultParams())\n\toracleKeeper := NewKeeper(cdc, keyOracle, stakingKeeper, types.DefaultCodespace, consensusNeeded)\n\n\t// set module accounts\n\terr = notBondedPool.SetCoins(totalSupply)\n\trequire.NoError(t, err)\n\n\tsupplyKeeper.SetModuleAccount(ctx, feeCollectorAcc)\n\tsupplyKeeper.SetModuleAccount(ctx, bondPool)\n\tsupplyKeeper.SetModuleAccount(ctx, notBondedPool)\n\n\t// Setup validators\n\tvalAddrs := make([]sdk.ValAddress, len(validatorAmounts))\n\tfor i, amount := range validatorAmounts {\n\t\tvalPubKey := PKs[i]\n\t\tvalAddr := sdk.ValAddress(valPubKey.Address().Bytes())\n\t\tvalAddrs[i] = valAddr\n\t\tvalTokens := sdk.TokensFromConsensusPower(amount)\n\t\t// test how the validator is set from a purely unbonded pool\n\t\tvalidator := stakingtypes.NewValidator(valAddr, valPubKey, stakingtypes.Description{})\n\t\tvalidator, _ = validator.AddTokensFromDel(valTokens)\n\t\tstakingKeeper.SetValidator(ctx, validator)\n\t\tstakingKeeper.SetValidatorByPowerIndex(ctx, validator)\n\t\tstakingKeeper.ApplyAndReturnValidatorSetUpdates(ctx)\n\t}\n\n\treturn ctx, oracleKeeper, bankKeeper, supplyKeeper, accountKeeper, valAddrs\n}", "func newKubeBuilder(appMan Manifest) Builder {\n\treturn &KubeBuilder{Manifest: appMan}\n}", "func InitKeeper(cdc *wire.Codec, paramsKeeper params.Keeper) Keeper {\n\tk := NewKeeper(cdc, paramsKeeper)\n\n\treturn k\n}", "func makePodSpec(t thanosv1beta1.Receiver) (*corev1.PodSpec, error) {\n\n\tif t.Spec.ReceivePrefix == \"\" {\n\t\tt.Spec.ReceivePrefix = receiverDir\n\t}\n\tif t.Spec.Retention == \"\" {\n\t\tt.Spec.Retention = defaultRetetion\n\t}\n\t// TODO set args to spec\n\tthanosArgs := []string{\n\t\t\"receive\",\n\t\tfmt.Sprintf(\"--tsdb.path=%s\", t.Spec.ReceivePrefix),\n\t\tfmt.Sprintf(\"--tsdb.retention=%s\", t.Spec.Retention),\n\t\tfmt.Sprintf(\"--labels=receive=\\\"%s\\\"\", t.Spec.ReceiveLables),\n\t\tfmt.Sprintf(\"--objstore.config=type: %s\\nconfig:\\n bucket: \\\"%s\\\"\", t.Spec.ObjectStorageType, t.Spec.BucketName),\n\t}\n\tif t.Spec.LogLevel != \"\" && t.Spec.LogLevel != \"info\" 
{\n\t\tthanosArgs = append(thanosArgs, fmt.Sprintf(\"--log.level=%s\", t.Spec.LogLevel))\n\t}\n\tenv := []corev1.EnvVar{\n\t\t{\n\t\t\tName: \"GOOGLE_APPLICATION_CREDENTIALS\",\n\t\t\tValue: secretsDir + t.Spec.SecretName + \".json\",\n\t\t},\n\t}\n\n\tports := []corev1.ContainerPort{\n\t\t{\n\t\t\tContainerPort: 10902,\n\t\t\tName: \"http\",\n\t\t},\n\t\t{\n\t\t\tContainerPort: 10901,\n\t\t\tName: \"grpc\",\n\t\t},\n\t}\n\n\tif strings.Contains(t.Name, \"receiver\") {\n\t\tports = append(ports, corev1.ContainerPort{\n\t\t\tContainerPort: 19291,\n\t\t\tName: \"receive\",\n\t\t})\n\t}\n\n\t// mount to pod\n\tvolumemounts := []corev1.VolumeMount{\n\t\t{\n\t\t\tName: \"thanos-persistent-storage\",\n\t\t\tMountPath: t.Spec.Retention,\n\t\t},\n\t\t{\n\t\t\tName: \"google-cloud-key\",\n\t\t\tMountPath: secretsDir,\n\t\t},\n\t}\n\n\tcontainers := []corev1.Container{\n\t\t{\n\t\t\tName: \"receiver\",\n\t\t\tImage: *t.Spec.Image,\n\t\t\tArgs: thanosArgs,\n\t\t\tEnv: env,\n\t\t\tPorts: ports,\n\t\t\tVolumeMounts: volumemounts,\n\t\t},\n\t}\n\n\t// Need create json from gcp iam\n\t// https://github.com/orangesys/blueprint/tree/master/prometheus-thanos\n\t// kubectl create secret generic ${SERVICE_ACCOUNT_NAME} --from-file=${SERVICE_ACCOUNT_NAME}.json=${SERVICE_ACCOUNT_NAME}.json\n\t// secret name is thanos-demo-gcs\n\t// TODO setting secret name with spec\n\tvolumes := []corev1.Volume{\n\t\t{\n\t\t\tName: \"google-cloud-key\",\n\t\t\tVolumeSource: corev1.VolumeSource{\n\t\t\t\tSecret: &corev1.SecretVolumeSource{\n\t\t\t\t\tSecretName: t.Spec.SecretName,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\treturn &corev1.PodSpec{\n\t\tTerminationGracePeriodSeconds: &gracePeriodTerm,\n\t\tContainers: containers,\n\t\tVolumes: volumes,\n\t}, nil\n}", "func NewSpecGenerator(arg string, rootfs bool) *SpecGenerator {\n\tcsc := ContainerStorageConfig{}\n\tif rootfs {\n\t\tcsc.Rootfs = arg\n\t\t// check if rootfs should use overlay\n\t\tlastColonIndex := strings.LastIndex(csc.Rootfs, \":\")\n\t\tif lastColonIndex != -1 {\n\t\t\tlastPart := csc.Rootfs[lastColonIndex+1:]\n\t\t\tif lastPart == \"O\" {\n\t\t\t\tcsc.RootfsOverlay = true\n\t\t\t\tcsc.Rootfs = csc.Rootfs[:lastColonIndex]\n\t\t\t} else if lastPart == \"idmap\" || strings.HasPrefix(lastPart, \"idmap=\") {\n\t\t\t\tcsc.RootfsMapping = &lastPart\n\t\t\t\tcsc.Rootfs = csc.Rootfs[:lastColonIndex]\n\t\t\t}\n\t\t}\n\t} else {\n\t\tcsc.Image = arg\n\t}\n\treturn &SpecGenerator{\n\t\tContainerStorageConfig: csc,\n\t}\n}", "func NewSpec(api *gmail.Service, db *db.DB) (*Spec, error) {\n\tlog.SetLevel(log.DebugLevel)\n\tlog.Info(\"starting new spec\")\n\n\tbytes, err := ioutil.ReadFile(\"./spec.yaml\")\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to read file: %v\", err)\n\t}\n\n\tspec := &Spec{\n\t\tapi: api,\n\t\tdb: db,\n\t}\n\n\terr = yaml.Unmarshal(bytes, spec)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to unmarshal: %v\", err)\n\t}\n\n\treturn spec, nil\n}", "func NewSpec(details *SpecDetails) *Spec {\n\treturn &Spec{\n\t\tDetails: details,\n\t\tServices: NewServiceList(),\n\t\tStatus: SpecWaiting,\n\t}\n}", "func NewAKSClusterSpec(properties map[string]string) *AKSClusterSpec {\n\tspec := &AKSClusterSpec{\n\t\tReclaimPolicy: DefaultReclaimPolicy,\n\t\tNodeCount: to.IntPtr(DefaultNodeCount),\n\t}\n\n\tval, ok := properties[\"resourceGroupName\"]\n\tif ok {\n\t\tspec.ResourceGroupName = val\n\t}\n\n\tval, ok = properties[\"location\"]\n\tif ok {\n\t\tspec.Location = val\n\t}\n\n\tval, ok = properties[\"version\"]\n\tif ok {\n\t\tspec.Version = 
val\n\t}\n\n\tval, ok = properties[\"nodeCount\"]\n\tif ok {\n\t\tif nodeCount, err := strconv.Atoi(val); err == nil {\n\t\t\tspec.NodeCount = to.IntPtr(nodeCount)\n\t\t}\n\t}\n\n\tval, ok = properties[\"nodeVMSize\"]\n\tif ok {\n\t\tspec.NodeVMSize = val\n\t}\n\n\tval, ok = properties[\"dnsNamePrefix\"]\n\tif ok {\n\t\tspec.DNSNamePrefix = val\n\t}\n\n\tval, ok = properties[\"disableRBAC\"]\n\tif ok {\n\t\tif disableRBAC, err := strconv.ParseBool(val); err == nil {\n\t\t\tspec.DisableRBAC = disableRBAC\n\t\t}\n\t}\n\n\treturn spec\n}", "func (in *KeevakindSpec) DeepCopy() *KeevakindSpec {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(KeevakindSpec)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func genClusterSpec(job *api.Caffe2Job) ClusterSpec {\n\tjobKey, err := keyFunc(job)\n\tif err != nil {\n\t\tutilruntime.HandleError(fmt.Errorf(\"Couldn't get key for caffe2job object %#v: %v\", job, err))\n\t\treturn nil\n\t}\n\n\tclusterSpec := make(ClusterSpec)\n\n\trtype := \"worker\"\n\tspec := job.Spec.ReplicaSpecs\n\treplicaNames := make([]string, 0, *spec.Replicas)\n\n\tfor i := int32(0); i < *spec.Replicas; i++ {\n\t\thost := genGeneralName(jobKey, rtype, fmt.Sprintf(\"%d\", i)) + \":\" + strconv.Itoa(api.Caffe2Port)\n\t\treplicaNames = append(replicaNames, host)\n\t}\n\n\tclusterSpec[rtype] = replicaNames\n\n\treturn clusterSpec\n}", "func toK8SPodSpec(podSpec *pbpod.PodSpec) *corev1.Pod {\n\t// Create pod template spec and apply configurations to spec.\n\tlabels := make(map[string]string)\n\tfor _, label := range podSpec.GetLabels() {\n\t\tlabels[label.GetKey()] = label.GetValue()\n\t}\n\n\ttermGracePeriod := int64(podSpec.GetKillGracePeriodSeconds())\n\n\tpodTemp := corev1.PodTemplateSpec{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tLabels: labels,\n\t\t},\n\t\tSpec: corev1.PodSpec{\n\t\t\tContainers: toK8SContainerSpecs(podSpec.GetContainers()),\n\t\t\tInitContainers: toK8SContainerSpecs(podSpec.GetInitContainers()),\n\t\t\tRestartPolicy: \"Never\",\n\t\t\tTerminationGracePeriodSeconds: &termGracePeriod,\n\t\t},\n\t}\n\n\t// Bind node and create pod.\n\treturn &corev1.Pod{\n\t\tObjectMeta: podTemp.ObjectMeta,\n\t\tSpec: podTemp.Spec,\n\t}\n}", "func TestNewStoreEncryptionSpec(t *testing.T) {\n\tdefer leaktest.AfterTest(t)()\n\n\ttestCases := []struct {\n\t\tvalue string\n\t\texpectedErr string\n\t\texpected StoreEncryptionSpec\n\t}{\n\t\t// path\n\t\t{\",\", \"no path specified\", StoreEncryptionSpec{}},\n\t\t{\"\", \"no path specified\", StoreEncryptionSpec{}},\n\t\t{\"/mnt/hda1\", \"field not in the form <key>=<value>: /mnt/hda1\", StoreEncryptionSpec{}},\n\t\t{\"path=\", \"no value specified for path\", StoreEncryptionSpec{}},\n\t\t{\"path=~/data\", \"path cannot start with '~': ~/data\", StoreEncryptionSpec{}},\n\t\t{\"path=data,path=data2\", \"path field was used twice in encryption definition\", StoreEncryptionSpec{}},\n\n\t\t// The same logic applies to key and old-key, don't repeat everything.\n\t\t{\"path=data\", \"no key specified\", StoreEncryptionSpec{}},\n\t\t{\"path=data,key=new.key\", \"no old-key specified\", StoreEncryptionSpec{}},\n\n\t\t// Rotation period.\n\t\t{\"path=data,key=new.key,old-key=old.key,rotation-period\", \"field not in the form <key>=<value>: rotation-period\", StoreEncryptionSpec{}},\n\t\t{\"path=data,key=new.key,old-key=old.key,rotation-period=\", \"no value specified for rotation-period\", StoreEncryptionSpec{}},\n\t\t{\"path=data,key=new.key,old-key=old.key,rotation-period=1\", `could not parse rotation-duration value: 1: time: missing unit in 
duration \"1\"`, StoreEncryptionSpec{}},\n\t\t{\"path=data,key=new.key,old-key=old.key,rotation-period=1d\", `could not parse rotation-duration value: 1d: time: unknown unit \"d\" in duration \"1d\"`, StoreEncryptionSpec{}},\n\n\t\t// Good values.\n\t\t{\"path=/data,key=/new.key,old-key=/old.key\", \"\", StoreEncryptionSpec{Path: \"/data\", KeyPath: \"/new.key\", OldKeyPath: \"/old.key\", RotationPeriod: DefaultRotationPeriod}},\n\t\t{\"path=/data,key=/new.key,old-key=/old.key,rotation-period=1h\", \"\", StoreEncryptionSpec{Path: \"/data\", KeyPath: \"/new.key\", OldKeyPath: \"/old.key\", RotationPeriod: time.Hour}},\n\t\t{\"path=/data,key=plain,old-key=/old.key,rotation-period=1h\", \"\", StoreEncryptionSpec{Path: \"/data\", KeyPath: \"plain\", OldKeyPath: \"/old.key\", RotationPeriod: time.Hour}},\n\t\t{\"path=/data,key=/new.key,old-key=plain,rotation-period=1h\", \"\", StoreEncryptionSpec{Path: \"/data\", KeyPath: \"/new.key\", OldKeyPath: \"plain\", RotationPeriod: time.Hour}},\n\t}\n\n\tfor i, testCase := range testCases {\n\t\tstoreEncryptionSpec, err := NewStoreEncryptionSpec(testCase.value)\n\t\tif err != nil {\n\t\t\tif len(testCase.expectedErr) == 0 {\n\t\t\t\tt.Errorf(\"%d(%s): no expected error, got %s\", i, testCase.value, err)\n\t\t\t}\n\t\t\tif testCase.expectedErr != fmt.Sprint(err) {\n\t\t\t\tt.Errorf(\"%d(%s): expected error \\\"%s\\\" does not match actual \\\"%s\\\"\", i, testCase.value,\n\t\t\t\t\ttestCase.expectedErr, err)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tif len(testCase.expectedErr) > 0 {\n\t\t\tt.Errorf(\"%d(%s): expected error %s but there was none\", i, testCase.value, testCase.expectedErr)\n\t\t\tcontinue\n\t\t}\n\t\tif !reflect.DeepEqual(testCase.expected, storeEncryptionSpec) {\n\t\t\tt.Errorf(\"%d(%s): actual doesn't match expected\\nactual: %+v\\nexpected: %+v\", i,\n\t\t\t\ttestCase.value, storeEncryptionSpec, testCase.expected)\n\t\t}\n\n\t\t// Now test String() to make sure the result can be parsed.\n\t\tstoreEncryptionSpecString := storeEncryptionSpec.String()\n\t\tstoreEncryptionSpec2, err := NewStoreEncryptionSpec(storeEncryptionSpecString)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"%d(%s): error parsing String() result: %s\", i, testCase.value, err)\n\t\t\tcontinue\n\t\t}\n\t\t// Compare strings to deal with floats not matching exactly.\n\t\tif !reflect.DeepEqual(storeEncryptionSpecString, storeEncryptionSpec2.String()) {\n\t\t\tt.Errorf(\"%d(%s): actual doesn't match expected\\nactual: %#+v\\nexpected: %#+v\", i, testCase.value,\n\t\t\t\tstoreEncryptionSpec, storeEncryptionSpec2)\n\t\t}\n\t}\n}", "func New() Generator {\n\tspec := rspec.Spec{\n\t\tVersion: rspec.Version,\n\t\tPlatform: rspec.Platform{\n\t\t\tOS: runtime.GOOS,\n\t\t\tArch: runtime.GOARCH,\n\t\t},\n\t\tRoot: rspec.Root{\n\t\t\tPath: \"\",\n\t\t\tReadonly: false,\n\t\t},\n\t\tProcess: rspec.Process{\n\t\t\tTerminal: false,\n\t\t\tUser: rspec.User{},\n\t\t\tArgs: []string{\n\t\t\t\t\"sh\",\n\t\t\t},\n\t\t\tEnv: []string{\n\t\t\t\t\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\n\t\t\t\t\"TERM=xterm\",\n\t\t\t},\n\t\t\tCwd: \"/\",\n\t\t\tCapabilities: []string{\n\t\t\t\t\"CAP_CHOWN\",\n\t\t\t\t\"CAP_DAC_OVERRIDE\",\n\t\t\t\t\"CAP_FSETID\",\n\t\t\t\t\"CAP_FOWNER\",\n\t\t\t\t\"CAP_MKNOD\",\n\t\t\t\t\"CAP_NET_RAW\",\n\t\t\t\t\"CAP_SETGID\",\n\t\t\t\t\"CAP_SETUID\",\n\t\t\t\t\"CAP_SETFCAP\",\n\t\t\t\t\"CAP_SETPCAP\",\n\t\t\t\t\"CAP_NET_BIND_SERVICE\",\n\t\t\t\t\"CAP_SYS_CHROOT\",\n\t\t\t\t\"CAP_KILL\",\n\t\t\t\t\"CAP_AUDIT_WRITE\",\n\t\t\t},\n\t\t\tRlimits: 
[]rspec.Rlimit{\n\t\t\t\t{\n\t\t\t\t\tType: \"RLIMIT_NOFILE\",\n\t\t\t\t\tHard: uint64(1024),\n\t\t\t\t\tSoft: uint64(1024),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tHostname: \"mrsdalloway\",\n\t\tMounts: []rspec.Mount{\n\t\t\t{\n\t\t\t\tDestination: \"/proc\",\n\t\t\t\tType: \"proc\",\n\t\t\t\tSource: \"proc\",\n\t\t\t\tOptions: nil,\n\t\t\t},\n\t\t\t{\n\t\t\t\tDestination: \"/dev\",\n\t\t\t\tType: \"tmpfs\",\n\t\t\t\tSource: \"tmpfs\",\n\t\t\t\tOptions: []string{\"nosuid\", \"strictatime\", \"mode=755\", \"size=65536k\"},\n\t\t\t},\n\t\t\t{\n\t\t\t\tDestination: \"/dev/pts\",\n\t\t\t\tType: \"devpts\",\n\t\t\t\tSource: \"devpts\",\n\t\t\t\tOptions: []string{\"nosuid\", \"noexec\", \"newinstance\", \"ptmxmode=0666\", \"mode=0620\", \"gid=5\"},\n\t\t\t},\n\t\t\t{\n\t\t\t\tDestination: \"/dev/shm\",\n\t\t\t\tType: \"tmpfs\",\n\t\t\t\tSource: \"shm\",\n\t\t\t\tOptions: []string{\"nosuid\", \"noexec\", \"nodev\", \"mode=1777\", \"size=65536k\"},\n\t\t\t},\n\t\t\t{\n\t\t\t\tDestination: \"/dev/mqueue\",\n\t\t\t\tType: \"mqueue\",\n\t\t\t\tSource: \"mqueue\",\n\t\t\t\tOptions: []string{\"nosuid\", \"noexec\", \"nodev\"},\n\t\t\t},\n\t\t\t{\n\t\t\t\tDestination: \"/sys\",\n\t\t\t\tType: \"sysfs\",\n\t\t\t\tSource: \"sysfs\",\n\t\t\t\tOptions: []string{\"nosuid\", \"noexec\", \"nodev\", \"ro\"},\n\t\t\t},\n\t\t},\n\t\tLinux: &rspec.Linux{\n\t\t\tResources: &rspec.Resources{\n\t\t\t\tDevices: []rspec.DeviceCgroup{\n\t\t\t\t\t{\n\t\t\t\t\t\tAllow: false,\n\t\t\t\t\t\tAccess: strPtr(\"rwm\"),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tNamespaces: []rspec.Namespace{\n\t\t\t\t{\n\t\t\t\t\tType: \"pid\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: \"network\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: \"ipc\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: \"uts\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: \"mount\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tDevices: []rspec.Device{},\n\t\t},\n\t}\n\treturn Generator{\n\t\tspec: &spec,\n\t}\n}", "func TestNewReconciler(t *testing.T) {\n\tnewReconcilerCases := map[string]struct {\n\t\tmanager *mockManager\n\t\texpectedReconciler *ReconcileCommand\n\t}{\n\t\t\"empty manager\": {\n\t\t\tmanager: &mockManager{},\n\t\t\texpectedReconciler: &ReconcileCommand{\n\t\t\t\tkubernetes: &k8s.Kubernetes{},\n\t\t\t},\n\t\t},\n\t}\n\tfor name, newReconcilerCase := range newReconcilerCases {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tactualReconciler := newReconciler(newReconcilerCase.manager)\n\t\t\tassert.Equal(t, newReconcilerCase.expectedReconciler, actualReconciler)\n\t\t})\n\t}\n}", "func (f *MemKv) newPrefixWatcher(ctx context.Context, prefix string, fromVersion string) (*watcher, error) {\n\tif !strings.HasSuffix(prefix, \"/\") {\n\t\tprefix += \"/\"\n\t}\n\treturn f.watch(ctx, prefix, fromVersion, true)\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &ReconcileJedyKind{client: mgr.GetClient(), scheme: mgr.GetScheme()}\n}", "func (rs *RegistrySynchronizer) newRegistryFromChain() (Registry, error) {\n\tfromAddress := rs.effectiveKeeperAddress\n\tcontractAddress := rs.job.KeeperSpec.ContractAddress\n\n\tregistryConfig, err := rs.registryWrapper.GetConfig(nil)\n\tif err != nil {\n\t\trs.jrm.TryRecordError(rs.job.ID, err.Error())\n\t\treturn Registry{}, errors.Wrap(err, \"failed to get contract config\")\n\t}\n\n\tkeeperIndex := int32(-1)\n\tkeeperMap := map[ethkey.EIP55Address]int32{}\n\tfor idx, address := range registryConfig.KeeperAddresses {\n\t\tkeeperMap[ethkey.EIP55AddressFromAddress(address)] = int32(idx)\n\t\tif address == fromAddress {\n\t\t\tkeeperIndex 
= int32(idx)\n\t\t}\n\t}\n\tif keeperIndex == -1 {\n\t\trs.logger.Warnf(\"unable to find %s in keeper list on registry %s\", fromAddress.Hex(), contractAddress.Hex())\n\t}\n\n\treturn Registry{\n\t\tBlockCountPerTurn: registryConfig.BlockCountPerTurn,\n\t\tCheckGas: registryConfig.CheckGas,\n\t\tContractAddress: contractAddress,\n\t\tFromAddress: rs.job.KeeperSpec.FromAddress,\n\t\tJobID: rs.job.ID,\n\t\tKeeperIndex: keeperIndex,\n\t\tNumKeepers: int32(len(registryConfig.KeeperAddresses)),\n\t\tKeeperIndexMap: keeperMap,\n\t}, nil\n}", "func newFakeReconciler(initObjects ...runtime.Object) *ReconcileMachineRemediation {\n\tfakeClient := fake.NewFakeClient(initObjects...)\n\tremediator := &FakeRemedatior{}\n\treturn &ReconcileMachineRemediation{\n\t\tclient: fakeClient,\n\t\tremediator: remediator,\n\t\tnamespace: consts.NamespaceOpenshiftMachineAPI,\n\t}\n}", "func newContainer(rspec *spec.Spec, lockDir string) (*Container, error) {\n\tif rspec == nil {\n\t\treturn nil, errors.Wrapf(ErrInvalidArg, \"must provide a valid runtime spec to create container\")\n\t}\n\n\tctr := new(Container)\n\tctr.config = new(ContainerConfig)\n\tctr.state = new(containerRuntimeInfo)\n\n\tctr.config.ID = stringid.GenerateNonCryptoID()\n\tctr.config.Name = namesgenerator.GetRandomName(0)\n\n\tctr.config.Spec = new(spec.Spec)\n\tdeepcopier.Copy(rspec).To(ctr.config.Spec)\n\tctr.config.CreatedTime = time.Now()\n\n\t// Path our lock file will reside at\n\tlockPath := filepath.Join(lockDir, ctr.config.ID)\n\t// Grab a lockfile at the given path\n\tlock, err := storage.GetLockfile(lockPath)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"error creating lockfile for new container\")\n\t}\n\tctr.lock = lock\n\n\treturn ctr, nil\n}", "func newKubeClient(kubeconfigPath string) (*versioned.Clientset, error) {\n\tvar err error\n\tvar kubeConf *rest.Config\n\n\tif kubeconfigPath == \"\" {\n\t\t// creates the in-cluster config\n\t\tkubeConf, err = k8s.GetConfig()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"build default in cluster kube config failed: %w\", err)\n\t\t}\n\t} else {\n\t\tkubeConf, err = clientcmd.BuildConfigFromFlags(\"\", kubeconfigPath)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"build kube client config from config file failed: %w\", err)\n\t\t}\n\t}\n\treturn versioned.NewForConfig(kubeConf)\n}", "func NewStakingKeeperWeightedMock(t ...MockStakingValidatorData) *StakingKeeperMock {\n\tr := &StakingKeeperMock{\n\t\tBondedValidators: make([]stakingtypes.Validator, len(t)),\n\t\tValidatorPower: make(map[string]int64, len(t)),\n\t}\n\n\tfor i, a := range t {\n\t\tpk, err := codectypes.NewAnyWithValue(ed25519.GenPrivKey().PubKey())\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tr.BondedValidators[i] = stakingtypes.Validator{\n\t\t\tConsensusPubkey: pk,\n\t\t\tOperatorAddress: a.Operator.String(),\n\t\t\tStatus: stakingtypes.Bonded,\n\t\t}\n\t\tr.ValidatorPower[a.Operator.String()] = a.Power\n\t}\n\treturn r\n}", "func CreateFromSpec(spec *Spec) *Signer {\n\tsigner := New()\n\n\tsigner.SetCredential(spec.AccessKeyID, spec.AccessKeySecret)\n\n\tif spec.Literal != nil {\n\t\tsigner.SetLiteral(spec.Literal)\n\t}\n\n\tif spec.HeaderHoisting != nil {\n\t\tsigner.SetHeaderHoisting(spec.HeaderHoisting)\n\t}\n\n\tsigner.IgnoreHeader(spec.IgnoredHeaders...)\n\tsigner.ExcludeBody(spec.ExcludeBody)\n\n\tif ttl, e := time.ParseDuration(spec.TTL); e == nil {\n\t\tsigner.SetTTL(ttl)\n\t}\n\n\tif len(spec.AccessKeys) > 0 {\n\t\tsigner.SetAccessKeyStore(idSecretMap(spec.AccessKeys))\n\t}\n\treturn 
signer\n}", "func MakeSpec(\n\tconn, unique string,\n\tneedsUpdate func(db.Specifier, db.Specifier) bool,\n\tnewDBSpec db.Specifier,\n\tnewDBFunc DBMaker,\n\tnewDBError error,\n\tupdateFunc Updater,\n\tupdateErr error,\n) db.Specifier {\n\treturn &Spec{\n\t\tConn: conn,\n\t\tUnique: unique,\n\t\tUpdateNeeded: needsUpdate,\n\t\tNewDBSpec: newDBSpec,\n\t\tNewDBFunc: newDBFunc,\n\t\tNewDBError: newDBError,\n\t\tUpdateFunc: updateFunc,\n\t\tUpdateErr: updateErr,\n\t}\n}", "func (f *MemKv) newWatcher(ctx context.Context, key string, fromVersion string) (*watcher, error) {\n\tif strings.HasSuffix(key, \"/\") {\n\t\treturn nil, fmt.Errorf(\"Watch called on a prefix\")\n\t}\n\treturn f.watch(ctx, key, fromVersion, false)\n}", "func newKlusterlets(c *OperatorV1Client) *klusterlets {\n\treturn &klusterlets{\n\t\tclient: c.RESTClient(),\n\t}\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &ReconcileKedaController{client: mgr.GetClient(), scheme: mgr.GetScheme()}\n}", "func (k *Kit) withTransportSpec(spec *compiledTransportSpec) *Kit {\n\tnewK := *k\n\tnewK.transportSpec = spec\n\treturn &newK\n}", "func createNamespaceSpec(nsName string) *v1.Namespace {\n\tvar namespace = &v1.Namespace{\n\t\tTypeMeta: metav1.TypeMeta{\n\t\t\tKind: \"Namespace\",\n\t\t},\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tGenerateName: nsName,\n\t\t},\n\t}\n\treturn namespace\n}", "func NewCoinKeeper(am sdk.AccountMapper) CoinKeeper {\n\treturn CoinKeeper{am: am}\n}", "func newReconciler(\n\tmgr manager.Manager,\n\tmutationSystem *mutation.System,\n\ttracker *readiness.Tracker,\n\tgetPod func(context.Context) (*corev1.Pod, error),\n\tkind string,\n\tnewMutationObj func() client.Object,\n\tmutatorFor func(client.Object) (types.Mutator, error),\n\tevents chan event.GenericEvent,\n) *Reconciler {\n\tr := &Reconciler{\n\t\tsystem: mutationSystem,\n\t\tClient: mgr.GetClient(),\n\t\ttracker: tracker,\n\t\tgetPod: getPod,\n\t\tscheme: mgr.GetScheme(),\n\t\treporter: ctrlmutators.NewStatsReporter(),\n\t\tcache: ctrlmutators.NewMutationCache(),\n\t\tgvk: mutationsv1.GroupVersion.WithKind(kind),\n\t\tnewMutationObj: newMutationObj,\n\t\tmutatorFor: mutatorFor,\n\t\tlog: logf.Log.WithName(\"controller\").WithValues(logging.Process, fmt.Sprintf(\"%s_controller\", strings.ToLower(kind))),\n\t\tevents: events,\n\t}\n\tif getPod == nil {\n\t\tr.getPod = r.defaultGetPod\n\t}\n\treturn r\n}", "func New(name, platformName, path, format string, parentUI *ui.UI, envConfig map[string]string) (*Kluster, error) {\n\tif len(format) == 0 {\n\t\tformat = DefaultFormat\n\t}\n\tif !validFormat(format) {\n\t\treturn nil, fmt.Errorf(\"invalid format %q for the kubernetes cluster config file\", format)\n\t}\n\tpath = filepath.Join(path, DefaultConfigFilename+\".\"+format)\n\n\tif _, err := os.Stat(path); os.IsExist(err) {\n\t\treturn nil, fmt.Errorf(\"the Kluster config file %q already exists\", path)\n\t}\n\n\tnewUI := parentUI.Copy()\n\n\tcluster := Kluster{\n\t\tVersion: Version,\n\t\tKind: \"cluster\",\n\t\tName: name,\n\t\tpath: path,\n\t\tui: newUI,\n\t}\n\n\t// // TODO: Improve this, all platforms are not needed\n\t// allPlatforms := provisioner.SupportedPlatforms(name, envConfig)\n\t// platform, ok := allPlatforms[platformName]\n\t// if !ok {\n\t// \treturn nil, fmt.Errorf(\"platform %q is not supported\", platformName)\n\t// }\n\n\tplatform, err := provisioner.New(name, platformName, envConfig, newUI, Version)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"platform %q is not supported. 
%s\", platformName, err)\n\t}\n\n\tlogPrefix := fmt.Sprintf(\"KubeKit [ %s@%s ]\", cluster.Name, platformName)\n\tcluster.ui.SetLogPrefix(logPrefix)\n\n\tcluster.Platforms = make(map[string]interface{}, 1)\n\tcluster.provisioner = make(map[string]provisioner.Provisioner, 1)\n\tcluster.State = make(map[string]*State, 1)\n\n\tcluster.Platforms[platformName] = platform.Config()\n\tcluster.provisioner[platformName] = platform\n\tcluster.State[platformName] = &State{\n\t\tStatus: AbsentStatus.String(),\n\t}\n\n\tcluster.Resources = resources.DefaultResourcesFor(platformName)\n\n\t// return if this is a platform with no configuration, such as EKS or AKS\n\tswitch platformName {\n\tcase \"eks\", \"aks\":\n\t\treturn &cluster, nil\n\t}\n\n\tcluster.Config, err = configurator.DefaultConfig(envConfig)\n\n\treturn &cluster, err\n}", "func newPeerAuthenticationWithSpec() *securityv1beta1.PeerAuthentication {\n\tpeerAuthentication := newPeerAuthentication()\n\tpeerAuthentication.Spec.PortLevelMtls = map[uint32]*securityv1beta1apis.PeerAuthentication_MutualTLS{\n\t\ttTargetPort: {\n\t\t\tMode: securityv1beta1apis.PeerAuthentication_MutualTLS_PERMISSIVE,\n\t\t},\n\t}\n\tpeerAuthentication.Spec.Selector = &istiov1beta1apis.WorkloadSelector{\n\t\tMatchLabels: map[string]string{\n\t\t\tapplicationLabelKey: tName,\n\t\t},\n\t}\n\treturn peerAuthentication\n}", "func MachinePoolSpec(ctx context.Context, inputGetter func() MachinePoolSpecInput) {\n\tinput := inputGetter()\n\tExpect(input.E2EConfig).ToNot(BeNil(), \"Invalid argument. input.E2EConfig can't be nil\")\n\tExpect(input.ConfigClusterFn).ToNot(BeNil(), \"Invalid argument. input.ConfigClusterFn can't be nil\")\n\tExpect(input.BootstrapClusterProxy).ToNot(BeNil(), \"Invalid argument. input.BootstrapClusterProxy can't be nil\")\n\tExpect(input.AWSSession).ToNot(BeNil(), \"Invalid argument. input.AWSSession can't be nil\")\n\tExpect(input.Namespace).NotTo(BeNil(), \"Invalid argument. input.Namespace can't be nil\")\n\tExpect(input.ClusterName).ShouldNot(BeEmpty(), \"Invalid argument. input.ClusterName can't be empty\")\n\tExpect(input.Flavor).ShouldNot(BeEmpty(), \"Invalid argument. 
input.Flavor can't be empty\")\n\n\tginkgo.By(fmt.Sprintf(\"getting cluster with name %s\", input.ClusterName))\n\tcluster := framework.GetClusterByName(ctx, framework.GetClusterByNameInput{\n\t\tGetter: input.BootstrapClusterProxy.GetClient(),\n\t\tNamespace: input.Namespace.Name,\n\t\tName: input.ClusterName,\n\t})\n\tExpect(cluster).NotTo(BeNil(), \"couldn't find CAPI cluster\")\n\n\tginkgo.By(fmt.Sprintf(\"creating an applying the %s template\", input.Flavor))\n\tconfigCluster := input.ConfigClusterFn(input.ClusterName, input.Namespace.Name)\n\tconfigCluster.Flavor = input.Flavor\n\tconfigCluster.WorkerMachineCount = pointer.Int64(1)\n\tworkloadClusterTemplate := shared.GetTemplate(ctx, configCluster)\n\tif input.UsesLaunchTemplate {\n\t\tuserDataTemplate := `#!/bin/bash\n/etc/eks/bootstrap.sh %s \\\n --container-runtime containerd\n`\n\t\teksClusterName := getEKSClusterName(input.Namespace.Name, input.ClusterName)\n\t\tuserData := fmt.Sprintf(userDataTemplate, eksClusterName)\n\t\tuserDataEncoded := base64.StdEncoding.EncodeToString([]byte(userData))\n\t\tworkloadClusterTemplate = []byte(strings.ReplaceAll(string(workloadClusterTemplate), \"USER_DATA\", userDataEncoded))\n\t}\n\tginkgo.By(string(workloadClusterTemplate))\n\tginkgo.By(fmt.Sprintf(\"Applying the %s cluster template yaml to the cluster\", configCluster.Flavor))\n\terr := input.BootstrapClusterProxy.Apply(ctx, workloadClusterTemplate)\n\tExpect(err).ShouldNot(HaveOccurred())\n\n\tginkgo.By(\"Waiting for the machine pool to be running\")\n\tmp := framework.DiscoveryAndWaitForMachinePools(ctx, framework.DiscoveryAndWaitForMachinePoolsInput{\n\t\tLister: input.BootstrapClusterProxy.GetClient(),\n\t\tGetter: input.BootstrapClusterProxy.GetClient(),\n\t\tCluster: cluster,\n\t}, input.E2EConfig.GetIntervals(\"\", \"wait-worker-nodes\")...)\n\tExpect(len(mp)).To(Equal(1))\n\n\tginkgo.By(\"Check the status of the node group\")\n\teksClusterName := getEKSClusterName(input.Namespace.Name, input.ClusterName)\n\tif input.ManagedMachinePool {\n\t\tvar nodeGroupName string\n\t\tif input.UsesLaunchTemplate {\n\t\t\tnodeGroupName = getEKSNodegroupWithLaunchTemplateName(input.Namespace.Name, input.ClusterName)\n\t\t} else {\n\t\t\tnodeGroupName = getEKSNodegroupName(input.Namespace.Name, input.ClusterName)\n\t\t}\n\t\tverifyManagedNodeGroup(eksClusterName, nodeGroupName, true, input.AWSSession)\n\t} else {\n\t\tasgName := getASGName(input.ClusterName)\n\t\tverifyASG(eksClusterName, asgName, true, input.AWSSession)\n\t}\n\n\tif input.IncludeScaling { // TODO (richardcase): should this be a separate spec?\n\t\tginkgo.By(\"Scaling the machine pool up\")\n\t\tframework.ScaleMachinePoolAndWait(ctx, framework.ScaleMachinePoolAndWaitInput{\n\t\t\tClusterProxy: input.BootstrapClusterProxy,\n\t\t\tCluster: cluster,\n\t\t\tReplicas: 2,\n\t\t\tMachinePools: mp,\n\t\t\tWaitForMachinePoolToScale: input.E2EConfig.GetIntervals(\"\", \"wait-worker-nodes\"),\n\t\t})\n\n\t\tginkgo.By(\"Scaling the machine pool down\")\n\t\tframework.ScaleMachinePoolAndWait(ctx, framework.ScaleMachinePoolAndWaitInput{\n\t\t\tClusterProxy: input.BootstrapClusterProxy,\n\t\t\tCluster: cluster,\n\t\t\tReplicas: 1,\n\t\t\tMachinePools: mp,\n\t\t\tWaitForMachinePoolToScale: input.E2EConfig.GetIntervals(\"\", \"wait-worker-nodes\"),\n\t\t})\n\t}\n\n\tif input.Cleanup {\n\t\tdeleteMachinePool(ctx, deleteMachinePoolInput{\n\t\t\tDeleter: input.BootstrapClusterProxy.GetClient(),\n\t\t\tMachinePool: mp[0],\n\t\t})\n\n\t\twaitForMachinePoolDeleted(ctx, 
waitForMachinePoolDeletedInput{\n\t\t\tGetter: input.BootstrapClusterProxy.GetClient(),\n\t\t\tMachinePool: mp[0],\n\t\t}, input.E2EConfig.GetIntervals(\"\", \"wait-delete-machine-pool\")...)\n\t}\n}", "func newK8sCluster(c config.Config) (*k8sCluster, error) {\n\tvar kubeconfig *string\n\tif home := homedir.HomeDir(); home != \"\" {\n\t\tkubeconfig = flag.String(\"kubeconfig\", filepath.Join(home, \".kube\", \"config\"), \"(optional) absolue path to the kubeconfig file\")\n\t} else {\n\t\tkubeconfig = flag.String(\"kubeconfig\", \"\", \"absolue path to the kubeconfig file\")\n\t}\n\tflag.Parse()\n\n\tconfig, err := clientcmd.BuildConfigFromFlags(\"\", *kubeconfig)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tclientset, err := kubernetes.NewForConfig(config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &k8sCluster{\n\t\tconfig: c,\n\t\tmutex: sync.Mutex{},\n\t\tpods: make(map[string]string),\n\t\tclientset: clientset,\n\t}, nil\n}", "func createKubeconfig(clusterName, username, clusterControlPlaceAddress, caBasebase64, crtBase64, privateKeyBase64 string) (kubeconfigYAML string) {\n\tcertificate_tpl := `---\napiVersion: v1\nkind: Config\ncurrent-context: %s\nclusters:\n - name: %s\n cluster:\n server: %s\n certificate-authority-data: %s\ncontexts:\n - context:\n cluster: %s\n user: %s\n name: %s@%s\nusers:\n - name: %s\n user:\n client-certificate-data: %s\n client-key-data: %s`\n\n\treturn fmt.Sprintf(certificate_tpl,\n\t\tclusterName,\n\t\tclusterName,\n\t\tclusterControlPlaceAddress,\n\t\tcaBasebase64,\n\t\tclusterName,\n\t\tusername,\n\t\tusername,\n\t\tclusterName,\n\t\tusername,\n\t\tcrtBase64,\n\t\tprivateKeyBase64,\n\t)\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &ReconcileHiveConfig{Client: mgr.GetClient(), scheme: mgr.GetScheme(), restConfig: mgr.GetConfig()}\n}", "func New(t *testing.T, cfg Config) *Environment {\n\te := &Environment{\n\t\thelmPath: \"../kubernetes_helm/helm\",\n\t\tsynkPath: \"src/go/cmd/synk/synk_/synk\",\n\t\tt: t,\n\t\tcfg: cfg,\n\t\tscheme: k8sruntime.NewScheme(),\n\t\tclusters: map[string]*cluster{},\n\t}\n\tif cfg.SchemeFunc != nil {\n\t\tcfg.SchemeFunc(e.scheme)\n\t}\n\tscheme.AddToScheme(e.scheme)\n\n\tvar g errgroup.Group\n\t// Setup cluster concurrently.\n\tfor _, cfg := range cfg.Clusters {\n\t\t// Make name unique to avoid collisions across parallel tests.\n\t\tuniqName := fmt.Sprintf(\"%s-%x\", cfg.Name, time.Now().UnixNano())\n\t\tt.Logf(\"Assigned unique name %q to cluster %q\", uniqName, cfg.Name)\n\n\t\tcluster := &cluster{\n\t\t\tgenName: uniqName,\n\t\t\tcfg: cfg,\n\t\t}\n\t\te.clusters[cfg.Name] = cluster\n\n\t\tg.Go(func() error {\n\t\t\tif err := setupCluster(e.synkPath, cluster); err != nil {\n\t\t\t\t// If cluster has already been created, delete it.\n\t\t\t\tif cluster.kind != nil && os.Getenv(\"NO_TEARDOWN\") == \"\" {\n\t\t\t\t\tcluster.kind.Delete(cfg.Name, \"\")\n\t\t\t\t\tif cluster.kubeConfigPath != \"\" {\n\t\t\t\t\t\tos.Remove(cluster.kubeConfigPath)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\treturn errors.Wrapf(err, \"Create cluster %q\", cfg.Name)\n\t\t\t}\n\t\t\tlog.Printf(\"Created cluster %q\", cfg.Name)\n\t\t\treturn nil\n\t\t})\n\t}\n\tif err := g.Wait(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\treturn e\n}", "func newKubeClient() (*kclient.Client, error) {\n\tvar (\n\t\tconfig *kclient.Config\n\t\terr error\n\t\tmasterURL string\n\t)\n\t// If the user specified --kube_master_url, expand env vars and verify it.\n\tif *argKubeMasterURL != \"\" {\n\t\tmasterURL, err = 
expandKubeMasterURL()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tif masterURL != \"\" && *argKubecfgFile == \"\" {\n\t\t// Only --kube_master_url was provided.\n\t\tconfig = &kclient.Config{Host: masterURL}\n\t} else {\n\t\t// We either have:\n\t\t// 1) --kube_master_url and --kubecfg_file\n\t\t// 2) just --kubecfg_file\n\t\t// 3) neither flag\n\t\t// In any case, the logic is the same. If (3), this will automatically\n\t\t// fall back on the service account token.\n\t\toverrides := &kclientcmd.ConfigOverrides{}\n\t\toverrides.ClusterInfo.Server = masterURL // might be \"\", but that is OK\n\t\trules := &kclientcmd.ClientConfigLoadingRules{ExplicitPath: *argKubecfgFile} // might be \"\", but that is OK\n\t\tif config, err = kclientcmd.NewNonInteractiveDeferredLoadingClientConfig(rules, overrides).ClientConfig(); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tconfig.Version = k8sAPIVersion\n\tglog.Infof(\"Using %s for kubernetes master\", config.Host)\n\tglog.Infof(\"Using kubernetes API %s\", config.Version)\n\treturn kclient.New(config)\n}", "func NewFake(force bool) (m starlark.HasAttrs, closeFn func(), err error) {\n\t// Create a fake API store with some endpoints pre-populated\n\tcm := corev1.ConfigMap{\n\t\tTypeMeta: metav1.TypeMeta{\n\t\t\tAPIVersion: \"v1\",\n\t\t\tKind: \"ConfigMap\",\n\t\t},\n\t\tData: map[string]string{\n\t\t\t\"client-ca-file\": \"contents\",\n\t\t},\n\t}\n\tcmData, err := apiruntime.Encode(unstructured.UnstructuredJSONScheme, &cm)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tfm := map[string][]byte{\n\t\t\"/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\": cmData,\n\t}\n\n\ts := httptest.NewTLSServer(&fakeKube{m: fm})\n\n\tu, err := url.Parse(s.URL)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\th := \"https://\" + u.Host\n\ttlsConfig := rest.TLSClientConfig{\n\t\tInsecure: true,\n\t}\n\trConf := &rest.Config{Host: h, TLSClientConfig: tlsConfig}\n\n\tt, err := rest.TransportFor(rConf)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tk := New(\n\t\th,\n\t\tfakeDiscovery(),\n\t\tdynamic.NewForConfigOrDie(rConf),\n\t\t&http.Client{Transport: t},\n\t\tfalse, /* dryRun */\n\t\tforce,\n\t\tfalse, /* diff */\n\t\tnil, /* diffFilters */\n\t)\n\n\treturn newFakeModule(k.(*kubePackage)), s.Close, nil\n}", "func createObjectInMultipleFakeClusters(obj ...ctrlruntimeclient.Object) map[string]ctrlruntimeclient.Client {\n\treturn map[string]ctrlruntimeclient.Client{\n\t\t\"a\": fakectrlruntimeclient.NewClientBuilder().WithObjects(obj...).Build(),\n\t\t\"b\": fakectrlruntimeclient.NewClientBuilder().WithObjects(obj...).Build(),\n\t}\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &ReconcileKubemanager{Client: mgr.GetClient(), Scheme: mgr.GetScheme()}\n}", "func createKubeConfigAKS(ctx context.Context, state map[string]*tfjson.StateOutput, kcPath string) error {\n\tkubeconfigYaml, ok := state[\"aks_kubeconfig\"].Value.(string)\n\tif !ok || kubeconfigYaml == \"\" {\n\t\treturn fmt.Errorf(\"failed to obtain kubeconfig from tf output\")\n\t}\n\treturn tftestenv.CreateKubeconfigAKS(ctx, kubeconfigYaml, kcPath)\n}", "func newEchoServerPodSpec(podName string) *api.Pod {\n\tport := 8080\n\tpod := &api.Pod{\n\t\tObjectMeta: api.ObjectMeta{\n\t\t\tName: podName,\n\t\t},\n\t\tSpec: api.PodSpec{\n\t\t\tContainers: []api.Container{\n\t\t\t\t{\n\t\t\t\t\tName: \"echoserver\",\n\t\t\t\t\tImage: \"gcr.io/google_containers/echoserver:1.4\",\n\t\t\t\t\tPorts: 
[]api.ContainerPort{{ContainerPort: int32(port)}},\n\t\t\t\t},\n\t\t\t},\n\t\t\tRestartPolicy: api.RestartPolicyNever,\n\t\t},\n\t}\n\treturn pod\n}", "func setupGovKeeper(t *testing.T, expectations ...func(sdk.Context, mocks)) (\n\t*keeper.Keeper,\n\tmocks,\n\tmoduletestutil.TestEncodingConfig,\n\tsdk.Context,\n) {\n\tt.Helper()\n\tkey := storetypes.NewKVStoreKey(types.StoreKey)\n\tstoreService := runtime.NewKVStoreService(key)\n\ttestCtx := testutil.DefaultContextWithDB(t, key, storetypes.NewTransientStoreKey(\"transient_test\"))\n\tctx := testCtx.Ctx.WithBlockHeader(cmtproto.Header{Time: cmttime.Now()})\n\tencCfg := moduletestutil.MakeTestEncodingConfig()\n\tv1.RegisterInterfaces(encCfg.InterfaceRegistry)\n\tv1beta1.RegisterInterfaces(encCfg.InterfaceRegistry)\n\tbanktypes.RegisterInterfaces(encCfg.InterfaceRegistry)\n\n\t// Create MsgServiceRouter, but don't populate it before creating the gov\n\t// keeper.\n\tmsr := baseapp.NewMsgServiceRouter()\n\n\t// gomock initializations\n\tctrl := gomock.NewController(t)\n\tm := mocks{\n\t\tacctKeeper: govtestutil.NewMockAccountKeeper(ctrl),\n\t\tbankKeeper: govtestutil.NewMockBankKeeper(ctrl),\n\t\tstakingKeeper: govtestutil.NewMockStakingKeeper(ctrl),\n\t\tdistributionKeeper: govtestutil.NewMockDistributionKeeper(ctrl),\n\t}\n\tif len(expectations) == 0 {\n\t\tmockDefaultExpectations(ctx, m)\n\t} else {\n\t\tfor _, exp := range expectations {\n\t\t\texp(ctx, m)\n\t\t}\n\t}\n\n\t// Gov keeper initializations\n\n\tgovKeeper := keeper.NewKeeper(encCfg.Codec, storeService, m.acctKeeper, m.bankKeeper, m.stakingKeeper, m.distributionKeeper, msr, types.DefaultConfig(), govAcct.String())\n\trequire.NoError(t, govKeeper.ProposalID.Set(ctx, 1))\n\tgovRouter := v1beta1.NewRouter() // Also register legacy gov handlers to test them too.\n\tgovRouter.AddRoute(types.RouterKey, v1beta1.ProposalHandler)\n\tgovKeeper.SetLegacyRouter(govRouter)\n\terr := govKeeper.Params.Set(ctx, v1.DefaultParams())\n\trequire.NoError(t, err)\n\terr = govKeeper.Constitution.Set(ctx, \"constitution\")\n\trequire.NoError(t, err)\n\n\t// Register all handlers for the MegServiceRouter.\n\tmsr.SetInterfaceRegistry(encCfg.InterfaceRegistry)\n\tv1.RegisterMsgServer(msr, keeper.NewMsgServerImpl(govKeeper))\n\tbanktypes.RegisterMsgServer(msr, nil) // Nil is fine here as long as we never execute the proposal's Msgs.\n\n\treturn govKeeper, m, encCfg, ctx\n}", "func NewKeyMan(masterSecret []byte, benchmarking bool) *KeyMan {\n\tlistenIP := tpAddrToKeyServerAddr(config.TPAddr)\n\tkm := &KeyMan{\n\t\tkeyLength: config.KeyLength,\n\t\tkeyTTL: config.KeyTTL,\n\t\tms: \t\t\tmasterSecret,\n\t\tlistenIP: \tlistenIP,\n\t\tlistenPort: \tconfig.ServerPort,\n\t}\n\terr := km.RefreshL0()\n\tif err!=nil{\n\t\tlog.Println(logPrefix+\"ERROR: Did not refresh L0\")\n\t}\t\n\tegressL1Keys = make(map[string]KeyPld)\n\tingressL1Keys = make(map[string]KeyPld)\n\tif benchmarking==false{\n\t\tkm.serveL1()\n\t}\n\t\n\treturn km\n}", "func (ts *tester) createClient() (cli k8s_client.EKS, err error) {\n\tfmt.Print(ts.cfg.EKSConfig.Colorize(\"\\n\\n[yellow]*********************************\\n\"))\n\tfmt.Printf(ts.cfg.EKSConfig.Colorize(\"[light_green]createClient [default](%q)\\n\"), ts.cfg.EKSConfig.ConfigPath)\n\tts.cfg.EKSConfig.AuthenticationAPIVersion =\"client.authentication.k8s.io/v1alpha1\"\n\n\tif ts.cfg.EKSConfig.AWSIAMAuthenticatorPath != \"\" && ts.cfg.EKSConfig.AWSIAMAuthenticatorDownloadURL != \"\" {\n\t\ttpl := template.Must(template.New(\"tmplKUBECONFIG\").Parse(tmplKUBECONFIG))\n\t\tbuf := 
bytes.NewBuffer(nil)\n\t\tif err = tpl.Execute(buf, kubeconfig{\n\t\t\tClusterAPIServerEndpoint: ts.cfg.EKSConfig.Status.ClusterAPIServerEndpoint,\n\t\t\tClusterCA: ts.cfg.EKSConfig.Status.ClusterCA,\n\t\t\tAWSIAMAuthenticatorPath: ts.cfg.EKSConfig.AWSIAMAuthenticatorPath,\n\t\t\tClusterName: ts.cfg.EKSConfig.Name,\n\t\t\tAuthenticationAPIVersion: ts.cfg.EKSConfig.AuthenticationAPIVersion,\n\t\t}); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tts.cfg.Logger.Info(\"writing KUBECONFIG with aws-iam-authenticator\", zap.String(\"kubeconfig-path\", ts.cfg.EKSConfig.KubeConfigPath))\n\t\tif err = ioutil.WriteFile(ts.cfg.EKSConfig.KubeConfigPath, buf.Bytes(), 0777); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif err = aws_s3.Upload(\n\t\t\tts.cfg.Logger,\n\t\t\tts.cfg.S3API,\n\t\t\tts.cfg.EKSConfig.S3.BucketName,\n\t\t\tpath.Join(ts.cfg.EKSConfig.Name, \"kubeconfig.yaml\"),\n\t\t\tts.cfg.EKSConfig.KubeConfigPath,\n\t\t); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tts.cfg.Logger.Info(\"wrote KUBECONFIG with aws-iam-authenticator\", zap.String(\"kubeconfig-path\", ts.cfg.EKSConfig.KubeConfigPath))\n\t} else {\n\t\targs := []string{\n\t\t\tts.cfg.EKSConfig.AWSCLIPath,\n\t\t\t\"eks\",\n\t\t\tfmt.Sprintf(\"--region=%s\", ts.cfg.EKSConfig.Region),\n\t\t\t\"update-kubeconfig\",\n\t\t\tfmt.Sprintf(\"--name=%s\", ts.cfg.EKSConfig.Name),\n\t\t\tfmt.Sprintf(\"--kubeconfig=%s\", ts.cfg.EKSConfig.KubeConfigPath),\n\t\t\t\"--verbose\",\n\t\t}\n\t\tif ts.cfg.EKSConfig.ResolverURL != \"\" {\n\t\t\targs = append(args, fmt.Sprintf(\"--endpoint=%s\", ts.cfg.EKSConfig.ResolverURL))\n\t\t}\n\t\tcmd := strings.Join(args, \" \")\n\t\tts.cfg.Logger.Info(\"writing KUBECONFIG with 'aws eks update-kubeconfig'\",\n\t\t\tzap.String(\"kubeconfig-path\", ts.cfg.EKSConfig.KubeConfigPath),\n\t\t\tzap.String(\"cmd\", cmd),\n\t\t)\n\t\tretryStart, waitDur := time.Now(), 3*time.Minute\n\t\tvar output []byte\n\t\tfor time.Since(retryStart) < waitDur {\n\t\t\tselect {\n\t\t\tcase <-ts.cfg.Stopc:\n\t\t\t\treturn nil, errors.New(\"update-kubeconfig aborted\")\n\t\t\tcase <-time.After(5 * time.Second):\n\t\t\t}\n\t\t\tctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute)\n\t\t\toutput, err = exec.New().CommandContext(ctx, args[0], args[1:]...).CombinedOutput()\n\t\t\tcancel()\n\t\t\tout := string(output)\n\t\t\tfmt.Fprintf(ts.cfg.LogWriter, \"\\n'%s' output:\\n\\n%s\\n\\n\", cmd, out)\n\t\t\tif err != nil {\n\t\t\t\tts.cfg.Logger.Warn(\"'aws eks update-kubeconfig' failed\", zap.Error(err))\n\t\t\t\tif !strings.Contains(out, \"Cluster status not active\") || !strings.Contains(err.Error(), \"exit\") {\n\t\t\t\t\treturn nil, fmt.Errorf(\"'aws eks update-kubeconfig' failed (output %q, error %v)\", out, err)\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tts.cfg.Logger.Info(\"'aws eks update-kubeconfig' success\", zap.String(\"kubeconfig-path\", ts.cfg.EKSConfig.KubeConfigPath))\n\t\t\tif err = aws_s3.Upload(\n\t\t\t\tts.cfg.Logger,\n\t\t\t\tts.cfg.S3API,\n\t\t\t\tts.cfg.EKSConfig.S3.BucketName,\n\t\t\t\tpath.Join(ts.cfg.EKSConfig.Name, \"kubeconfig.yaml\"),\n\t\t\t\tts.cfg.EKSConfig.KubeConfigPath,\n\t\t\t); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t\tif err != nil {\n\t\t\tts.cfg.Logger.Warn(\"failed 'aws eks update-kubeconfig'\", zap.Error(err))\n\t\t\treturn nil, err\n\t\t}\n\n\t\tts.cfg.Logger.Info(\"ran 'aws eks update-kubeconfig'\")\n\t\tfmt.Fprintf(ts.cfg.LogWriter, \"\\n\\n'%s' output:\\n\\n%s\\n\\n\", cmd, strings.TrimSpace(string(output)))\n\t}\n\n\tts.cfg.Logger.Info(\"creating 
k8s client\")\n\tkcfg := &k8s_client.EKSConfig{\n\t\tLogger: ts.cfg.Logger,\n\t\tRegion: ts.cfg.EKSConfig.Region,\n\t\tClusterName: ts.cfg.EKSConfig.Name,\n\t\tKubeConfigPath: ts.cfg.EKSConfig.KubeConfigPath,\n\t\tKubectlPath: ts.cfg.EKSConfig.KubectlPath,\n\t\tServerVersion: ts.cfg.EKSConfig.Version,\n\t\tEncryptionEnabled: ts.cfg.EKSConfig.Encryption.CMKARN != \"\",\n\t\tS3API: ts.cfg.S3API,\n\t\tS3BucketName: ts.cfg.EKSConfig.S3.BucketName,\n\t\tS3MetricsRawOutputDirKubeAPIServer: path.Join(ts.cfg.EKSConfig.Name, \"metrics-kube-apiserver\"),\n\t\tMetricsRawOutputDirKubeAPIServer: filepath.Join(filepath.Dir(ts.cfg.EKSConfig.ConfigPath), ts.cfg.EKSConfig.Name+\"-metrics-kube-apiserver\"),\n\t\tClients: ts.cfg.EKSConfig.Clients,\n\t\tClientQPS: ts.cfg.EKSConfig.ClientQPS,\n\t\tClientBurst: ts.cfg.EKSConfig.ClientBurst,\n\t\tClientTimeout: ts.cfg.EKSConfig.ClientTimeout,\n\t}\n\tif ts.cfg.EKSConfig.IsEnabledAddOnClusterVersionUpgrade() {\n\t\tkcfg.UpgradeServerVersion = ts.cfg.EKSConfig.AddOnClusterVersionUpgrade.Version\n\t}\n\tif ts.cfg.EKSConfig.Status != nil {\n\t\tkcfg.ClusterAPIServerEndpoint = ts.cfg.EKSConfig.Status.ClusterAPIServerEndpoint\n\t\tkcfg.ClusterCADecoded = ts.cfg.EKSConfig.Status.ClusterCADecoded\n\t}\n\tcli, err = k8s_client.NewEKS(kcfg)\n\tif err != nil {\n\t\tts.cfg.Logger.Warn(\"failed to create k8s client\", zap.Error(err))\n\t} else {\n\t\tts.cfg.Logger.Info(\"created k8s client\")\n\t}\n\treturn cli, err\n}", "func NewBaseKeeper(am auth.AccountKeeper) BaseKeeper {\n\treturn BaseKeeper{am: am}\n}", "func buildEKSCCCreateObject(cluster *mgmtv3.Cluster) (*unstructured.Unstructured, error) {\n\teksClusterConfig := eksv1.EKSClusterConfig{\n\t\tTypeMeta: v1.TypeMeta{\n\t\t\tKind: \"EKSClusterConfig\",\n\t\t\tAPIVersion: eksV1,\n\t\t},\n\t\tObjectMeta: v1.ObjectMeta{\n\t\t\tName: cluster.Name,\n\t\t\tOwnerReferences: []v1.OwnerReference{\n\t\t\t\t{\n\t\t\t\t\tKind: cluster.Kind,\n\t\t\t\t\tAPIVersion: rbac.RancherManagementAPIVersion,\n\t\t\t\t\tName: cluster.Name,\n\t\t\t\t\tUID: cluster.UID,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tSpec: *cluster.Spec.EKSConfig,\n\t}\n\n\t// convert EKS cluster config into unstructured object so it can be used with dynamic client\n\teksClusterConfigMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&eksClusterConfig)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &unstructured.Unstructured{\n\t\tObject: eksClusterConfigMap,\n\t}, nil\n}", "func makeZookeeperSts(z *bkcmdbv1.Bkcmdb) *appsv1.StatefulSet {\n\treplicas := int32(1)\n\tterminationPeriod := int64(1800)\n\tfsGroup := int64(1000)\n\trunAsUser := int64(1000)\n\n\treturn &appsv1.StatefulSet{\n\t\tTypeMeta: metav1.TypeMeta{\n\t\t\tKind: \"StatefulSet\",\n\t\t\tAPIVersion: \"apps/v1\",\n\t\t},\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: z.GetName() + \"-zookeeper\",\n\t\t\tNamespace: z.Namespace,\n\t\t\tLabels: map[string]string{\n\t\t\t\t\"app\": \"zookeeper\",\n\t\t\t\t\"release\": z.GetName(),\n\t\t\t},\n\t\t},\n\t\tSpec: appsv1.StatefulSetSpec{\n\t\t\tSelector: &metav1.LabelSelector{\n\t\t\t\tMatchLabels: map[string]string{\n\t\t\t\t\t\"app\": \"zookeeper\",\n\t\t\t\t\t\"release\": z.GetName(),\n\t\t\t\t\t\"component\": \"server\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tReplicas: &replicas,\n\t\t\tServiceName: z.GetName() + \"-zookeeper-headless\",\n\t\t\tUpdateStrategy: appsv1.StatefulSetUpdateStrategy{\n\t\t\t\tType: appsv1.OnDeleteStatefulSetStrategyType,\n\t\t\t},\n\t\t\tTemplate: v1.PodTemplateSpec{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tLabels: 
map[string]string{\n\t\t\t\t\t\t\"app\": \"zookeeper\",\n\t\t\t\t\t\t\"release\": z.GetName(),\n\t\t\t\t\t\t\"component\": \"server\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tSpec: v1.PodSpec{\n\t\t\t\t\tTerminationGracePeriodSeconds: &terminationPeriod,\n\t\t\t\t\tSecurityContext: &v1.PodSecurityContext{\n\t\t\t\t\t\tRunAsUser: &runAsUser,\n\t\t\t\t\t\tFSGroup: &fsGroup,\n\t\t\t\t\t},\n\t\t\t\t\tContainers: makeZookeeperContainers(z),\n\t\t\t\t\tVolumes: []v1.Volume{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: \"data\",\n\t\t\t\t\t\t\tVolumeSource: v1.VolumeSource{\n\t\t\t\t\t\t\t\tEmptyDir: &v1.EmptyDirVolumeSource{},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\tr := &Reconciler{\n\t\tClient: mgr.GetClient(),\n\t\tscheme: mgr.GetScheme(),\n\t\tkubeclient: kubernetes.NewForConfigOrDie(mgr.GetConfig()),\n\t\trecorder: mgr.GetRecorder(controllerName),\n\t}\n\tr.provision = r._provision\n\tr.bind = r._bind\n\tr.delete = r._delete\n\treturn r\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &ReconcileTektonInstallation{client: mgr.GetClient(), scheme: mgr.GetScheme()}\n}", "func makePodSpecPatch(\n\tcontainer *pipelinespec.PipelineDeploymentConfig_PipelineContainerSpec,\n\tcomponentSpec *pipelinespec.ComponentSpec,\n\texecutorInput *pipelinespec.ExecutorInput,\n\texecutionID int64,\n\tpipelineName string,\n\trunID string,\n) (string, error) {\n\texecutorInputJSON, err := protojson.Marshal(executorInput)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"failed to make podSpecPatch: %w\", err)\n\t}\n\tcomponentJSON, err := protojson.Marshal(componentSpec)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"failed to make podSpecPatch: %w\", err)\n\t}\n\n\tuserCmdArgs := make([]string, 0, len(container.Command)+len(container.Args))\n\tuserCmdArgs = append(userCmdArgs, container.Command...)\n\tuserCmdArgs = append(userCmdArgs, container.Args...)\n\tlauncherCmd := []string{\n\t\t// TODO(Bobgy): workaround argo emissary executor bug, after we upgrade to an argo version with the bug fix, we can remove the following line.\n\t\t// Reference: https://github.com/argoproj/argo-workflows/issues/7406\n\t\t\"/var/run/argo/argoexec\", \"emissary\", \"--\",\n\t\tcomponent.KFPLauncherPath,\n\t\t// TODO(Bobgy): no need to pass pipeline_name and run_id, these info can be fetched via pipeline context and pipeline run context which have been created by root DAG driver.\n\t\t\"--pipeline_name\", pipelineName,\n\t\t\"--run_id\", runID,\n\t\t\"--execution_id\", fmt.Sprintf(\"%v\", executionID),\n\t\t\"--executor_input\", string(executorInputJSON),\n\t\t\"--component_spec\", string(componentJSON),\n\t\t\"--pod_name\",\n\t\tfmt.Sprintf(\"$(%s)\", component.EnvPodName),\n\t\t\"--pod_uid\",\n\t\tfmt.Sprintf(\"$(%s)\", component.EnvPodUID),\n\t\t\"--mlmd_server_address\",\n\t\tfmt.Sprintf(\"$(%s)\", component.EnvMetadataHost),\n\t\t\"--mlmd_server_port\",\n\t\tfmt.Sprintf(\"$(%s)\", component.EnvMetadataPort),\n\t\t\"--\", // separater before user command and args\n\t}\n\tres := k8score.ResourceRequirements{\n\t\tLimits: map[k8score.ResourceName]k8sres.Quantity{},\n\t}\n\tmemoryLimit := container.GetResources().GetMemoryLimit()\n\tif memoryLimit != 0 {\n\t\tq, err := k8sres.ParseQuantity(fmt.Sprintf(\"%vG\", memoryLimit))\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tres.Limits[k8score.ResourceMemory] = q\n\t}\n\tcpuLimit := container.GetResources().GetCpuLimit()\n\tif cpuLimit != 0 
{\n\t\tq, err := k8sres.ParseQuantity(fmt.Sprintf(\"%v\", cpuLimit))\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tres.Limits[k8score.ResourceCPU] = q\n\t}\n\taccelerator := container.GetResources().GetAccelerator()\n\tif accelerator != nil {\n\t\treturn \"\", fmt.Errorf(\"accelerator resources are not supported yet: https://github.com/kubeflow/pipelines/issues/7043\")\n\t}\n\tpodSpec := &k8score.PodSpec{\n\t\tContainers: []k8score.Container{{\n\t\t\tName: \"main\", // argo task user container is always called \"main\"\n\t\t\tCommand: launcherCmd,\n\t\t\tArgs: userCmdArgs,\n\t\t\tImage: container.Image,\n\t\t\tResources: res,\n\t\t}},\n\t}\n\tpodSpecPatchBytes, err := json.Marshal(podSpec)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"JSON marshaling pod spec patch: %w\", err)\n\t}\n\treturn string(podSpecPatchBytes), nil\n}", "func NewPodSpec(serviceAccountName string, containers []core.Container, volumes []core.Volume, nodeSelector map[string]string) core.PodSpec {\n\treturn core.PodSpec{\n\t\tContainers: containers,\n\t\tServiceAccountName: serviceAccountName,\n\t\tVolumes: volumes,\n\t\tNodeSelector: nodeSelector,\n\t}\n}", "func createClusterSpecificSpec(app apptypes.AppType, b *troubleshootv1beta2.SupportBundle, clientset kubernetes.Interface) (*troubleshootv1beta2.SupportBundle, error) {\n\tsupportBundle, err := staticspecs.GetClusterSpecificSpec()\n\tif err != nil {\n\t\tlogger.Errorf(\"Failed to load cluster specific support bundle spec: %v\", err)\n\t\treturn nil, err\n\t}\n\n\tsupportBundle = addDiscoveredSpecs(supportBundle, app, clientset)\n\treturn supportBundle, nil\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\tapiConfig := swagger.NewConfiguration()\n\t//TODO Make it configurable\n\tapiConfig.BasePath = \"http://localhost:5000\"\n\n\treturn &ReconcileNotebookJob{\n\t\tClient: mgr.GetClient(),\n\t\tscheme: mgr.GetScheme(),\n\t\trecorder: mgr.GetRecorder(\"notebookjob-controller\"),\n\t\tapiClient: swagger.NewAPIClient(apiConfig),\n\t}\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &ReconcileConfigMap{client: mgr.GetClient(), scheme: mgr.GetScheme()}\n}", "func (c *DotQservConfigMapSpec) Create() (client.Object, error) {\n\tcr := c.qserv\n\ttmplData := generateTemplateData(cr)\n\n\treqLogger := log.WithValues(\"Request.Namespace\", cr.Namespace, \"Request.Name\", cr.Name)\n\n\tname := c.GetName()\n\tnamespace := cr.Namespace\n\n\tlabels := util.GetComponentLabels(constants.Czar, cr.Name)\n\troot := filepath.Join(\"/\", \"configmap\", \"dot-qserv\")\n\n\tcm := &v1.ConfigMap{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: name,\n\t\t\tNamespace: namespace,\n\t\t\tLabels: labels,\n\t\t},\n\t\tData: scanDir(root, reqLogger, &tmplData),\n\t}\n\n\treturn cm, nil\n}" ]
[ "0.81380135", "0.642263", "0.6284003", "0.61777276", "0.61743927", "0.6154568", "0.6145337", "0.61207503", "0.61089283", "0.6068348", "0.6050417", "0.60409606", "0.6033205", "0.5995373", "0.59891766", "0.5973247", "0.59710914", "0.5970534", "0.59357965", "0.5931972", "0.5926432", "0.5899297", "0.5893751", "0.584957", "0.58224", "0.5813931", "0.5806469", "0.5804138", "0.57760894", "0.5756695", "0.57307214", "0.57006663", "0.5694793", "0.5694793", "0.56438607", "0.5611398", "0.55778587", "0.54979473", "0.54452217", "0.5409382", "0.53377354", "0.52969354", "0.5240047", "0.5229853", "0.5220481", "0.5212761", "0.51852167", "0.51517504", "0.5078096", "0.5035236", "0.5003213", "0.5001144", "0.49421215", "0.49276298", "0.48723695", "0.48645502", "0.48614708", "0.48524958", "0.48436522", "0.4815928", "0.4815865", "0.47745854", "0.47739083", "0.47701985", "0.4755108", "0.4718484", "0.47183505", "0.47114038", "0.47084057", "0.46896768", "0.4679611", "0.4677361", "0.46745092", "0.46652576", "0.46537006", "0.46161926", "0.4600593", "0.4599197", "0.4598936", "0.459411", "0.45896012", "0.45839578", "0.4578501", "0.45724672", "0.4549098", "0.4548901", "0.45372444", "0.45317382", "0.4528681", "0.45244366", "0.4524431", "0.4523764", "0.451815", "0.45176065", "0.45174628", "0.45170966", "0.4516991", "0.45114422", "0.45066166", "0.45064974" ]
0.810713
1
NewWebhookSpec generates a new WebhookSpec from a job.WebhookSpec
func NewWebhookSpec(spec *job.WebhookSpec) *WebhookSpec { return &WebhookSpec{ CreatedAt: spec.CreatedAt, UpdatedAt: spec.UpdatedAt, } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func NewWebhook(timeout time.Duration) filters.Spec {\n\treturn WebhookWithOptions(WebhookOptions{Timeout: timeout, Tracer: opentracing.NoopTracer{}})\n}", "func (in *GitHubWebhookSpec) DeepCopy() *GitHubWebhookSpec {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(GitHubWebhookSpec)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func NewWebhook(ops ...ConfigOption) (*Webhook, error) {\n\tconf := toConfig(ops...)\n\ttemplater, err := templates.NewTemplater(conf.templateName, conf.sidecarConfig, conf.sidecarTemplate)\n\tif err != nil {\n\t\topenlogging.Error(fmt.Sprintf(\"new templater failed: error = %v, template = %s\", err.Error(), conf.templateName))\n\t\treturn nil, err\n\t}\n\n\treturn &Webhook{\n\t\tserver: &http.Server{\n\t\t\tAddr: fmt.Sprintf(\":%d\", conf.port),\n\t\t\tTLSConfig: &tls.Config{GetCertificate: conf.getCertificate},\n\t\t},\n\t\tconfig: conf,\n\t\ttemplater: templater,\n\t}, nil\n}", "func NewWebhook(client kubernetes.Interface, resources *WebhookResources, controllerNamespace string, noInitContainer bool) (*Webhook, error) {\n\tvar (\n\t\tscheme = runtime.NewScheme()\n\t\tcodecs = serializer.NewCodecFactory(scheme)\n\t)\n\n\treturn &Webhook{\n\t\tdeserializer: codecs.UniversalDeserializer(),\n\t\tcontrollerNamespace: controllerNamespace,\n\t\tresources: resources,\n\t\tnoInitContainer: noInitContainer,\n\t}, nil\n}", "func NewWebhook(ctx *pulumi.Context,\n\tname string, args *WebhookArgs, opts ...pulumi.ResourceOption) (*Webhook, error) {\n\tif args == nil {\n\t\treturn nil, errors.New(\"missing one or more required arguments\")\n\t}\n\n\tif args.Name == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'Name'\")\n\t}\n\tif args.Url == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'Url'\")\n\t}\n\topts = internal.PkgResourceDefaultOpts(opts)\n\tvar resource Webhook\n\terr := ctx.RegisterResource(\"datadog:index/webhook:Webhook\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func NewWebhook() *prometheusruleWebhook {\n\tscheme := runtime.NewScheme()\n\treturn &prometheusruleWebhook{\n\t\ts: *scheme,\n\t}\n}", "func New(\n\tctx context.Context,\n\tcontrollers []interface{},\n) (webhook *Webhook, err error) {\n\n\t// ServeMux.Handle panics on duplicate paths\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\terr = fmt.Errorf(\"error creating webhook %v\", r)\n\t\t}\n\t}()\n\n\topts := GetOptions(ctx)\n\tif opts == nil {\n\t\treturn nil, errors.New(\"context must have Options specified\")\n\t}\n\tlogger := logging.FromContext(ctx)\n\n\tif opts.StatsReporter == nil {\n\t\treporter, err := NewStatsReporter()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\topts.StatsReporter = reporter\n\t}\n\n\tdefaultTLSMinVersion := uint16(tls.VersionTLS13)\n\tif opts.TLSMinVersion == 0 {\n\t\topts.TLSMinVersion = TLSMinVersionFromEnv(defaultTLSMinVersion)\n\t} else if opts.TLSMinVersion != tls.VersionTLS12 && opts.TLSMinVersion != tls.VersionTLS13 {\n\t\treturn nil, fmt.Errorf(\"unsupported TLS version: %d\", opts.TLSMinVersion)\n\t}\n\n\tsyncCtx, cancel := context.WithCancel(context.Background())\n\n\twebhook = &Webhook{\n\t\tOptions: *opts,\n\t\tLogger: logger,\n\t\tsynced: cancel,\n\t}\n\n\tif opts.SecretName != \"\" {\n\t\t// Injection is too aggressive for this case because by simply linking this\n\t\t// library we force consumers to have secret access. 
If we require that one\n\t\t// of the admission controllers' informers *also* require the secret\n\t\t// informer, then we can fetch the shared informer factory here and produce\n\t\t// a new secret informer from it.\n\t\tsecretInformer := kubeinformerfactory.Get(ctx).Core().V1().Secrets()\n\n\t\twebhook.tlsConfig = &tls.Config{\n\t\t\tMinVersion: opts.TLSMinVersion,\n\n\t\t\t// If we return (nil, error) the client sees - 'tls: internal error\"\n\t\t\t// If we return (nil, nil) the client sees - 'tls: no certificates configured'\n\t\t\t//\n\t\t\t// We'll return (nil, nil) when we don't find a certificate\n\t\t\tGetCertificate: func(*tls.ClientHelloInfo) (*tls.Certificate, error) {\n\t\t\t\tsecret, err := secretInformer.Lister().Secrets(system.Namespace()).Get(opts.SecretName)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlogger.Errorw(\"failed to fetch secret\", zap.Error(err))\n\t\t\t\t\treturn nil, nil\n\t\t\t\t}\n\t\t\t\twebOpts := GetOptions(ctx)\n\t\t\t\tsKey, sCert := getSecretDataKeyNamesOrDefault(webOpts.ServerPrivateKeyName, webOpts.ServerCertificateName)\n\t\t\t\tserverKey, ok := secret.Data[sKey]\n\t\t\t\tif !ok {\n\t\t\t\t\tlogger.Warn(\"server key missing\")\n\t\t\t\t\treturn nil, nil\n\t\t\t\t}\n\t\t\t\tserverCert, ok := secret.Data[sCert]\n\t\t\t\tif !ok {\n\t\t\t\t\tlogger.Warn(\"server cert missing\")\n\t\t\t\t\treturn nil, nil\n\t\t\t\t}\n\t\t\t\tcert, err := tls.X509KeyPair(serverCert, serverKey)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t\treturn &cert, nil\n\t\t\t},\n\t\t}\n\t}\n\n\twebhook.mux.HandleFunc(\"/\", func(w http.ResponseWriter, r *http.Request) {\n\t\thttp.Error(w, fmt.Sprint(\"no controller registered for: \", html.EscapeString(r.URL.Path)), http.StatusBadRequest)\n\t})\n\n\tfor _, controller := range controllers {\n\t\tswitch c := controller.(type) {\n\t\tcase AdmissionController:\n\t\t\thandler := admissionHandler(logger, opts.StatsReporter, c, syncCtx.Done())\n\t\t\twebhook.mux.Handle(c.Path(), handler)\n\n\t\tcase ConversionController:\n\t\t\thandler := conversionHandler(logger, opts.StatsReporter, c)\n\t\t\twebhook.mux.Handle(c.Path(), handler)\n\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"unknown webhook controller type: %T\", controller)\n\t\t}\n\t}\n\n\treturn\n}", "func (d hook) toSpec() *specs.Hook {\n\ts := specs.Hook{\n\t\tHookName: d.Lifecycle,\n\t\tPath: d.Path,\n\t\tArgs: d.Args,\n\t}\n\n\treturn &s\n}", "func MakeHookRequest(t *testing.T, fixture, eventType string, changes ...fixtureFunc) *http.Request {\n\tbody := ReadJSONFixture(t, fixture)\n\tfor _, c := range changes {\n\t\tc(body)\n\t}\n\n\tserialisedBody := serialiseToJSON(t, body)\n\tmac := hmac.New(sha1.New, []byte(secret))\n\t_, err := mac.Write(serialisedBody.Bytes())\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tsig := hex.EncodeToString(mac.Sum(nil))\n\treq := httptest.NewRequest(\"POST\", \"/\", serialisedBody)\n\treq.Header.Add(\"X-GitHub-Delivery\", \"72d3162e-cc78-11e3-81ab-4c9367dc0958\")\n\treq.Header.Add(\"Content-Type\", \"application/json\")\n\treq.Header.Add(\"X-GitHub-Event\", eventType)\n\treq.Header.Add(\"X-Hub-Signature\", fmt.Sprintf(\"sha1=%s\", sig))\n\treturn req\n}", "func newValidatingIsReadyWebhookFixture(f *framework.Framework, certCtx *certContext, servicePort int32, namespace string) admissionregistrationv1.ValidatingWebhook {\n\tsideEffectsNone := admissionregistrationv1.SideEffectClassNone\n\tfailOpen := admissionregistrationv1.Ignore\n\treturn admissionregistrationv1.ValidatingWebhook{\n\t\tName: 
\"validating-is-webhook-configuration-ready.k8s.io\",\n\t\tRules: []admissionregistrationv1.RuleWithOperations{{\n\t\t\tOperations: []admissionregistrationv1.OperationType{admissionregistrationv1.Create},\n\t\t\tRule: admissionregistrationv1.Rule{\n\t\t\t\tAPIGroups: []string{\"\"},\n\t\t\t\tAPIVersions: []string{\"v1\"},\n\t\t\t\tResources: []string{\"configmaps\"},\n\t\t\t},\n\t\t}},\n\t\tClientConfig: admissionregistrationv1.WebhookClientConfig{\n\t\t\tService: &admissionregistrationv1.ServiceReference{\n\t\t\t\tNamespace: namespace,\n\t\t\t\tName: serviceName,\n\t\t\t\tPath: strPtr(\"/always-deny\"),\n\t\t\t\tPort: pointer.Int32(servicePort),\n\t\t\t},\n\t\t\tCABundle: certCtx.signingCert,\n\t\t},\n\t\t// network failures while the service network routing is being set up should be ignored by the marker\n\t\tFailurePolicy: &failOpen,\n\t\tSideEffects: &sideEffectsNone,\n\t\tAdmissionReviewVersions: []string{\"v1\", \"v1beta1\"},\n\t\t// Scope the webhook to just the markers namespace\n\t\tNamespaceSelector: &metav1.LabelSelector{\n\t\t\tMatchLabels: map[string]string{uniqueName + \"-markers\": \"true\"},\n\t\t},\n\t\t// appease createValidatingWebhookConfiguration isolation requirements\n\t\tObjectSelector: &metav1.LabelSelector{\n\t\t\tMatchLabels: map[string]string{uniqueName: \"true\"},\n\t\t},\n\t}\n}", "func (o *CreateGitWebhookUsingPOSTParams) SetGitWebhookSpec(gitWebhookSpec models.GitWebhookSpec) {\n\to.GitWebhookSpec = gitWebhookSpec\n}", "func NewWebhook(url string, filterFnString string, timeout uint64) (*Webhook, error) {\n\n\tvar err error\n\n\tif url == \"\" {\n\t\terr = errors.New(\"url parameter must be defined for webhook events.\")\n\t\treturn nil, err\n\t}\n\n\twh := &Webhook{\n\t\turl: url,\n\t}\n\tif filterFnString != \"\" {\n\t\twh.filter = NewJSEventFunction(filterFnString)\n\t}\n\n\tif timeout != 0 {\n\t\twh.timeout = time.Duration(timeout) * time.Second\n\t} else {\n\t\twh.timeout = time.Duration(kDefaultWebhookTimeout) * time.Second\n\t}\n\n\treturn wh, err\n}", "func New(mgr manager.Manager, args Args) (*Webhook, error) {\n\tlogger := log.Log.WithName(args.Name).WithValues(\"provider\", args.Provider)\n\n\t// Create handler\n\tbuilder := NewBuilder(mgr, logger)\n\n\tfor val, objs := range args.Validators {\n\t\tbuilder.WithValidator(val, objs...)\n\t}\n\n\tfor mut, objs := range args.Mutators {\n\t\tbuilder.WithMutator(mut, objs...)\n\t}\n\n\tbuilder.WithPredicates(args.Predicates...)\n\n\thandler, err := builder.Build()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Create webhook\n\tlogger.Info(\"Creating webhook\")\n\n\treturn &Webhook{\n\t\tPath: args.Path,\n\t\tWebhook: &admission.Webhook{Handler: handler},\n\t}, nil\n}", "func NewGenericWebhook(kubeConfigFile string, groupVersions []unversioned.GroupVersion) (*GenericWebhook, error) {\n\tfor _, groupVersion := range groupVersions {\n\t\tif !registered.IsEnabledVersion(groupVersion) {\n\t\t\treturn nil, fmt.Errorf(\"webhook plugin requires enabling extension resource: %s\", groupVersion)\n\t\t}\n\t}\n\n\tloadingRules := clientcmd.NewDefaultClientConfigLoadingRules()\n\tloadingRules.ExplicitPath = kubeConfigFile\n\tloader := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, &clientcmd.ConfigOverrides{})\n\n\tclientConfig, err := loader.ClientConfig()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tcodec := api.Codecs.LegacyCodec(groupVersions...)\n\tclientConfig.ContentConfig.NegotiatedSerializer = 
runtimeserializer.NegotiatedSerializerWrapper(\n\t\truntime.SerializerInfo{Serializer: codec},\n\t\truntime.StreamSerializerInfo{},\n\t)\n\n\trestClient, err := restclient.UnversionedRESTClientFor(clientConfig)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// TODO(ericchiang): Can we ensure remote service is reachable?\n\n\treturn &GenericWebhook{restClient}, nil\n}", "func SetupWebhooks(mgr ctrl.Manager) error {\n\t/*\n\t\tTotally undocumented and hard-to-find feature is that the builder automatically registers the URL path for the webhook.\n\t\tWhat's more, not even the tests in upstream controller-runtime reveal what this path is _actually_ going to look like.\n\t\tSo here's how the path is built (dots replaced with dash, lower-cased, single-form):\n\t\t /validate-<group>-<version>-<kind>\n\t\t /mutate-<group>-<version>-<kind>\n\t\tExample:\n\t\t /validate-clustercode-github-io-v1alpha1-blueprint\n\t\tThis path has to be given in the `//+kubebuilder:webhook:...` magic comment, see example:\n\t\t +kubebuilder:webhook:verbs=create;update;delete,path=/validate-clustercode-github-io-v1alpha1-blueprint,mutating=false,failurePolicy=fail,groups=clustercode.github.io,resources=blueprints,versions=v1alpha1,name=blueprints.clustercode.github.io,sideEffects=None,admissionReviewVersions=v1\n\t\tPay special attention to the plural forms and correct versions!\n\t*/\n\tfor _, setup := range []func(ctrl.Manager) error{\n\t\tblueprintwebhook.SetupWebhook,\n\t} {\n\t\tif err := setup(mgr); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func NewSpec(api *gmail.Service, db *db.DB) (*Spec, error) {\n\tlog.SetLevel(log.DebugLevel)\n\tlog.Info(\"starting new spec\")\n\n\tbytes, err := ioutil.ReadFile(\"./spec.yaml\")\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to read file: %v\", err)\n\t}\n\n\tspec := &Spec{\n\t\tapi: api,\n\t\tdb: db,\n\t}\n\n\terr = yaml.Unmarshal(bytes, spec)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to unmarshal: %v\", err)\n\t}\n\n\treturn spec, nil\n}", "func New(secret string, config WebhookConfig) *Webhook {\n\tif config.L == nil {\n\t\tconfig.L = log.New(ioutil.Discard, \"\", 0)\n\t}\n\tif config.PushCallback == nil {\n\t\tconfig.PushCallback = emptyCallback\n\t}\n\tif config.ReleaseCallback == nil {\n\t\tconfig.ReleaseCallback = emptyCallback\n\t}\n\treturn &Webhook{\n\t\tl: config.L,\n\t\tsecret: secret,\n\t\tpushCallback: config.PushCallback,\n\t\treleaseCallback: config.ReleaseCallback,\n\t}\n}", "func NewWebhook(url string, file interface{}) *SetWebhookParameters {\n\treturn &SetWebhookParameters{\n\t\tURL: url,\n\t\tCertificate: file,\n\t}\n}", "func New(config *Config) *Webhook {\n\treturn &Webhook{\n\t\tprovider: webhooks.Gogs,\n\t\tsecret: config.Secret,\n\t\teventFuncs: map[Event]webhooks.ProcessPayloadFunc{},\n\t}\n}", "func NewWebhook() *NamespaceWebhook {\n\tscheme := runtime.NewScheme()\n\terr := admissionv1.AddToScheme(scheme)\n\tif err != nil {\n\t\tlog.Error(err, \"Fail adding admissionsv1 scheme to NamespaceWebhook\")\n\t\tos.Exit(1)\n\t}\n\n\terr = corev1.AddToScheme(scheme)\n\tif err != nil {\n\t\tlog.Error(err, \"Fail adding corev1 scheme to NamespaceWebhook\")\n\t\tos.Exit(1)\n\t}\n\n\treturn &NamespaceWebhook{\n\t\ts: *scheme,\n\t}\n}", "func New(req *http.Request) (hook *Hook, err error) {\n\thook = new(Hook)\n\tif !strings.EqualFold(req.Method, \"POST\") {\n\t\treturn nil, errors.New(\"unknown method\")\n\t}\n\n\tif hook.Signature = req.Header.Get(\"X-ChatWorkWebhookSignature\"); len(hook.Signature) == 0 
{\n\t\treturn nil, errors.New(\"no signature\")\n\t}\n\n\thook.RawPayload, err = ioutil.ReadAll(req.Body)\n\treturn\n}", "func TestCreateAndUpdateWebhook(t *testing.T) {\n\tc, _ := NewClient(testClientID, testSecret, APIBaseSandBox)\n\tc.GetAccessToken()\n\n\tcreationPayload := &CreateWebhookRequest{\n\t\tURL: \"https://example.com/paypal_webhooks\",\n\t\tEventTypes: []WebhookEventType{\n\t\t\tWebhookEventType{\n\t\t\t\tName: \"PAYMENT.AUTHORIZATION.CREATED\",\n\t\t\t},\n\t\t},\n\t}\n\n\tcreatedWebhook, err := c.CreateWebhook(creationPayload)\n\tif err != nil {\n\t\tt.Errorf(\"Webhook couldn't be created, error %v\", err)\n\t}\n\n\tupdatePayload := []WebhookField{\n\t\tWebhookField{\n\t\t\tOperation: \"replace\",\n\t\t\tPath: \"/event_types\",\n\t\t\tValue: []interface{}{\n\t\t\t\tmap[string]interface{}{\n\t\t\t\t\t\"name\": \"PAYMENT.SALE.REFUNDED\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\t_, err = c.UpdateWebhook(createdWebhook.ID, updatePayload)\n\tif err != nil {\n\t\tt.Errorf(\"Couldn't update webhook, error %v\", err)\n\t}\n\n\terr = c.DeleteWebhook(createdWebhook.ID)\n\tif err != nil {\n\t\tt.Errorf(\"An error occurred while webhooks deletion, error %v\", err)\n\t}\n}", "func createValidatingWebhookConfiguration(f *framework.Framework, config *admissionregistrationv1.ValidatingWebhookConfiguration) (*admissionregistrationv1.ValidatingWebhookConfiguration, error) {\n\tctx := f.Context\n\tfor _, webhook := range config.Webhooks {\n\t\tif webhook.NamespaceSelector != nil && webhook.NamespaceSelector.MatchLabels[uniqueName] == \"true\" {\n\t\t\tcontinue\n\t\t}\n\t\tif webhook.ObjectSelector != nil && webhook.ObjectSelector.MatchLabels[uniqueName] == \"true\" {\n\t\t\tcontinue\n\t\t}\n\t\tf.Log.Failf(`webhook %s in config %s has no namespace or object selector with %s=\"true\", and can interfere with other tests`, webhook.Name, config.Name, uniqueName)\n\t}\n\treturn f.VclusterClient.AdmissionregistrationV1().ValidatingWebhookConfigurations().Create(ctx, config, metav1.CreateOptions{})\n}", "func NewWebhook(WebhookURL string) (whook *Webhook) {\n\twhook = &Webhook{\n\t\twebhookURL: WebhookURL,\n\t\tusingWebhook: false,\n\t}\n\n\tif whook.CheckWebhookExists() {\n\t\twhook.usingWebhook = true\n\t}\n\t\n\treturn whook\n}", "func NewWebhook(token string) *Webhook {\n\treturn &Webhook{\n\t\tToken: token,\n\t}\n}", "func TestCreateTaskWebhook(t *testing.T) {\n\tdata := []byte(`{\n \"eventType\": \"taskCreated\",\n \"userId\": \"ec1b92fb1868c44aa9a041583c000e2a\",\n \"userFullName\": \"John Doe\",\n \"timestamp\": \"2015-10-20T14:45:06.331Z\",\n \"task\": {\n \"_id\": \"60e8b629fc8d6d28b513807d7d86b133\",\n \"name\": \"Write report\",\n \"description\": \"For school\",\n \"color\": \"green\",\n \"columnId\": \"ff31c6b2374911e49d115f7064763810\",\n \"totalSecondsSpent\": 0,\n \"totalSecondsEstimate\": 0,\n \"swimlaneId\": \"e037a6400e8911e5bdc9053860f3e5c0\",\n \"dates\": [\n {\n \"targetColumnId\": \"ff31c6b4374911e49d115f7064763810\",\n \"status\": \"active\",\n \"dateType\": \"dueDate\",\n \"dueTimestamp\": \"2015-10-20T15:00:00Z\",\n \"dueTimestampLocal\": \"2015-10-20T17:00:00+02:00\"\n }\n ],\n \"subTasks\": [\n {\n \"name\": \"Proofread\",\n \"finished\": false\n }\n ],\n \"labels\": [\n {\n \"name\": \"Writing\",\n \"pinned\": false\n }\n ]\n }\n }`)\n\n\ts := &CreateTaskWebhook{}\n\tdecodeAndEncodeData(t, data, s)\n}", "func NewWebhook(db *sqlx.DB) *Webhook {\n\treturn &Webhook{db: db}\n}", "func (s *WebhooksServiceOp) Create(webhook Webhook, options ...interface{}) (Webhook, error) 
{\n\tvar webhookResponse GetWebhookResponse\n\tjsonBody, err := json.Marshal(webhook)\n\tif err != nil {\n\t\treturn webhookResponse.Data, err\n\t}\n\treqBody := bytes.NewReader(jsonBody)\n\tbody, reqErr := s.client.DoRequest(http.MethodPost, \"/v3/hooks\", reqBody)\n\tif reqErr != nil {\n\t\treturn webhookResponse.Data, reqErr\n\t}\n\n\tjsonErr := json.Unmarshal(body, &webhookResponse)\n\tif jsonErr != nil {\n\t\treturn webhookResponse.Data, jsonErr\n\t}\n\n\treturn webhookResponse.Data, nil\n}", "func TestCreateAndGetWebhook(t *testing.T) {\n\tc, _ := NewClient(testClientID, testSecret, APIBaseSandBox)\n\tc.GetAccessToken()\n\n\tpayload := &CreateWebhookRequest{\n\t\tURL: \"https://example.com/paypal_webhooks\",\n\t\tEventTypes: []WebhookEventType{\n\t\t\tWebhookEventType{\n\t\t\t\tName: \"PAYMENT.AUTHORIZATION.CREATED\",\n\t\t\t},\n\t\t},\n\t}\n\n\tcreatedWebhook, err := c.CreateWebhook(payload)\n\tif err != nil {\n\t\tt.Errorf(\"Webhook couldn't be created, error %v\", err)\n\t}\n\n\t_, err = c.GetWebhook(createdWebhook.ID)\n\tif err != nil {\n\t\tt.Errorf(\"An error occurred while getting webhook, error %v\", err)\n\t}\n\n\terr = c.DeleteWebhook(createdWebhook.ID)\n\tif err != nil {\n\t\tt.Errorf(\"An error occurred while webhooks deletion, error %v\", err)\n\t}\n}", "func (s *WebhookServiceOp) Create(ctx context.Context, webhook Webhook) (*Webhook, error) {\n\tpath := fmt.Sprintf(\"%s.json\", webhooksBasePath)\n\twrappedData := WebhookResource{Webhook: &webhook}\n\tresource := new(WebhookResource)\n\terr := s.client.Post(ctx, path, wrappedData, resource)\n\treturn resource.Webhook, err\n}", "func NewWebhookAPI(webhookChan chan dockworker.Job) WebhookAPI {\n\treturn WebhookAPI{\n\t\twebhookChan: webhookChan,\n\t}\n}", "func NewWebHook(\n\tsessionWorks repository.Session,\n\tconnectionsSupervisor Connections,\n\tauthorizer Authorizer,\n\twebhookURL string,\n\tmsgRepo repository.Message,\n\tclient httpInfra.Client,\n\tinterruptChan chan os.Signal,\n) *WebHook {\n\treturn &WebHook{\n\t\tsessionRepo: sessionWorks,\n\t\tconnectionsSupervisor: connectionsSupervisor,\n\t\tauth: authorizer,\n\t\twebhookURL: webhookURL,\n\t\tmsgRepo: msgRepo,\n\t\tclient: client,\n\t\tinterruptChan: interruptChan,\n\t}\n}", "func (c *MockWebhookClient) CreateWebhook(ctx context.Context, repo bitbucket.Repo, hook bitbucket.Webhook) (result bitbucket.Webhook, err error) {\n\treturn c.MockCreateWebhook(ctx, repo, hook)\n}", "func NewWebHook(accessToken string) *WebHook {\n\tbaseAPI := \"https://oapi.dingtalk.com/robot/send\"\n\treturn &WebHook{accessToken: accessToken, apiUrl: baseAPI}\n}", "func New(clientSecret string) Webhook {\n\treturn &webhook{\n\t\tsecret: clientSecret,\n\t\tevents: make(map[Event]Handler),\n\t}\n}", "func (client *WebhooksClient) createCreateRequest(ctx context.Context, resourceGroupName string, registryName string, webhookName string, webhookCreateParameters WebhookCreateParameters, options *WebhooksClientBeginCreateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/webhooks/{webhookName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = 
strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif registryName == \"\" {\n\t\treturn nil, errors.New(\"parameter registryName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{registryName}\", url.PathEscape(registryName))\n\tif webhookName == \"\" {\n\t\treturn nil, errors.New(\"parameter webhookName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{webhookName}\", url.PathEscape(webhookName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2019-05-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, runtime.MarshalAsJSON(req, webhookCreateParameters)\n}", "func New() (*WebhookHandler, error) {\n\tconf, err := NewConfig()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn NewWithConfig(conf)\n}", "func CreateWebhookListener(config,\n\tremoteConfig *rest.Config,\n\tscheme *runtime.Scheme,\n\ttlsKeyFile, tlsCrtFile string,\n\tcreateService bool) (*WebhookListener, error) {\n\tif klog.V(utils.QuiteLogLel) {\n\t\tfnName := utils.GetFnName()\n\t\tklog.Infof(\"Entering: %v()\", fnName)\n\n\t\tdefer klog.Infof(\"Exiting: %v()\", fnName)\n\t}\n\n\tvar err error\n\n\tdynamicClient := dynamic.NewForConfigOrDie(config)\n\n\tl := &WebhookListener{\n\t\tDynamicClient: dynamicClient,\n\t\tlocalConfig: config,\n\t}\n\n\t// The user-provided key and cert files take precedence over the default provided files if both sets exist.\n\tif _, err := os.Stat(defaultKeyFile); err == nil {\n\t\tl.TLSKeyFile = defaultKeyFile\n\t}\n\n\tif _, err := os.Stat(defaultCrtFile); err == nil {\n\t\tl.TLSCrtFile = defaultCrtFile\n\t}\n\n\tif _, err := os.Stat(tlsKeyFile); err == nil {\n\t\tl.TLSKeyFile = tlsKeyFile\n\t}\n\n\tif _, err := os.Stat(tlsCrtFile); err == nil {\n\t\tl.TLSCrtFile = tlsCrtFile\n\t}\n\n\tl.LocalClient, err = client.New(config, client.Options{})\n\n\tif err != nil {\n\t\tklog.Error(\"Failed to initialize client to update local status. error: \", err)\n\t\treturn nil, err\n\t}\n\n\tl.RemoteClient = l.LocalClient\n\tif remoteConfig != nil {\n\t\tl.RemoteClient, err = client.New(remoteConfig, client.Options{})\n\n\t\tif err != nil {\n\t\t\tklog.Error(\"Failed to initialize client to update remote status. error: \", err)\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tif createService {\n\t\tnamespace, err := getOperatorNamespace()\n\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// Create the webhook listener service only when the subscription controller runs in hub mode.\n\t\terr = createWebhookListnerService(l.LocalClient, namespace)\n\n\t\tif err != nil {\n\t\t\tklog.Error(\"Failed to create a service for Git webhook listener. 
error: \", err)\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treturn l, err\n}", "func handleNewHook(resp http.ResponseWriter, request *http.Request) {\n\tcors := handleCors(resp, request)\n\tif cors {\n\t\treturn\n\t}\n\n\tuser, err := handleApiAuthentication(resp, request)\n\tif err != nil {\n\t\tlog.Printf(\"Api authentication failed in set new workflowhandler: %s\", err)\n\t\tresp.WriteHeader(401)\n\t\tresp.Write([]byte(`{\"success\": false}`))\n\t\treturn\n\t}\n\n\ttype requestData struct {\n\t\tType string `json:\"type\"`\n\t\tDescription string `json:\"description\"`\n\t\tId string `json:\"id\"`\n\t\tName string `json:\"name\"`\n\t\tWorkflow string `json:\"workflow\"`\n\t}\n\n\tbody, err := ioutil.ReadAll(request.Body)\n\tif err != nil {\n\t\tlog.Printf(\"Body data error: %s\", err)\n\t\tresp.WriteHeader(401)\n\t\tresp.Write([]byte(`{\"success\": false}`))\n\t\treturn\n\t}\n\n\tlog.Println(\"Data: %s\", string(body))\n\n\tctx := context.Background()\n\tvar requestdata requestData\n\terr = yaml.Unmarshal([]byte(body), &requestdata)\n\tif err != nil {\n\t\tlog.Printf(\"Failed unmarshaling inputdata: %s\", err)\n\t\tresp.WriteHeader(401)\n\t\tresp.Write([]byte(`{\"success\": false}`))\n\t\treturn\n\t}\n\tlog.Printf(\"%#v\", requestdata)\n\n\t// CBA making a real thing. Already had some code lol\n\tnewId := requestdata.Id\n\tif len(newId) != 36 {\n\t\tlog.Printf(\"Bad ID\")\n\t\tresp.WriteHeader(401)\n\t\tresp.Write([]byte(`{\"success\": false, \"reason\": \"Invalid ID\"}`))\n\t\treturn\n\t}\n\n\tif requestdata.Id == \"\" || requestdata.Name == \"\" {\n\t\tresp.WriteHeader(401)\n\t\tresp.Write([]byte(`{\"success\": false, \"reason\": \"Requires fields id and name can't be empty\"}`))\n\t\treturn\n\n\t}\n\n\tvalidTypes := []string{\n\t\t\"webhook\",\n\t}\n\n\tisTypeValid := false\n\tfor _, thistype := range validTypes {\n\t\tif requestdata.Type == thistype {\n\t\t\tisTypeValid = true\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif !(isTypeValid) {\n\t\tlog.Printf(\"Type %s is not valid. 
Try any of these: %s\", requestdata.Type, strings.Join(validTypes, \", \"))\n\t\tresp.WriteHeader(401)\n\t\tresp.Write([]byte(`{\"success\": false}`))\n\t\treturn\n\t}\n\n\thook := Hook{\n\t\tId: newId,\n\t\tWorkflows: []string{requestdata.Workflow},\n\t\tInfo: Info{\n\t\t\tName: requestdata.Name,\n\t\t\tDescription: requestdata.Description,\n\t\t\tUrl: fmt.Sprintf(\"https://shuffler.io/functions/webhooks/webhook_%s\", newId),\n\t\t},\n\t\tType: \"webhook\",\n\t\tOwner: user.Username,\n\t\tStatus: \"uninitialized\",\n\t\tActions: []HookAction{\n\t\t\tHookAction{\n\t\t\t\tType: \"workflow\",\n\t\t\t\tName: requestdata.Name,\n\t\t\t\tId: requestdata.Workflow,\n\t\t\t\tField: \"\",\n\t\t\t},\n\t\t},\n\t\tRunning: false,\n\t}\n\n\tlog.Printf(\"Hello\")\n\n\t// FIXME: Add cloud function execution?\n\t//b, err := json.Marshal(hook)\n\t//if err != nil {\n\t//\tlog.Printf(\"Failed marshalling: %s\", err)\n\t//\tresp.WriteHeader(401)\n\t//\tresp.Write([]byte(`{\"success\": false}`))\n\t//\treturn\n\t//}\n\n\t//environmentVariables := map[string]string{\n\t//\t\"FUNCTION_APIKEY\": user.ApiKey,\n\t//\t\"CALLBACKURL\": \"https://shuffler.io\",\n\t//\t\"HOOKID\": hook.Id,\n\t//}\n\n\t//applocation := fmt.Sprintf(\"gs://%s/triggers/webhook.zip\", bucketName)\n\t//hookname := fmt.Sprintf(\"webhook_%s\", hook.Id)\n\t//err = deployWebhookFunction(ctx, hookname, defaultLocation, applocation, environmentVariables)\n\t//if err != nil {\n\t//\tlog.Printf(\"Error deploying hook: %s\", err)\n\t//\tresp.WriteHeader(401)\n\t//\tresp.Write([]byte(fmt.Sprintf(`{\"success\": false, \"reason\": \"Issue with starting hook. Please wait a second and try again\"}`)))\n\t//\treturn\n\t//}\n\n\thook.Status = \"running\"\n\thook.Running = true\n\terr = setHook(ctx, hook)\n\tif err != nil {\n\t\tlog.Printf(\"Failed setting hook: %s\", err)\n\t\tresp.WriteHeader(401)\n\t\tresp.Write([]byte(`{\"success\": false}`))\n\t\treturn\n\t}\n\n\terr = increaseStatisticsField(ctx, \"total_workflow_triggers\", requestdata.Workflow, 1)\n\tif err != nil {\n\t\tlog.Printf(\"Failed to increase total workflows: %s\", err)\n\t}\n\n\tlog.Println(\"Generating new hook\")\n\tresp.WriteHeader(200)\n\tresp.Write([]byte(`{\"success\": true}`))\n}", "func (*CreateWebhookRequest) Descriptor() ([]byte, []int) {\n\treturn file_events_Event_proto_rawDescGZIP(), []int{4}\n}", "func newPostBuildOpenAPIObjectFunc(config ServerConfig, container *restful.Container) restfulSpec.PostBuildSwaggerObjectFunc {\n\treturn func(swo *spec.Swagger) {\n\t\tswo.Host = config.OpenAPI.Host\n\t\tswo.BasePath = config.OpenAPI.BasePath\n\t\tswo.Schemes = config.OpenAPI.Schemas\n\n\t\tvar title, description string\n\t\tif config.Name != \"\" {\n\t\t\ttitle = config.Name\n\t\t} else {\n\t\t\ttitle = config.OpenAPI.Spec.Title\n\t\t}\n\t\tif config.Description != \"\" {\n\t\t\tdescription = config.Description\n\t\t} else {\n\t\t\tdescription = config.OpenAPI.Spec.Description\n\t\t}\n\t\tswo.Info = &spec.Info{\n\t\t\tInfoProps: spec.InfoProps{\n\t\t\t\tTitle: title,\n\t\t\t\tDescription: description,\n\t\t\t\tContact: &spec.ContactInfo{\n\t\t\t\t\tContactInfoProps: spec.ContactInfoProps{\n\t\t\t\t\t\tName: config.OpenAPI.Spec.Contact.Name,\n\t\t\t\t\t\tEmail: config.OpenAPI.Spec.Contact.Email,\n\t\t\t\t\t\tURL: config.OpenAPI.Spec.Contact.URL,\n\t\t\t\t\t},\n\t\t\t\t},\n\n\t\t\t\tLicense: &spec.License{\n\t\t\t\t\tLicenseProps: spec.LicenseProps{\n\t\t\t\t\t\tName: config.OpenAPI.Spec.License.Name,\n\t\t\t\t\t\tURL: 
config.OpenAPI.Spec.License.URL,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tVersion: config.OpenAPI.Spec.Version,\n\t\t\t},\n\t\t}\n\n\t\tvar nTags []spec.Tag\n\t\tvar tags []OpenapiTagConfig\n\t\tif len(config.OpenAPI.Tags) > 0 {\n\t\t\ttags = config.OpenAPI.Tags\n\t\t} else {\n\t\t\ttags = config.OpenAPI.Tags\n\t\t}\n\t\tfor _, tag := range tags {\n\t\t\tnTag := spec.Tag{TagProps: spec.TagProps{Name: tag.Name, Description: tag.Description}}\n\n\t\t\tnTags = append(nTags, nTag)\n\t\t}\n\t\tswo.Tags = nTags\n\t\t// setup security definitions\n\t\tif config.OpenAPI.Auth == \"basic\" {\n\t\t\tswo.SecurityDefinitions = map[string]*spec.SecurityScheme{\n\t\t\t\t\"basicAuth\": spec.BasicAuth(),\n\t\t\t}\n\t\t\tauth := make(map[string][]string)\n\t\t\tauth[\"basicAuth\"] = []string{}\n\t\t\tswo.Security = append(swo.Security, auth)\n\t\t} else if config.OpenAPI.Auth == \"jwt\" {\n\t\t\tswo.SecurityDefinitions = map[string]*spec.SecurityScheme{\n\t\t\t\t\"jwt\": spec.APIKeyAuth(\"Authorization\", \"header\"),\n\t\t\t}\n\t\t\tenrichSwaggerObjectSecurity(swo, container)\n\t\t}\n\n\t}\n}", "func (in *EKSPodIdentityWebhookSpec) DeepCopy() *EKSPodIdentityWebhookSpec {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(EKSPodIdentityWebhookSpec)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (c *EcomClient) CreateWebhook(ctx context.Context, p *CreateWebhookRequest) (*WebhookResponse, error) {\n\trequest, err := json.Marshal(&p)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"%w: client: json marshal\", err)\n\t}\n\n\turl := c.endpoint + \"/webhooks\"\n\tbody := strings.NewReader(string(request))\n\tres, err := c.request(http.MethodPost, url, body)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"%w: request\", err)\n\t}\n\tdefer res.Body.Close()\n\n\tif res.StatusCode >= 400 {\n\t\tvar e badRequestResponse\n\t\tdec := json.NewDecoder(res.Body)\n\t\tdec.DisallowUnknownFields()\n\t\tif err := dec.Decode(&e); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"%w: client decode\", err)\n\t\t}\n\t\treturn nil, fmt.Errorf(\"status: %d, code: %s, message: %s\", e.Status, e.Code, e.Message)\n\t}\n\n\tvar webhook WebhookResponse\n\tif err = json.NewDecoder(res.Body).Decode(&webhook); err != nil {\n\t\treturn nil, fmt.Errorf(\"%w: decode\", err)\n\t}\n\treturn &webhook, nil\n}", "func New(options ...Option) (*Webhook, error) {\n\thook := new(Webhook)\n\tfor _, opt := range options {\n\t\tif err := opt(hook); err != nil {\n\t\t\treturn nil, errors.New(\"Error applying Option\")\n\t\t}\n\t}\n\treturn hook, nil\n}", "func (c *ApiService) CreateWebhook(ServiceSid string, params *CreateWebhookParams) (*VerifyV2Webhook, error) {\n\tpath := \"/v2/Services/{ServiceSid}/Webhooks\"\n\tpath = strings.Replace(path, \"{\"+\"ServiceSid\"+\"}\", ServiceSid, -1)\n\n\tdata := url.Values{}\n\theaders := make(map[string]interface{})\n\n\tif params != nil && params.FriendlyName != nil {\n\t\tdata.Set(\"FriendlyName\", *params.FriendlyName)\n\t}\n\tif params != nil && params.EventTypes != nil {\n\t\tfor _, item := range *params.EventTypes {\n\t\t\tdata.Add(\"EventTypes\", item)\n\t\t}\n\t}\n\tif params != nil && params.WebhookUrl != nil {\n\t\tdata.Set(\"WebhookUrl\", *params.WebhookUrl)\n\t}\n\tif params != nil && params.Status != nil {\n\t\tdata.Set(\"Status\", *params.Status)\n\t}\n\tif params != nil && params.Version != nil {\n\t\tdata.Set(\"Version\", *params.Version)\n\t}\n\n\tresp, err := c.requestHandler.Post(c.baseURL+path, data, headers)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdefer resp.Body.Close()\n\n\tps := 
&VerifyV2Webhook{}\n\tif err := json.NewDecoder(resp.Body).Decode(ps); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn ps, err\n}", "func GenTestConfig(name string) (*probes_configpb.ProbeDef, *monitorpb.HermesProbeDef) {\n\tprobeDef := &probes_configpb.ProbeDef{\n\t\tName: proto.String(name),\n\t\tType: probes_configpb.ProbeDef_EXTENSION.Enum(),\n\t}\n\n\thermesExtension := &monitorpb.HermesProbeDef{\n\t\tProbeName: proto.String(name),\n\t\tTargets: []*monitorpb.Target{\n\t\t\t&monitorpb.Target{\n\t\t\t\tName: \"hermes\",\n\t\t\t\tTargetSystem: monitorpb.Target_GOOGLE_CLOUD_STORAGE,\n\t\t\t\tTotalSpaceAllocatedMib: int64(100),\n\t\t\t\tBucketName: \"test_bucket_5\",\n\t\t\t},\n\t\t},\n\t\tTargetSystem: monitorpb.HermesProbeDef_GCS.Enum(),\n\t\tIntervalSec: proto.Int32(3600),\n\t\tTimeoutSec: proto.Int32(60),\n\t\tProbeLatencyDistribution: &metricpb.Dist{\n\t\t\tBuckets: &metricpb.Dist_ExplicitBuckets{\n\t\t\t\tExplicitBuckets: \"0.1, 0.2, 0.4, 0.6 0.8, 1.6, 3.2, 6.4, 12.8, 1000\",\n\t\t\t},\n\t\t},\n\t\tApiCallLatencyDistribution: &metricpb.Dist{\n\t\t\tBuckets: &metricpb.Dist_ExplicitBuckets{\n\t\t\t\tExplicitBuckets: \"0.000000002, 0.000000004, 0.000000008, 0.000000016, 0.00000032, 0.000000064, 0.000000128, 100\",\n\t\t\t},\n\t\t},\n\t}\n\tproto.SetExtension(probeDef, monitorpb.E_HermesProbeDef_HermesProbeDef, hermesExtension)\n\treturn probeDef, hermesExtension\n}", "func (c *Client) CreateWebhook(ctx context.Context, repo bitbucket.Repo, hook bitbucket.Webhook) (bitbucket.Webhook, error) {\n\tmarshalledPayload, err := json.Marshal(hook)\n\tif err != nil {\n\t\treturn bitbucket.Webhook{}, err\n\t}\n\n\turl := c.BaseURL + fmt.Sprintf(\"/rest/api/1.0/projects/%s/repos/%s/webhooks\",\n\t\turl.PathEscape(repo.ProjectKey), url.PathEscape(repo.Repo))\n\n\treq, err := http.NewRequestWithContext(ctx, http.MethodPost, url, bytes.NewBuffer(marshalledPayload))\n\tif err != nil {\n\t\treturn bitbucket.Webhook{}, err\n\t}\n\n\tvar response bitbucket.Webhook\n\tif err := c.sendRequest(req, &response); err != nil {\n\t\treturn bitbucket.Webhook{}, err\n\t}\n\treturn response, nil\n}", "func (w *WebhookServiceOp) Create(webhook Webhook) (*Webhook, error) {\n\tpath := fmt.Sprintf(\"%s\", webhooksBasePath)\n\tresource := new(Webhook)\n\terr := w.client.Post(path, webhook, &resource)\n\treturn resource, err\n}", "func NewWebhookScaffolder(config config.Config, resource resource.Resource, force bool) plugins.Scaffolder {\n\treturn &webhookScaffolder{\n\t\tconfig: config,\n\t\tresource: resource,\n\t\tforce: force,\n\t}\n}", "func NewPodWebhook(minScore int, mrec metrics.Recorder, logger log.Logger) (webhook.Webhook, error) {\n\n\t// Create validators.\n\tval := &PodValidator{\n\t\tLogger: logger,\n\t}\n\n\tcfg := validating.WebhookConfig{\n\t\tName: \"in-toto-pod\",\n\t\tObj: &v1.Pod{},\n\t}\n\n\treturn validating.NewWebhook(cfg, val, mrec, logger)\n}", "func (z *Client) CreateWebhook(ctx context.Context, hook *Webhook) (*Webhook, error) {\n\tvar data, result struct {\n\t\tWebhook *Webhook `json:\"webhook\"`\n\t}\n\tdata.Webhook = hook\n\n\tbody, err := z.post(ctx, \"/webhooks\", data)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = json.Unmarshal(body, &result)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.Webhook, nil\n}", "func NewBootstrapSpec(spec *job.BootstrapSpec) *BootstrapSpec {\n\treturn &BootstrapSpec{\n\t\tContractID: spec.ContractID,\n\t\tRelay: spec.Relay,\n\t\tRelayConfig: spec.RelayConfig,\n\t\tBlockchainTimeout: 
spec.BlockchainTimeout,\n\t\tContractConfigTrackerPollInterval: spec.ContractConfigTrackerPollInterval,\n\t\tContractConfigConfirmations: spec.ContractConfigConfirmations,\n\t\tCreatedAt: spec.CreatedAt,\n\t\tUpdatedAt: spec.UpdatedAt,\n\t}\n}", "func (*CreateWebhookRequest) Descriptor() ([]byte, []int) {\n\treturn file_uac_Event_proto_rawDescGZIP(), []int{3}\n}", "func (m *Client) CreateWebhook(arg0 context.Context, arg1 *zendesk.Webhook) (*zendesk.Webhook, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"CreateWebhook\", arg0, arg1)\n\tret0, _ := ret[0].(*zendesk.Webhook)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func CreateWebhook(user string, repo string, accesstoken string, webhookUrl string, secret string) (int, error) {\n\tdata := user + \":\" + accesstoken\n\tsEnc := base64.StdEncoding.EncodeToString([]byte(data))\n\tname := \"web\"\n\tactive := true\n\thook := github.Hook{\n\t\tName: &name,\n\t\tActive: &active,\n\t\tConfig: make(map[string]interface{}),\n\t\tEvents: []string{\"push\"},\n\t}\n\n\thook.Config[\"url\"] = webhookUrl\n\thook.Config[\"content_type\"] = \"json\"\n\thook.Config[\"secret\"] = secret\n\thook.Config[\"insecure_ssl\"] = \"1\"\n\n\tlogrus.Infof(\"hook to create:%v\", hook)\n\tb := new(bytes.Buffer)\n\tjson.NewEncoder(b).Encode(hook)\n\thc := http.Client{}\n\tAPIURL := fmt.Sprintf(\"https://api.github.com/repos/%v/%v/hooks\", user, repo)\n\treq, err := http.NewRequest(\"POST\", APIURL, b)\n\n\treq.Header.Add(\"Authorization\", \"Basic \"+sEnc)\n\n\tresp, err := hc.Do(req)\n\tif err != nil {\n\t\treturn -1, err\n\t}\n\trespData, err := ioutil.ReadAll(resp.Body)\n\tlogrus.Infof(\"respData:%v\", string(respData))\n\tif resp.StatusCode > 399 {\n\t\treturn -1, errors.New(string(respData))\n\t}\n\terr = json.Unmarshal(respData, &hook)\n\tif err != nil {\n\t\treturn -1, err\n\t}\n\treturn hook.GetID(), err\n}", "func (o *CreateGitWebhookUsingPOSTParams) WithGitWebhookSpec(gitWebhookSpec models.GitWebhookSpec) *CreateGitWebhookUsingPOSTParams {\n\to.SetGitWebhookSpec(gitWebhookSpec)\n\treturn o\n}", "func NewSpec(details *SpecDetails) *Spec {\n\treturn &Spec{\n\t\tDetails: details,\n\t\tServices: NewServiceList(),\n\t\tStatus: SpecWaiting,\n\t}\n}", "func BuildOpenAPISpec(webServices []*restful.WebService, config *common.Config) (*spec3.OpenAPI, error) {\n\treturn BuildOpenAPISpecFromRoutes(restfuladapter.AdaptWebServices(webServices), config)\n}", "func (g *Gitlab) CreateWebHook(cfg *api.SCMConfig, repoURL string, webHook *scm.WebHook) error {\n\tif webHook == nil || len(webHook.Url) == 0 || len(webHook.Events) == 0 {\n\t\treturn fmt.Errorf(\"The webhook %v is not correct\", webHook)\n\t}\n\n\tclient, err := newGitlabClient(cfg.Server, cfg.Username, cfg.Token)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tenableState, disableState := true, false\n\t// Push event is enable for Gitlab webhook in default, so need to remove this default option.\n\thook := gitlab.AddProjectHookOptions{\n\t\tPushEvents: &disableState,\n\t}\n\n\tfor _, e := range webHook.Events {\n\t\tswitch e {\n\t\tcase scm.PullRequestEventType:\n\t\t\thook.MergeRequestsEvents = &enableState\n\t\tcase scm.PullRequestCommentEventType:\n\t\t\thook.NoteEvents = &enableState\n\t\tcase scm.PushEventType:\n\t\t\thook.PushEvents = &enableState\n\t\tcase scm.TagReleaseEventType:\n\t\t\thook.TagPushEvents = &enableState\n\t\tdefault:\n\t\t\tlog.Errorf(\"The event type %s is not supported, will be ignored\", e)\n\t\t\treturn nil\n\t\t}\n\t}\n\thook.URL = &webHook.Url\n\n\tonwer, name := 
parseURL(repoURL)\n\t_, _, err = client.Projects.AddProjectHook(onwer+\"/\"+name, &hook)\n\treturn err\n}", "func WebhookWithOptions(o WebhookOptions) filters.Spec {\n\treturn &webhookSpec{options: o}\n}", "func WebhookDeploymentReconciler(data operatingSystemManagerData) reconciling.NamedDeploymentReconcilerFactory {\n\treturn func() (string, reconciling.DeploymentReconciler) {\n\t\treturn resources.OperatingSystemManagerWebhookDeploymentName, func(dep *appsv1.Deployment) (*appsv1.Deployment, error) {\n\t\t\targs := []string{\n\t\t\t\t\"-logtostderr\",\n\t\t\t\t\"-v\", \"4\",\n\t\t\t\t\"-namespace\", \"kube-system\",\n\t\t\t}\n\n\t\t\tdep.Name = resources.OperatingSystemManagerWebhookDeploymentName\n\t\t\tdep.Labels = resources.BaseAppLabels(resources.OperatingSystemManagerWebhookDeploymentName, nil)\n\n\t\t\tdep.Spec.Replicas = resources.Int32(1)\n\t\t\tdep.Spec.Selector = &metav1.LabelSelector{\n\t\t\t\tMatchLabels: resources.BaseAppLabels(resources.OperatingSystemManagerWebhookDeploymentName, nil),\n\t\t\t}\n\n\t\t\tdep.Spec.Template.Spec.ImagePullSecrets = []corev1.LocalObjectReference{{Name: resources.ImagePullSecretName}}\n\n\t\t\tvolumes := []corev1.Volume{getWebhookKubeconfigVolume(), getServingCertVolume(), getCABundleVolume()}\n\t\t\tdep.Spec.Template.Spec.Volumes = volumes\n\n\t\t\tpodLabels, err := data.GetPodTemplateLabels(resources.OperatingSystemManagerWebhookDeploymentName, volumes, nil)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"failed to create pod labels: %w\", err)\n\t\t\t}\n\n\t\t\tdep.Spec.Template.ObjectMeta = metav1.ObjectMeta{Labels: podLabels}\n\n\t\t\tdep.Spec.Template.Spec.InitContainers = []corev1.Container{}\n\n\t\t\trepository := registry.Must(data.RewriteImage(resources.RegistryQuay + \"/kubermatic/operating-system-manager\"))\n\t\t\tif r := data.OperatingSystemManagerImageRepository(); r != \"\" {\n\t\t\t\trepository = r\n\t\t\t}\n\n\t\t\ttag := Tag\n\t\t\tif t := data.OperatingSystemManagerImageTag(); t != \"\" {\n\t\t\t\ttag = t\n\t\t\t}\n\n\t\t\tdep.Spec.Template.Spec.Containers = []corev1.Container{\n\t\t\t\t{\n\t\t\t\t\tName: Name,\n\t\t\t\t\tImage: repository + \":\" + tag,\n\t\t\t\t\tCommand: []string{\"/usr/local/bin/webhook\"},\n\t\t\t\t\tArgs: args,\n\t\t\t\t\tEnv: []corev1.EnvVar{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: \"KUBECONFIG\",\n\t\t\t\t\t\t\tValue: \"/etc/kubernetes/worker-kubeconfig/kubeconfig\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tPorts: []corev1.ContainerPort{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: \"webhook-server\",\n\t\t\t\t\t\t\tContainerPort: 9443,\n\t\t\t\t\t\t\tProtocol: corev1.ProtocolTCP,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tReadinessProbe: &corev1.Probe{\n\t\t\t\t\t\tProbeHandler: corev1.ProbeHandler{\n\t\t\t\t\t\t\tHTTPGet: &corev1.HTTPGetAction{\n\t\t\t\t\t\t\t\tPath: \"/readyz\",\n\t\t\t\t\t\t\t\tPort: intstr.FromInt(8081),\n\t\t\t\t\t\t\t\tScheme: corev1.URISchemeHTTP,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tFailureThreshold: 3,\n\t\t\t\t\t\tPeriodSeconds: 10,\n\t\t\t\t\t\tSuccessThreshold: 1,\n\t\t\t\t\t\tTimeoutSeconds: 15,\n\t\t\t\t\t},\n\t\t\t\t\tLivenessProbe: &corev1.Probe{\n\t\t\t\t\t\tFailureThreshold: 8,\n\t\t\t\t\t\tProbeHandler: corev1.ProbeHandler{\n\t\t\t\t\t\t\tHTTPGet: &corev1.HTTPGetAction{\n\t\t\t\t\t\t\t\tPath: \"/healthz\",\n\t\t\t\t\t\t\t\tPort: intstr.FromInt(8081),\n\t\t\t\t\t\t\t\tScheme: corev1.URISchemeHTTP,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tInitialDelaySeconds: 15,\n\t\t\t\t\t\tPeriodSeconds: 10,\n\t\t\t\t\t\tSuccessThreshold: 1,\n\t\t\t\t\t\tTimeoutSeconds: 
15,\n\t\t\t\t\t},\n\t\t\t\t\tVolumeMounts: []corev1.VolumeMount{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: resources.OperatingSystemManagerWebhookKubeconfigSecretName,\n\t\t\t\t\t\t\tMountPath: \"/etc/kubernetes/worker-kubeconfig\",\n\t\t\t\t\t\t\tReadOnly: true,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: resources.OperatingSystemManagerWebhookServingCertSecretName,\n\t\t\t\t\t\t\tMountPath: \"/tmp/k8s-webhook-server/serving-certs\",\n\t\t\t\t\t\t\tReadOnly: true,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: resources.CABundleConfigMapName,\n\t\t\t\t\t\t\tMountPath: \"/etc/kubernetes/pki/ca-bundle\",\n\t\t\t\t\t\t\tReadOnly: true,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}\n\t\t\terr = resources.SetResourceRequirements(dep.Spec.Template.Spec.Containers, webhookResourceRequirements, nil, dep.Annotations)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"failed to set resource requirements: %w\", err)\n\t\t\t}\n\n\t\t\twrappedPodSpec, err := apiserver.IsRunningWrapper(data, dep.Spec.Template.Spec, sets.New(Name))\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"failed to add apiserver.IsRunningWrapper: %w\", err)\n\t\t\t}\n\t\t\tdep.Spec.Template.Spec = *wrappedPodSpec\n\n\t\t\treturn dep, nil\n\t\t}\n\t}\n}", "func NewWebhooksJsonWebhook() *WebhooksJsonWebhook {\n\tthis := WebhooksJsonWebhook{}\n\treturn &this\n}", "func (t *TauAPI) CreateWebhook(webhook Webhook) (ID int64, error error) {\n\tjsonPostMsg, _ := json.Marshal(webhook)\n\tjsonData, err := t.doTauRequest(&TauReq{\n\t\tVersion: 2,\n\t\tMethod: \"POST\",\n\t\tPath: \"webhooks/webhooks\",\n\t\tNeedsAuth: true,\n\t\tPostMsg: jsonPostMsg,\n\t})\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tif string(jsonData) == \"[\\\"Limit reached\\\"]\" {\n\t\treturn 0, fmt.Errorf(\"Limit of webhooks reached (5)\")\n\t}\n\tvar d struct {\n\t\tID int64 `json:\"id\"`\n\t}\n\tif err := json.Unmarshal(jsonData, &d); err != nil {\n\t\treturn 0, fmt.Errorf(\"CreateWebhook -> unmarshal jsonData %v\", err)\n\t}\n\treturn d.ID, nil\n}", "func TestWebHook(t *testing.T) {\n\tg := gomega.NewGomegaWithT(t)\n\tg.Expect(createCertificates(t)).NotTo(gomega.HaveOccurred())\n\n\t// create manager\n\tmgr, err := manager.New(cfg, manager.Options{\n\t\tMetricsBindAddress: \"0\",\n\t})\n\tg.Expect(err).NotTo(gomega.HaveOccurred())\n\n\tc = mgr.GetClient()\n\n\t// add webhook to manager\n\tAdd(mgr)\n\n\t// start manager\n\tstopMgr, mgrStopped := StartTestManager(mgr, g)\n\tdefer func() {\n\t\tclose(stopMgr)\n\t\tmgrStopped.Wait()\n\t}()\n\n\tg.Expect(c.Create(context.TODO(), fnConfig)).NotTo(gomega.HaveOccurred())\n\n\ttestInvalidFunc(t)\n\ttestHandleDefaults(t)\n}", "func (in *AdmissionWebhookConfigurationSpec) DeepCopy() *AdmissionWebhookConfigurationSpec {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(AdmissionWebhookConfigurationSpec)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (c *Client) CreateWebhook(req CreateWebhookReq) (Webhook, error) {\n\tvar res Webhook\n\terr := c.post(c.baseURL+\"/webhooks\", webhookReq{Webhook: req}, &res)\n\n\treturn res, err\n}", "func (*Webhook) Descriptor() ([]byte, []int) {\n\treturn file_google_actions_sdk_v2_webhook_proto_rawDescGZIP(), []int{0}\n}", "func NewPodWebhook(grafeasUrl string, mrec metrics.Recorder, logger log.Logger) (webhook.Webhook, error) {\n\n\t// Create validators.\n\tval := &podValidator{\n\t\tgrafeasUrl: grafeasUrl,\n\t\tlogger: logger,\n\t}\n\n\tcfg := validating.WebhookConfig{\n\t\tName: \"grafeas-image-signing-pod\",\n\t\tObj: &v1.Pod{},\n\t}\n\n\treturn 
validating.NewWebhook(cfg, val, mrec, logger)\n}", "func appendWebhookConfiguration(mutatingWebhooks []*admissionv1.MutatingWebhookConfiguration, validatingWebhooks []*admissionv1.ValidatingWebhookConfiguration, configyamlFile []byte, tag string) ([]*admissionv1.MutatingWebhookConfiguration, []*admissionv1.ValidatingWebhookConfiguration, error) {\n\tobjs, err := utilyaml.ToUnstructured(configyamlFile)\n\tif err != nil {\n\t\tklog.Fatalf(\"failed to parse yaml\")\n\t}\n\t// look for resources of kind MutatingWebhookConfiguration\n\tfor i := range objs {\n\t\to := objs[i]\n\t\tif o.GetKind() == mutatingWebhookKind {\n\t\t\t// update the name in metadata\n\t\t\tif o.GetName() == mutatingwebhook {\n\t\t\t\to.SetName(fmt.Sprintf(\"%s-%s\", mutatingwebhook, tag))\n\n\t\t\t\twebhook := &admissionv1.MutatingWebhookConfiguration{}\n\t\t\t\tif err := scheme.Scheme.Convert(&o, webhook, nil); err != nil {\n\t\t\t\t\tklog.Fatalf(\"failed to convert MutatingWebhookConfiguration %s\", o.GetName())\n\t\t\t\t}\n\n\t\t\t\tmutatingWebhooks = append(mutatingWebhooks, webhook)\n\t\t\t}\n\t\t}\n\t\tif o.GetKind() == validatingWebhookKind {\n\t\t\t// update the name in metadata\n\t\t\tif o.GetName() == validatingwebhook {\n\t\t\t\to.SetName(fmt.Sprintf(\"%s-%s\", validatingwebhook, tag))\n\n\t\t\t\twebhook := &admissionv1.ValidatingWebhookConfiguration{}\n\t\t\t\tif err := scheme.Scheme.Convert(&o, webhook, nil); err != nil {\n\t\t\t\t\tklog.Fatalf(\"failed to convert ValidatingWebhookConfiguration %s\", o.GetName())\n\t\t\t\t}\n\n\t\t\t\tvalidatingWebhooks = append(validatingWebhooks, webhook)\n\t\t\t}\n\t\t}\n\t}\n\treturn mutatingWebhooks, validatingWebhooks, err\n}", "func (g *V3) CreateWebhook(repo string, webhook *scm.Webhook) error {\n\tif webhook == nil || len(webhook.URL) == 0 || len(webhook.Events) == 0 {\n\t\treturn fmt.Errorf(\"The webhook %v is not correct\", webhook)\n\t}\n\n\t_, err := g.GetWebhook(repo, webhook.URL)\n\tif err != nil {\n\t\tif !cerr.ErrorContentNotFound.Derived(err) {\n\t\t\treturn err\n\t\t}\n\n\t\thook := generateV3ProjectHook(webhook)\n\t\t_, resp, err := g.client.Projects.AddProjectHook(repo, hook)\n\t\tif err != nil {\n\t\t\treturn convertGitlabError(err, resp)\n\t\t}\n\t\treturn nil\n\t}\n\n\tlog.Warningf(\"Webhook already existed: %+v\", webhook)\n\treturn err\n}", "func NewCmdWebhooks() *cobra.Command {\n\tvar cmd = &cobra.Command{\n\t\tUse: \"webhooks\",\n\t\tShort: \"Webhooks Management\",\n\t}\n\tcmd.AddCommand(NewCmdWebhooksCreate())\n\tcmd.AddCommand(NewCmdWebhooksList())\n\tcmd.AddCommand(NewCmdWebhooksGet())\n\tcmd.AddCommand(NewCmdWebhooksUpdate())\n\tcmd.AddCommand(NewCmdWebhooksDelete())\n\treturn cmd\n}", "func (h *HttpHandlerFactory) newHttpHandler(webhookHandler WebhookHandlerBase, mutateFunc func(ctx context.Context, review v1beta1.AdmissionReview) ([]PatchOperation, error)) http.HandlerFunc {\n\treturn func(writer http.ResponseWriter, request *http.Request) {\n\t\tif request.Header.Get(\"Content-Type\") != \"application/json\" {\n\t\t\twebhookHandler.Log(\"level\", \"error\", \"message\", fmt.Sprintf(\"invalid content-type: %q\", request.Header.Get(\"Content-Type\")))\n\t\t\twriter.WriteHeader(http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\n\t\tdata, err := ioutil.ReadAll(request.Body)\n\t\tif err != nil {\n\t\t\twebhookHandler.Log(\"level\", \"error\", \"message\", \"unable to read request\", \"stack\", microerror.JSON(err))\n\t\t\twriter.WriteHeader(http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\n\t\treview := v1beta1.AdmissionReview{}\n\t\tif _, _, 
err := Deserializer.Decode(data, nil, &review); err != nil {\n\t\t\twebhookHandler.Log(\"level\", \"error\", \"message\", \"unable to parse admission review request\", \"stack\", microerror.JSON(err))\n\t\t\twriter.WriteHeader(http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\n\t\tvar patch []PatchOperation\n\t\tif review.Request.DryRun != nil && *review.Request.DryRun {\n\t\t\twebhookHandler.Log(\"level\", \"debug\", \"message\", \"Dry run is not supported. Request processing stopped.\", \"stack\", microerror.JSON(err))\n\t\t} else {\n\t\t\tpatch, err = mutateFunc(request.Context(), review)\n\t\t\tif err != nil {\n\t\t\t\twriteResponse(webhookHandler, writer, errorResponse(review.Request.UID, microerror.Mask(err)))\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\tresourceName := fmt.Sprintf(\"%s %s/%s\", review.Request.Kind, review.Request.Namespace, extractName(review.Request))\n\t\tpatchData, err := json.Marshal(patch)\n\t\tif err != nil {\n\t\t\twebhookHandler.Log(\"level\", \"error\", \"message\", fmt.Sprintf(\"unable to serialize patch for %s\", resourceName), \"stack\", microerror.JSON(err))\n\t\t\twriteResponse(webhookHandler, writer, errorResponse(review.Request.UID, InternalError))\n\t\t\treturn\n\t\t}\n\n\t\twebhookHandler.Log(\"level\", \"debug\", \"message\", fmt.Sprintf(\"admitted %s (with %d patches)\", resourceName, len(patch)))\n\n\t\tpt := v1beta1.PatchTypeJSONPatch\n\t\twriteResponse(webhookHandler, writer, &v1beta1.AdmissionResponse{\n\t\t\tAllowed: true,\n\t\t\tUID: review.Request.UID,\n\t\t\tPatch: patchData,\n\t\t\tPatchType: &pt,\n\t\t})\n\t}\n}", "func newWebhookNotify(accountID string) (*logrus.Logger, error) {\n\trNotify := serverConfig.Notify.GetWebhookByID(accountID)\n\tif rNotify.Endpoint == \"\" {\n\t\treturn nil, errInvalidArgument\n\t}\n\n\tif err := lookupEndpoint(rNotify.Endpoint); err != nil {\n\t\treturn nil, err\n\t}\n\n\tconn := httpConn{\n\t\t// Configure aggressive timeouts for client posts.\n\t\tClient: &http.Client{\n\t\t\tTransport: &http.Transport{\n\t\t\t\tTLSClientConfig: &tls.Config{RootCAs: globalRootCAs},\n\t\t\t\tDialContext: (&net.Dialer{\n\t\t\t\t\tTimeout: 5 * time.Second,\n\t\t\t\t\tKeepAlive: 5 * time.Second,\n\t\t\t\t}).DialContext,\n\t\t\t\tTLSHandshakeTimeout: 3 * time.Second,\n\t\t\t\tResponseHeaderTimeout: 3 * time.Second,\n\t\t\t\tExpectContinueTimeout: 2 * time.Second,\n\t\t\t},\n\t\t},\n\t\tEndpoint: rNotify.Endpoint,\n\t}\n\n\tnotifyLog := logrus.New()\n\tnotifyLog.Out = ioutil.Discard\n\n\t// Set default JSON formatter.\n\tnotifyLog.Formatter = new(logrus.JSONFormatter)\n\n\tnotifyLog.Hooks.Add(conn)\n\n\t// Success\n\treturn notifyLog, nil\n}", "func expectedValidatingWebhooksConfiguration(name string, previousGenerations []operatorsv1.GenerationStatus) int64 {\n\tgeneration := resourcemerge.GenerationFor(previousGenerations, schema.GroupResource{Group: admissionregistrationv1.SchemeGroupVersion.Group, Resource: \"validatingwebhookconfigurations\"}, \"\", name)\n\tif generation != nil {\n\t\treturn generation.LastGeneration\n\t}\n\treturn -1\n}", "func makePodSpec(t thanosv1beta1.Receiver) (*corev1.PodSpec, error) {\n\n\tif t.Spec.ReceivePrefix == \"\" {\n\t\tt.Spec.ReceivePrefix = receiverDir\n\t}\n\tif t.Spec.Retention == \"\" {\n\t\tt.Spec.Retention = defaultRetetion\n\t}\n\t// TODO set args to spec\n\tthanosArgs := []string{\n\t\t\"receive\",\n\t\tfmt.Sprintf(\"--tsdb.path=%s\", t.Spec.ReceivePrefix),\n\t\tfmt.Sprintf(\"--tsdb.retention=%s\", t.Spec.Retention),\n\t\tfmt.Sprintf(\"--labels=receive=\\\"%s\\\"\", 
t.Spec.ReceiveLables),\n\t\tfmt.Sprintf(\"--objstore.config=type: %s\\nconfig:\\n bucket: \\\"%s\\\"\", t.Spec.ObjectStorageType, t.Spec.BucketName),\n\t}\n\tif t.Spec.LogLevel != \"\" && t.Spec.LogLevel != \"info\" {\n\t\tthanosArgs = append(thanosArgs, fmt.Sprintf(\"--log.level=%s\", t.Spec.LogLevel))\n\t}\n\tenv := []corev1.EnvVar{\n\t\t{\n\t\t\tName: \"GOOGLE_APPLICATION_CREDENTIALS\",\n\t\t\tValue: secretsDir + t.Spec.SecretName + \".json\",\n\t\t},\n\t}\n\n\tports := []corev1.ContainerPort{\n\t\t{\n\t\t\tContainerPort: 10902,\n\t\t\tName: \"http\",\n\t\t},\n\t\t{\n\t\t\tContainerPort: 10901,\n\t\t\tName: \"grpc\",\n\t\t},\n\t}\n\n\tif strings.Contains(t.Name, \"receiver\") {\n\t\tports = append(ports, corev1.ContainerPort{\n\t\t\tContainerPort: 19291,\n\t\t\tName: \"receive\",\n\t\t})\n\t}\n\n\t// mount to pod\n\tvolumemounts := []corev1.VolumeMount{\n\t\t{\n\t\t\tName: \"thanos-persistent-storage\",\n\t\t\tMountPath: t.Spec.Retention,\n\t\t},\n\t\t{\n\t\t\tName: \"google-cloud-key\",\n\t\t\tMountPath: secretsDir,\n\t\t},\n\t}\n\n\tcontainers := []corev1.Container{\n\t\t{\n\t\t\tName: \"receiver\",\n\t\t\tImage: *t.Spec.Image,\n\t\t\tArgs: thanosArgs,\n\t\t\tEnv: env,\n\t\t\tPorts: ports,\n\t\t\tVolumeMounts: volumemounts,\n\t\t},\n\t}\n\n\t// Need create json from gcp iam\n\t// https://github.com/orangesys/blueprint/tree/master/prometheus-thanos\n\t// kubectl create secret generic ${SERVICE_ACCOUNT_NAME} --from-file=${SERVICE_ACCOUNT_NAME}.json=${SERVICE_ACCOUNT_NAME}.json\n\t// secret name is thanos-demo-gcs\n\t// TODO setting secret name with spec\n\tvolumes := []corev1.Volume{\n\t\t{\n\t\t\tName: \"google-cloud-key\",\n\t\t\tVolumeSource: corev1.VolumeSource{\n\t\t\t\tSecret: &corev1.SecretVolumeSource{\n\t\t\t\t\tSecretName: t.Spec.SecretName,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\treturn &corev1.PodSpec{\n\t\tTerminationGracePeriodSeconds: &gracePeriodTerm,\n\t\tContainers: containers,\n\t\tVolumes: volumes,\n\t}, nil\n}", "func testPostWebhook(p bytes.Buffer) {\n\tresp, _ := http.PostForm(\n\t\t\"\",\n\t\turl.Values{\"payload_json\": {p.String()}},\n\t)\n\n\tbody, _ := ioutil.ReadAll(resp.Body)\n\tdefer resp.Body.Close()\n\tfmt.Println(string(body))\n}", "func (a *ManagementApiService) CreateNotificationWebhook(ctx _context.Context, applicationId int32) apiCreateNotificationWebhookRequest {\n\treturn apiCreateNotificationWebhookRequest{\n\t\tapiService: a,\n\t\tctx: ctx,\n\t\tapplicationId: applicationId,\n\t}\n}", "func BuildJobSpec(pod *podtemplatespec.Builder) *jobspec.Builder {\n\tjobSpecObj := jobspec.NewBuilder().\n\t\tWithPodTemplateSpecBuilder(pod)\n\t_, err := jobSpecObj.Build()\n\tif err != nil {\n\t\tlog.Errorln(err)\n\t}\n\treturn jobSpecObj\n}", "func (client *WebhooksClient) getCreateRequest(ctx context.Context, resourceGroupName string, registryName string, webhookName string, options *WebhooksClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/webhooks/{webhookName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif registryName 
== \"\" {\n\t\treturn nil, errors.New(\"parameter registryName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{registryName}\", url.PathEscape(registryName))\n\tif webhookName == \"\" {\n\t\treturn nil, errors.New(\"parameter webhookName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{webhookName}\", url.PathEscape(webhookName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2019-05-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func newSchemaPatchGenerator() generation.Generator {\n\trequiredFeatureSetsList := []sets.String{}\n\tif len(requiredFeatureSets) > 0 {\n\t\trequiredFeatureSetsList = append(requiredFeatureSetsList, sets.NewString(requiredFeatureSets...))\n\t}\n\n\treturn schemapatch.NewGenerator(schemapatch.Options{\n\t\tControllerGen: controllerGen,\n\t\tRequiredFeatureSets: requiredFeatureSetsList,\n\t\tVerify: verify,\n\t})\n}", "func (*Webhook) Descriptor() ([]byte, []int) {\n\treturn file_events_Event_proto_rawDescGZIP(), []int{2}\n}", "func WebhookHandler(w http.ResponseWriter, r *http.Request, reconciler gitopsconfig.Reconciler) {\n\tlog.Info(\"received webhook call\")\n\tif r.Method != \"POST\" {\n\t\tlog.Info(\"webhook handler only accepts the POST method\", \"sent_method\", r.Method)\n\t\tw.WriteHeader(405)\n\t\treturn\n\t}\n\n\tpayload, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\tlog.Error(err, \"error reading webhook request body\")\n\t\treturn\n\t}\n\tdefer r.Body.Close()\n\n\twebHookEvent, err := github.ParseWebHook(github.WebHookType(r), payload)\n\tif err != nil {\n\t\tlog.Error(err, \"error parsing webhook event payload\")\n\t\treturn\n\t}\n\n\tswitch e := webHookEvent.(type) {\n\tcase *github.PushEvent:\n\t\t// A commit push was received, determine if there is are GitOpsConfigs that match the event\n\t\t// The repository url and Git ref must match for the templateSource or parameterSource\n\t\t{\n\t\t\tlist, err := reconciler.GetAll()\n\t\t\tif err != nil {\n\t\t\t\tlog.Error(err, \"error getting the list of GitOpsConfigs\")\n\t\t\t\tw.WriteHeader(500)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\ttargetList := gitopsv1alpha1.GitOpsConfigList{\n\t\t\t\tTypeMeta: list.TypeMeta,\n\t\t\t\tListMeta: list.ListMeta,\n\t\t\t\tItems: make([]gitopsv1alpha1.GitOpsConfig, 0, len(list.Items)),\n\t\t\t}\n\n\t\t\tfor _, instance := range list.Items {\n\t\t\t\tif !gitopsconfig.ContainsTrigger(&instance, \"Webhook\") {\n\t\t\t\t\tlog.Info(\"skip instance without webhook trigger\", \"instance_name\", instance.Name)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tlog.Info(\"comparing instance and event metadata\", \"event_name\", e.Repo.GetFullName(), \"event_ref\", e.GetRef(),\n\t\t\t\t\t\"template_uri\", instance.Spec.TemplateSource.URI, \"template_ref\", instance.Spec.TemplateSource.Ref,\n\t\t\t\t\t\"parameter_uri\", instance.Spec.ParameterSource.URI, \"parameter_ref\", instance.Spec.ParameterSource.Ref)\n\n\t\t\t\tif !repoURLAndRefMatch(&instance, e) {\n\t\t\t\t\tlog.Info(\"skip instance without matching repo url or git ref of the event\", \"instance_name\", instance.Name)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tlog.Info(\"found matching instance\", \"instance_name\", instance.Name)\n\t\t\t\ttargetList.Items = append(targetList.Items, instance)\n\t\t\t}\n\n\t\t\tif 
len(targetList.Items) == 0 {\n\t\t\t\tlog.Info(\"no gitopsconfigs match the webhook event\", \"event_repo\", e.Repo.GetFullName(), \"event_ref\", strings.TrimPrefix(e.GetRef(), \"refs/heads/\"))\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tfor _, instance := range targetList.Items {\n\t\t\t\t//if secured discard those that do not validate\n\t\t\t\tsecret := getWebhookSecret(&instance)\n\t\t\t\tif secret != \"\" {\n\t\t\t\t\t_, err := github.ValidatePayload(r, []byte(secret))\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tlog.Error(err, \"webhook payload could not be validated with instance secret --> ignoring\", \"instance\", instance.GetName(), \"namespace\", instance.GetNamespace())\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tlog.Info(\"Webhook triggering job\", \"instance\", instance.GetName(), \"namespace\", instance.GetNamespace())\n\t\t\t\t_, err := reconciler.CreateJob(\"create\", &instance)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Error(err, \"Webhook unable to create job for instance\", \"instance\", instance.GetName(), \"namespace\", instance.GetNamespace())\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\tdefault:\n\t\t{\n\t\t\tlog.Info(\"unknown event type\", \"type\", github.WebHookType(r))\n\t\t\treturn\n\t\t}\n\t}\n\tlog.Info(\"webhook handling concluded correctly\")\n}", "func newProfile(ctx context.Context, cfg config.KubeSchedulerProfile, r frameworkruntime.Registry, recorderFact RecorderFactory,\n\topts ...frameworkruntime.Option) (framework.Framework, error) {\n\trecorder := recorderFact(cfg.SchedulerName)\n\topts = append(opts, frameworkruntime.WithEventRecorder(recorder))\n\treturn frameworkruntime.NewFramework(ctx, r, &cfg, opts...)\n}", "func (c WebhooksConfig) NewWebhooks(ctx context.Context, server io.Server) (web.Webhooks, error) {\n\tvar target web.Sink\n\tswitch c.Target {\n\tcase \"\":\n\t\treturn nil, nil\n\tcase \"direct\":\n\t\ttarget = &web.HTTPClientSink{\n\t\t\tClient: &http.Client{\n\t\t\t\tTimeout: c.Timeout,\n\t\t\t},\n\t\t}\n\tdefault:\n\t\treturn nil, errWebhooksTarget.WithAttributes(\"target\", c.Target)\n\t}\n\tif c.Registry == nil {\n\t\treturn nil, errWebhooksRegistry.New()\n\t}\n\tif c.QueueSize > 0 || c.Workers > 0 {\n\t\ttarget = &web.QueuedSink{\n\t\t\tTarget: target,\n\t\t\tQueue: make(chan *http.Request, c.QueueSize),\n\t\t\tWorkers: c.Workers,\n\t\t}\n\t}\n\tif controllable, ok := target.(web.ControllableSink); ok {\n\t\tgo func() {\n\t\t\tif err := controllable.Run(ctx); err != nil && !errors.IsCanceled(err) {\n\t\t\t\tlog.FromContext(ctx).WithError(err).Error(\"Webhooks target sink failed\")\n\t\t\t}\n\t\t}()\n\t}\n\treturn web.NewWebhooks(ctx, server, c.Registry, target, c.Downlinks), nil\n}", "func createVersionGitlabConfig(serviceID string, eventType string, payload api.WebhookGitlab,\n\toperation api.VersionOperation) *api.Version {\n\tversion := &api.Version{\n\t\tServiceID: serviceID,\n\t\tOperation: operation,\n\t\tYamlDeploy: api.DeployWithYaml,\n\t\tYamlDeployStatus: api.DeployNoRun,\n\t\tSecurityCheck: false,\n\t}\n\n\tswitch eventType {\n\tcase api.GitlabWebhookPush:\n\t\tversion.Name, version.Description, version.Commit = generateVersionFromGitlabPushData(payload)\n\t\tlog.Info(\"webhook event: push\")\n\n\tcase api.GitlabWebhookPullRequest:\n\t\tversion.Name, version.Description, version.URL, version.Commit = generateVersionFromGitlabPRData(payload)\n\t\tlog.Info(\"webhook event: merge_request\")\n\n\tcase api.GitlabWebhookRelease:\n\t\tversion.Name, version.Description, version.Commit = 
generateVersionFromGitlabRelData(payload)\n\t\tlog.Infof(\"webhook event: release\")\n\n\tdefault:\n\t\tlog.Info(\"receive undefine webhook event\")\n\t\treturn nil\n\t}\n\n\treturn version\n}", "func unmarshalCreateBottlePayload(ctx context.Context, service *goa.Service, req *http.Request) error {\n\tpayload := &createBottlePayload{}\n\tif err := service.DecodeRequest(req, payload); err != nil {\n\t\treturn err\n\t}\n\tif err := payload.Validate(); err != nil {\n\t\t// Initialize payload with private data structure so it can be logged\n\t\tgoa.ContextRequest(ctx).Payload = payload\n\t\treturn err\n\t}\n\tgoa.ContextRequest(ctx).Payload = payload.Publicize()\n\treturn nil\n}", "func createVendorSpec(b *troubleshootv1beta2.SupportBundle) (*troubleshootv1beta2.SupportBundle, error) {\n\tsupportBundle, err := staticspecs.GetVendorSpec()\n\tif err != nil {\n\t\tlogger.Errorf(\"Failed to load vendor support bundle spec: %v\", err)\n\t\treturn nil, err\n\t}\n\n\tif b.Spec.Collectors != nil {\n\t\tsupportBundle.Spec.Collectors = b.DeepCopy().Spec.Collectors\n\t}\n\tif b.Spec.Analyzers != nil {\n\t\tsupportBundle.Spec.Analyzers = b.DeepCopy().Spec.Analyzers\n\t}\n\treturn supportBundle, nil\n}", "func failingWebhook(namespace, name string, servicePort int32) admissionregistrationv1.ValidatingWebhook {\n\tsideEffectsNone := admissionregistrationv1.SideEffectClassNone\n\n\treturn admissionregistrationv1.ValidatingWebhook{\n\t\tName: name,\n\t\tRules: []admissionregistrationv1.RuleWithOperations{{\n\t\t\tOperations: []admissionregistrationv1.OperationType{admissionregistrationv1.Create},\n\t\t\tRule: admissionregistrationv1.Rule{\n\t\t\t\tAPIGroups: []string{\"\"},\n\t\t\t\tAPIVersions: []string{\"v1\"},\n\t\t\t\tResources: []string{\"configmaps\"},\n\t\t\t},\n\t\t}},\n\t\tClientConfig: admissionregistrationv1.WebhookClientConfig{\n\t\t\tService: &admissionregistrationv1.ServiceReference{\n\t\t\t\tNamespace: namespace,\n\t\t\t\tName: serviceName,\n\t\t\t\tPath: strPtr(\"/configmaps\"),\n\t\t\t\tPort: pointer.Int32(servicePort),\n\t\t\t},\n\t\t\t// Without CA bundle, the call to webhook always fails\n\t\t\tCABundle: nil,\n\t\t},\n\t\tSideEffects: &sideEffectsNone,\n\t\tAdmissionReviewVersions: []string{\"v1\", \"v1beta1\"},\n\t}\n}", "func CreateUpdateHookConfigurationRequest() (request *UpdateHookConfigurationRequest) {\n\trequest = &UpdateHookConfigurationRequest{\n\t\tRoaRequest: &requests.RoaRequest{},\n\t}\n\trequest.InitWithApiInfo(\"Edas\", \"2017-08-01\", \"UpdateHookConfiguration\", \"/pop/app/config_app_hook_json\", \"Edas\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}", "func createDefaultSpec(app apptypes.AppType, b *troubleshootv1beta2.SupportBundle, opts types.TroubleshootOptions, namespacesToCollect []string, namespacesToAnalyze []string, clientset *kubernetes.Clientset) (*troubleshootv1beta2.SupportBundle, error) {\n\tsupportBundle, err := staticspecs.GetDefaultSpec()\n\tif err != nil {\n\t\tlogger.Errorf(\"Failed to load default support bundle spec: %v\", err)\n\t\treturn nil, err\n\t}\n\n\tvar imageName string\n\tvar pullSecret *troubleshootv1beta2.ImagePullSecrets\n\n\tif clientset != nil {\n\t\timageName, pullSecret, err = getImageAndSecret(context.TODO(), clientset)\n\t\tif err != nil {\n\t\t\tlogger.Errorf(\"Failed to get kotsadm image and secret: %v\", err)\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tif imageName != \"\" {\n\t\tsupportBundle = populateImages(supportBundle, imageName, pullSecret)\n\t}\n\n\tisKurl, err := kurl.IsKurl(clientset)\n\tif err != nil 
{\n\t\tlogger.Errorf(\"Failed to check if cluster is kurl: %v\", err)\n\t}\n\n\tif isKurl {\n\t\tkurlSupportBunlde, err := staticspecs.GetKurlSpec()\n\t\tif err != nil {\n\t\t\tlogger.Errorf(\"Failed to load kurl support bundle spec: %v\", err)\n\t\t\treturn nil, err\n\t\t}\n\n\t\tsupportBundle.Spec.Collectors = append(supportBundle.Spec.Collectors, kurlSupportBunlde.Spec.Collectors...)\n\t\tsupportBundle.Spec.Analyzers = append(supportBundle.Spec.Analyzers, kurlSupportBunlde.Spec.Analyzers...)\n\t}\n\n\tsupportBundle = addDefaultDynamicTroubleshoot(supportBundle, app, imageName, pullSecret)\n\tsupportBundle = addAfterCollectionSpec(app, supportBundle, opts)\n\tsupportBundle = populateNamespaces(supportBundle, namespacesToCollect, namespacesToAnalyze)\n\n\treturn supportBundle, nil\n}", "func (c WebhooksConfig) NewWebhooks(ctx context.Context, server io.Server) (web.Webhooks, error) {\n\tvar target web.Sink\n\tswitch c.Target {\n\tcase \"\":\n\t\treturn nil, nil\n\tcase \"direct\":\n\t\tclient, err := server.HTTPClient(ctx)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tclient.Timeout = c.Timeout\n\t\ttarget = &web.HTTPClientSink{\n\t\t\tClient: client,\n\t\t}\n\tdefault:\n\t\treturn nil, errWebhooksTarget.WithAttributes(\"target\", c.Target)\n\t}\n\tif c.Registry == nil {\n\t\treturn nil, errWebhooksRegistry.New()\n\t}\n\tif c.QueueSize > 0 || c.Workers > 0 {\n\t\ttarget = &web.QueuedSink{\n\t\t\tTarget: target,\n\t\t\tQueue: make(chan *http.Request, c.QueueSize),\n\t\t\tWorkers: c.Workers,\n\t\t}\n\t}\n\tif controllable, ok := target.(web.ControllableSink); ok {\n\t\tgo func() {\n\t\t\tif err := controllable.Run(ctx); err != nil && !errors.IsCanceled(err) {\n\t\t\t\tlog.FromContext(ctx).WithError(err).Error(\"Webhooks target sink failed\")\n\t\t\t}\n\t\t}()\n\t}\n\treturn web.NewWebhooks(ctx, server, c.Registry, target, c.Downlinks)\n}", "func createPod(session *gexec.Session, webhookName string) {\n\tpodYamlAbsPath, err := filepath.Abs(filepath.Join(podYamlRelPath))\n\tExpect(err).NotTo(HaveOccurred())\n\n\tpod, err := kubeClient.CreatePod(podYamlAbsPath)\n\tEventually(session.Err, defaultTimeout).Should(gbytes.Say(\"handle: validating webhook request\"))\n\tExpect(err).NotTo(HaveOccurred())\n\tExpect(pod).NotTo(BeNil())\n\n\t// delete pod\n\terr = kubeClient.DeletePod(pod.GetName())\n\tExpect(err).NotTo(HaveOccurred())\n\n\t// delete validating webhook configuration\n\terr = kubeClient.DeleteValidatingWebhookConfiguration(webhookName)\n\tExpect(err).NotTo(HaveOccurred())\n\n\tif utils.IsWindowsPlatform() {\n\t\tsession.Kill()\n\t} else {\n\t\tsession.Terminate()\n\t}\n}", "func expectedMutatingWebhooksConfiguration(name string, previousGenerations []operatorsv1.GenerationStatus) int64 {\n\tgeneration := resourcemerge.GenerationFor(previousGenerations, schema.GroupResource{Group: admissionregistrationv1.SchemeGroupVersion.Group, Resource: \"mutatingwebhookconfigurations\"}, \"\", name)\n\tif generation != nil {\n\t\treturn generation.LastGeneration\n\t}\n\treturn -1\n}", "func (*TestWebhookRequest) Descriptor() ([]byte, []int) {\n\treturn file_events_Event_proto_rawDescGZIP(), []int{8}\n}", "func NewProtocolf(action string, hID int64, note string, a ...interface{}) Protocol {\n\treturn NewProtocol(action, hID, fmt.Sprintf(note, a...))\n}", "func (s *WebhooksService) CreateWebhook(webhookCreateRequest *WebhookCreateRequest) (*Webhook, *resty.Response, error) {\n\n\tpath := \"/webhooks/\"\n\n\tresponse, err := 
s.client.R().\n\t\tSetBody(webhookCreateRequest).\n\t\tSetResult(&Webhook{}).\n\t\tSetError(&Error{}).\n\t\tPost(path)\n\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tresult := response.Result().(*Webhook)\n\treturn result, response, err\n\n}", "func createBlipTesterWithSpec(tb testing.TB, spec BlipTesterSpec, rt *RestTester) (*BlipTester, error) {\n\tbt := &BlipTester{\n\t\trestTester: rt,\n\t}\n\n\tif !rt.GetDatabase().OnlyDefaultCollection() {\n\t\tbt.useCollections = true\n\t}\n\n\t// Since blip requests all go over the public handler, wrap the public handler with the httptest server\n\tpublicHandler := bt.restTester.TestPublicHandler()\n\n\tif len(spec.connectingUsername) > 0 {\n\n\t\t// By default, the user will be granted access to a single channel equal to their username\n\t\tadminChannels := []string{spec.connectingUsername}\n\n\t\t// If the caller specified a list of channels to grant the user access to, then use that instead.\n\t\tif len(spec.connectingUserChannelGrants) > 0 {\n\t\t\tadminChannels = []string{} // empty it\n\t\t\tadminChannels = append(adminChannels, spec.connectingUserChannelGrants...)\n\t\t}\n\n\t\tuserDocBody, err := getUserBodyDoc(spec.connectingUsername, spec.connectingPassword, bt.restTester.GetSingleTestDatabaseCollection(), adminChannels)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tlog.Printf(\"Creating user: %v\", userDocBody)\n\n\t\t// Create a user. NOTE: this must come *after* the bt.rt.TestPublicHandler() call, otherwise it will end up getting ignored\n\t\t_ = bt.restTester.SendAdminRequest(\n\t\t\t\"POST\",\n\t\t\t\"/{{.db}}/_user/\",\n\t\t\tuserDocBody,\n\t\t)\n\t}\n\n\t// Create a _temporary_ test server bound to an actual port that is used to make the blip connection.\n\t// This is needed because the mock-based approach fails with a \"Connection not hijackable\" error when\n\t// trying to do the websocket upgrade. 
Since it's only needed to setup the websocket, it can be closed\n\t// as soon as the websocket is established, hence the defer srv.Close() call.\n\tsrv := httptest.NewServer(publicHandler)\n\tdefer srv.Close()\n\n\t// Construct URL to connect to blipsync target endpoint\n\tdestUrl := fmt.Sprintf(\"%s/%s/_blipsync\", srv.URL, rt.GetDatabase().Name)\n\tu, err := url.Parse(destUrl)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tu.Scheme = \"ws\"\n\n\t// If protocols are not set use V3 as a V3 client would\n\tprotocols := spec.blipProtocols\n\tif len(protocols) == 0 {\n\t\tprotocols = []string{db.BlipCBMobileReplicationV3}\n\t}\n\n\t// Make BLIP/Websocket connection\n\tbt.blipContext, err = db.NewSGBlipContextWithProtocols(base.TestCtx(tb), \"\", protocols...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Ensure that errors get correctly surfaced in tests\n\tbt.blipContext.FatalErrorHandler = func(err error) {\n\t\ttb.Fatalf(\"BLIP fatal error: %v\", err)\n\t}\n\tbt.blipContext.HandlerPanicHandler = func(request, response *blip.Message, err interface{}) {\n\t\tstack := debug.Stack()\n\t\ttb.Fatalf(\"Panic while handling %s: %v\\n%s\", request.Profile(), err, string(stack))\n\t}\n\n\tconfig := blip.DialOptions{\n\t\tURL: u.String(),\n\t}\n\n\tif len(spec.connectingUsername) > 0 {\n\t\tconfig.HTTPHeader = http.Header{\n\t\t\t\"Authorization\": {\"Basic \" + base64.StdEncoding.EncodeToString([]byte(spec.connectingUsername+\":\"+spec.connectingPassword))},\n\t\t}\n\t}\n\n\tbt.sender, err = bt.blipContext.DialConfig(&config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcollections := bt.restTester.getCollectionsForBLIP()\n\tif !spec.skipCollectionsInitialization && len(collections) > 0 {\n\t\tbt.initializeCollections(collections)\n\t}\n\n\treturn bt, nil\n\n}", "func createWebhookService(\n\townerReference metav1.OwnerReference,\n\tserviceName string,\n\tnamespace string,\n) error {\n\n\tvar createService = false\n\t_, err := observer.GetService(namespace, serviceName)\n\tif err == nil {\n\t\t// service already present, no need to do anything\n\t\tcreateService = false\n\t} else {\n\t\tif errors.IsNotFound(err) {\n\t\t\tcreateService = true\n\t\t} else {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif !createService {\n\t\treturn nil\n\t}\n\n\t// create service resource that refers to KubeDirector pod\n\tkdName, _ := k8sutil.GetOperatorName()\n\tserviceLabels := map[string]string{\"name\": kdName}\n\tservice := &v1.Service{\n\t\tTypeMeta: metav1.TypeMeta{\n\t\t\tKind: \"Service\",\n\t\t\tAPIVersion: \"v1\",\n\t\t},\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tNamespace: namespace,\n\t\t\tName: serviceName,\n\t\t\tLabels: map[string]string{\"webhook\": kdName},\n\t\t\tOwnerReferences: []metav1.OwnerReference{ownerReference},\n\t\t},\n\t\tSpec: v1.ServiceSpec{\n\t\t\tSelector: serviceLabels,\n\t\t\tPorts: []v1.ServicePort{\n\t\t\t\t{\n\t\t\t\t\tProtocol: \"TCP\",\n\t\t\t\t\tPort: 443,\n\t\t\t\t\tTargetPort: intstr.FromInt(validationPort),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\treturn shared.Create(context.TODO(), service)\n}", "func (c *MutatingWebhook) Create(options Options) (*component.Summary, error) {\n\tif c.mutatingWebhook == nil {\n\t\treturn nil, errors.New(\"mutatingWebhook is nil\")\n\t}\n\n\tvar sections component.SummarySections\n\n\tclient, err := admissionWebhookClientConfig(c.mutatingWebhook.ClientConfig, options)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsections.Add(\"Client\", client)\n\trules, err := admissionWebhookRules(c.mutatingWebhook.Rules, options)\n\tif err != nil 
{\n\t\treturn nil, err\n\t}\n\tsections.Add(\"Rules\", rules)\n\tnamespaceSelector := admissionWebhookLabelSelector(c.mutatingWebhook.NamespaceSelector)\n\tsections.Add(\"Namespace Selector\", namespaceSelector)\n\tobjectSelector := admissionWebhookLabelSelector(c.mutatingWebhook.ObjectSelector)\n\tsections.Add(\"Object Selector\", objectSelector)\n\treinvocationPolicy := admissionWebhookReinvocationPolicy(c.mutatingWebhook.ReinvocationPolicy)\n\tsections.Add(\"Reinvocation Policy\", reinvocationPolicy)\n\tfailurePolicy := admissionWebhookFailurePolicy(c.mutatingWebhook.FailurePolicy)\n\tsections.Add(\"Failure Policy\", failurePolicy)\n\tmatchPolicy := admissionWebhookMatchPolicy(c.mutatingWebhook.MatchPolicy)\n\tsections.Add(\"Match Policy\", matchPolicy)\n\tsideEffects := admissionWebhookSideEffects(c.mutatingWebhook.SideEffects)\n\tsections.Add(\"Side Effects\", sideEffects)\n\ttimeout := admissionWebhookTimeout(c.mutatingWebhook.TimeoutSeconds)\n\tsections.Add(\"Timeout\", timeout)\n\tadmissionReviewVersions := admissionWebhookAdmissionReviewVersions(c.mutatingWebhook.AdmissionReviewVersions)\n\tsections.Add(\"Admission Review Versions\", admissionReviewVersions)\n\n\tsummary := component.NewSummary(c.mutatingWebhook.Name, sections...)\n\n\treturn summary, nil\n}", "func webhookHandler(w http.ResponseWriter, r *http.Request) {\n\t// parse request body\n\tbody, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn\n\t}\n\tfmt.Println(string(body))\n\t// decoder := json.NewDecoder(r.Body)\n\t// Get student public URL\n\t// run Webdriver IO test against the URL and get its result\n\t// Store result and SHA\n\t// publish status to Github\n\tfmt.Println(\"Hello\")\n}" ]
[ "0.62817883", "0.60171837", "0.5866091", "0.5728155", "0.5688821", "0.5654436", "0.56526816", "0.55921936", "0.55623686", "0.55391085", "0.548519", "0.5471916", "0.5470614", "0.54071194", "0.5395845", "0.5376193", "0.5341764", "0.5336821", "0.53300333", "0.53069496", "0.52973324", "0.5266114", "0.5249888", "0.5229667", "0.51974785", "0.5179271", "0.51706266", "0.51604825", "0.51306707", "0.5093249", "0.50897807", "0.5087566", "0.50747323", "0.5055699", "0.5039446", "0.5022587", "0.5002947", "0.50002706", "0.49957865", "0.49909773", "0.49899828", "0.49854165", "0.49793804", "0.49691114", "0.4938491", "0.49362358", "0.49358657", "0.49314648", "0.49219802", "0.49124727", "0.4909824", "0.4900949", "0.48980555", "0.48864016", "0.48834762", "0.4883385", "0.4869092", "0.4868851", "0.4864461", "0.48509637", "0.483933", "0.4837828", "0.48299426", "0.48287827", "0.48128462", "0.48085043", "0.47995657", "0.4789301", "0.47795832", "0.47354388", "0.47321", "0.4729438", "0.4725746", "0.4724535", "0.47210056", "0.4719929", "0.47067395", "0.46964478", "0.46923214", "0.4691712", "0.46869805", "0.46765214", "0.46664295", "0.46657202", "0.4653255", "0.46502122", "0.46497598", "0.46444088", "0.4643824", "0.46405342", "0.463796", "0.46351516", "0.4626588", "0.46241906", "0.46235302", "0.46179223", "0.45906264", "0.45864722", "0.45849517", "0.45842633" ]
0.8407555
0
NewCronSpec generates a new CronSpec from a job.CronSpec
func NewCronSpec(spec *job.CronSpec) *CronSpec { return &CronSpec{ CronSchedule: spec.CronSchedule, CreatedAt: spec.CreatedAt, UpdatedAt: spec.UpdatedAt, } }
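A minimal usage sketch for the document above. Everything below is an assumption for illustration, not dataset content: the job.CronSpec literal reuses the three field names that NewCronSpec copies, the timestamps are time.Time values, and the schedule string is invented (assumed imports: "fmt", "time", and the package that defines job.CronSpec).

// Build a core-level spec, then convert it to the presenter type.
now := time.Now()
src := &job.CronSpec{
	CronSchedule: "*/5 * * * *", // hypothetical schedule: every five minutes
	CreatedAt:    now,
	UpdatedAt:    now,
}
presented := NewCronSpec(src)
fmt.Println(presented.CronSchedule) // prints "*/5 * * * *"

Because the conversion is a plain field-by-field copy, the presenter type stays decoupled from the core job package while adding no behavior of its own.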
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func NewCronFromJobSpec(\n\tjobSpec job.Job,\n\tpipelineRunner pipeline.Runner,\n) (*Cron, error) {\n\n\tcronSpec := jobSpec.CronSpec\n\tspec := jobSpec.PipelineSpec\n\n\tcronLogger := logger.CreateLogger(\n\t\tlogger.Default.With(\n\t\t\t\"jobID\", jobSpec.ID,\n\t\t\t\"schedule\", cronSpec.CronSchedule,\n\t\t),\n\t)\n\n\treturn &Cron{\n\t\tchDone: make(chan struct{}),\n\t\tchStop: make(chan struct{}),\n\t\tcronRunner: cronParser.New(),\n\t\tjobID: jobSpec.ID,\n\t\tlogger: cronLogger,\n\t\tpipelineRunner: pipelineRunner,\n\t\tpipelineSpec: *spec,\n\t\tSchedule: cronSpec.CronSchedule,\n\t}, nil\n}", "func (w *Worker) ByCronSpec(spec string) *Worker {\n\tw.schedule = ByCronSchedule(spec)\n\treturn w\n}", "func createCronJobConfig(module *protos.Module) *v1beta1.CronJob {\n\tvolumes, volumeMounts := makeVolumes(module)\n\tcontainers := makeContainers(module, volumeMounts)\n\tm := module.GetSpec()\n\n\tsuccessfulJobHistoryLimit := int32(1)\n\tfailedJobsHistoryLimit := int32(1)\n\treturn &v1beta1.CronJob{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: module.Name,\n\t\t},\n\t\tSpec: v1beta1.CronJobSpec{\n\t\t\tSchedule: m.Schedule,\n\t\t\tJobTemplate: v1beta1.JobTemplateSpec{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tLabels: map[string]string{\n\t\t\t\t\t\t\"name\": module.Name,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tSpec: batchv1.JobSpec{\n\t\t\t\t\tTemplate: v1.PodTemplateSpec{\n\t\t\t\t\t\tSpec: v1.PodSpec{\n\t\t\t\t\t\t\tContainers: containers,\n\t\t\t\t\t\t\tVolumes: volumes,\n\t\t\t\t\t\t\tHostNetwork: true,\n\t\t\t\t\t\t\tRestartPolicy: \"Never\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tSuccessfulJobsHistoryLimit: &successfulJobHistoryLimit,\n\t\t\tFailedJobsHistoryLimit: &failedJobsHistoryLimit,\n\t\t},\n\t}\n}", "func newCronJobForCR(cr *v1alpha1.Copybird) *v1beta1.CronJob {\n\tlabels := map[string]string{\n\t\t\"app\": cr.Name,\n\t}\n\treturn &v1beta1.CronJob{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: cr.Name + \"-cronjob\",\n\t\t\tNamespace: cr.Namespace,\n\t\t\tLabels: labels,\n\t\t},\n\t\tSpec: v1beta1.CronJobSpec{\n\t\t\tSchedule: cr.Spec.Cron,\n\t\t\tJobTemplate: v1beta1.JobTemplateSpec{\n\t\t\t\tSpec: batchv1.JobSpec{\n\t\t\t\t\tTemplate: v1.PodTemplateSpec{\n\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\tName: cr.Name + \"-copybird\",\n\t\t\t\t\t\t\tNamespace: cr.Namespace,\n\t\t\t\t\t\t\tLabels: labels,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tSpec: v1.PodSpec{\n\t\t\t\t\t\t\tContainers: []v1.Container{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tName: cr.Name,\n\t\t\t\t\t\t\t\t\tImage: \"copybird/copybird\",\n\t\t\t\t\t\t\t\t\tCommand: []string{},\n\t\t\t\t\t\t\t\t\tArgs: []string{},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tRestartPolicy: \"OnFailure\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}", "func NewCronWorkflowSpec() *cronworkflowv1.CronWorkflowSpec {\n\treturn &cronworkflowv1.CronWorkflowSpec{\n\t\tSchedule: \"* * * * *\",\n\t\tConcurrencyPolicy: batchv2alpha1.AllowConcurrent,\n\t\tWorkflowTemplate: NewWorkflowTemplateSpec(),\n\t}\n}", "func (in *CronJobSpec) DeepCopy() *CronJobSpec {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(CronJobSpec)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func newCron(schedule string) (chan bool, *cron.Cron) {\n\tchannel := make(chan bool)\n\tcj := cron.New()\n\tcj.AddFunc(schedule, func() { cronTriggered(channel) })\n\tglog.Info(\"db backup schedule: \" + schedule)\n\treturn channel, cj\n}", "func (*CronSpec) Descriptor() ([]byte, []int) {\n\treturn 
file_toit_model_job_proto_rawDescGZIP(), []int{18}\n}", "func NewCron() helmify.Processor {\n\treturn &cron{}\n}", "func NewCron() *cron {\n\tc := &cron{\n\t\ttasks: make(chan task),\n\t}\n\tgo c.loop()\n\treturn c\n}", "func CreateOrUpdate(ctx context.Context, c client.Client, cj *batchv1.CronJob, equal EqualityFunc, mutate MutateFunc) error {\n\tcurrent := &batchv1.CronJob{}\n\tkey := client.ObjectKey{Name: cj.Name, Namespace: cj.Namespace}\n\terr := c.Get(ctx, key, current)\n\tif err != nil {\n\t\tif apierrors.IsNotFound(err) {\n\t\t\terr = c.Create(ctx, cj)\n\n\t\t\tif err == nil {\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\t\treturn kverrors.Wrap(err, \"failed to create cronjob\",\n\t\t\t\t\"name\", cj.Name,\n\t\t\t\t\"namespace\", cj.Namespace,\n\t\t\t)\n\t\t}\n\n\t\treturn kverrors.Wrap(err, \"failed to get cronjob\",\n\t\t\t\"name\", cj.Name,\n\t\t\t\"namespace\", cj.Namespace,\n\t\t)\n\t}\n\n\tif !equal(current, cj) {\n\t\terr := retry.RetryOnConflict(retry.DefaultRetry, func() error {\n\t\t\tif err := c.Get(ctx, key, current); err != nil {\n\t\t\t\treturn kverrors.Wrap(err, \"failed to get cronjob\",\n\t\t\t\t\t\"name\", cj.Name,\n\t\t\t\t\t\"namespace\", cj.Namespace,\n\t\t\t\t)\n\t\t\t}\n\n\t\t\tmutate(current, cj)\n\t\t\tif err := c.Update(ctx, current); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\treturn nil\n\t\t})\n\t\tif err != nil {\n\t\t\treturn kverrors.Wrap(err, \"failed to update cronjob\",\n\t\t\t\t\"name\", cj.Name,\n\t\t\t\t\"namespace\", cj.Namespace,\n\t\t\t)\n\t\t}\n\t\treturn nil\n\t}\n\n\treturn nil\n}", "func newSchedule(period time.Duration, jobFunc interface{}, params ...interface{}) *schedule {\n\treturn &schedule{\n\t\tstopCh: make(chan struct{}),\n\t\tjobFunc: jobFunc,\n\t\tjobParams: params,\n\t\tticker: time.NewTicker(period),\n\t}\n}", "func (s *deploymentServer) createCronjob(ctx context.Context, manifest []byte, env []EnvVar, initVariables []EnvVar) error {\n\tdecoder := k8sYaml.NewYAMLOrJSONDecoder(bytes.NewReader(manifest), 1000)\n\n\tj := &apibatch.CronJob{}\n\n\tif err := decoder.Decode(&j); err != nil {\n\t\treturn err\n\t}\n\n\tif len(env) > 0 {\n\t\tcontainers := j.Spec.JobTemplate.Spec.Template.Spec.Containers\n\t\tapplyEnvironment(containers, env)\n\t}\n\n\tinitContainers := j.Spec.JobTemplate.Spec.Template.Spec.InitContainers\n\tif len(initContainers) > 0 {\n\t\tfmt.Println(\"job \" + j.Namespace + \".\" + j.Name + \" has initContainers\")\n\t\tapplyEnvironment(initContainers, initVariables)\n\t} else {\n\t\tfmt.Println(\"job \" + j.Namespace + \".\" + j.Name + \" has no initContainers; bug in config\")\n\t}\n\n\tbatchAPI := s.clientset.BatchV1beta1()\n\tapiJobs := batchAPI.CronJobs(j.Namespace)\n\n\tif _, err := apiJobs.Create(ctx, j, metav1.CreateOptions{}); err != nil {\n\t\treturn fmt.Errorf(\"job create error '%s'\", err.Error())\n\t}\n\treturn nil\n}", "func NewSpec(api *gmail.Service, db *db.DB) (*Spec, error) {\n\tlog.SetLevel(log.DebugLevel)\n\tlog.Info(\"starting new spec\")\n\n\tbytes, err := ioutil.ReadFile(\"./spec.yaml\")\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to read file: %v\", err)\n\t}\n\n\tspec := &Spec{\n\t\tapi: api,\n\t\tdb: db,\n\t}\n\n\terr = yaml.Unmarshal(bytes, spec)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to unmarshal: %v\", err)\n\t}\n\n\treturn spec, nil\n}", "func NewUpdateAsCronRequestWithoutParam() *UpdateAsCronRequest {\n\n return &UpdateAsCronRequest{\n JDCloudRequest: core.JDCloudRequest{\n URL: \"/regions/{regionId}/asCrons/{asCronId}\",\n Method: \"POST\",\n Header: nil,\n 
Version: \"v1\",\n },\n }\n}", "func NewCron() engine.Action {\n\treturn &Cron{}\n}", "func (s *TestSuite) TestRegisterCrons(c *C) {\n\ttemp1 := &ProcessTemplate{\n\t\tCommand: \"/usr/local/bin/node\",\n\t\tArgs: []string{\n\t\t\t\"samples/longrunning.js\",\n\t\t},\n\t\tCron: \"* * * * * *\",\n\t\tName: \"asdf\",\n\t\t// LogFile: \"/tmp/crontest\",\n\t\tResetLog: true,\n\t}\n\n\tconfig := &Config{\n\t\tProcess: []*ProcessTemplate{\n\t\t\ttemp1,\n\t\t},\n\t}\n\n\tmanager := NewManager(config)\n\n\t// log.Println(manager.monitor)\n\tgo manager.Start()\n\n\tfor {\n\t\tif manager.Started {\n\t\t\tmanager.StartCrons()\n\t\t\tbreak\n\t\t}\n\t\ttime.Sleep(100 * time.Millisecond)\n\t}\n\n\t// Make the crons are registered\n\n\tfor {\n\t\tselect {\n\t\tcase proc := <-manager.monitor:\n\t\t\t// fmt.Println(\"Got \", proc)\n\t\t\tc.Assert(proc.Template, Equals, temp1)\n\t\t\treturn\n\t\tdefault:\n\t\t\t// Wait a while before we check again\n\t\t\ttime.Sleep(100 * time.Millisecond)\n\t\t}\n\t}\n\n}", "func (in *RegistryCronJobSpec) DeepCopy() *RegistryCronJobSpec {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(RegistryCronJobSpec)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func NewCronSchedule(entry string) *CronSchedule {\n\tschedule := cron.New()\n\treturn &CronSchedule{\n\t\tentry: entry,\n\t\tschedule: schedule,\n\t\tenabled: false,\n\t}\n}", "func createCronjob(client k8sclient.Interface, module *protos.Module) error {\n\tcjConfig := createCronJobConfig(module)\n\tcj, err := client.BatchV1beta1().CronJobs(defaultNS).Create(cjConfig)\n\tif err == nil {\n\t\tlog.Infof(\"Created CronJob %+v\", cj)\n\t} else if errors.IsAlreadyExists(err) {\n\t\tlog.Infof(\"CronJob %+v already exists\", cjConfig)\n\t} else {\n\t\tlog.Errorf(\"Failed to create CronJob %+v with error: %v\", cjConfig, err)\n\t}\n\n\treturn err\n}", "func (*CronSchedule) Descriptor() ([]byte, []int) {\n\treturn file_toit_model_job_proto_rawDescGZIP(), []int{19}\n}", "func (ci *CronImpl) Create(timeZone string) (*cron.Cron, error) {\n\tl, err := time.LoadLocation(timeZone)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn cron.NewWithLocation(l), nil\n}", "func NewSpec(details *SpecDetails) *Spec {\n\treturn &Spec{\n\t\tDetails: details,\n\t\tServices: NewServiceList(),\n\t\tStatus: SpecWaiting,\n\t}\n}", "func New(locker Locker, opts *Options) *Cron {\n\tlocation, err := time.LoadLocation(opts.timezone())\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn &Cron{\n\t\tjobs: make(map[string]*job),\n\t\tlocker: locker,\n\t\topts: opts,\n\t\tlocation: location,\n\t}\n}", "func Mutate(current, desired *batchv1.CronJob) {\n\tcurrent.Spec = desired.Spec\n}", "func (c *Crew) NewTimersSpec() *core.Spec {\n\n\tonlyTimers := func(bs match.Bindings) match.Bindings {\n\t\tacc := match.NewBindings()\n\t\tacc[\"timers\"] = bs[\"timers\"]\n\t\treturn acc\n\t}\n\n\tspec := &core.Spec{\n\t\tName: \"timers\",\n\t\tDoc: \"A machine that makes in-memory timers that send messages.\",\n\t\tNodes: map[string]*core.Node{\n\t\t\t\"start\": {\n\t\t\t\tDoc: \"Wait to hear a request to create or delete a timer.\",\n\t\t\t\tBranches: &core.Branches{\n\t\t\t\t\tType: \"message\",\n\t\t\t\t\tBranches: []*core.Branch{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tPattern: mustParse(`{\"makeTimer\":{\"in\":\"?in\", \"msg\":\"?msg\", \"id\":\"?id\"}}`),\n\t\t\t\t\t\t\tTarget: \"make\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tPattern: mustParse(`{\"cancelTimer\":\"?id\"}`),\n\t\t\t\t\t\t\tTarget: 
\"cancel\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t\"make\": {\n\t\t\t\tDoc: \"Try to make the timer.\",\n\t\t\t\tAction: &core.FuncAction{\n\t\t\t\t\tF: func(ctx context.Context, bs match.Bindings, props core.StepProps) (*core.Execution, error) {\n\t\t\t\t\t\tx, have := bs[\"?in\"]\n\t\t\t\t\t\tif !have {\n\t\t\t\t\t\t\treturn core.NewExecution(bs.Extend(\"error\", \"no in\")), nil\n\t\t\t\t\t\t}\n\t\t\t\t\t\tin, is := x.(string)\n\t\t\t\t\t\tif !is {\n\t\t\t\t\t\t\treturn core.NewExecution(bs.Extend(\"error\", fmt.Sprintf(\"non-string in: %T %#v\", x, x))), nil\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\td, err := time.ParseDuration(in)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\tmsg := fmt.Sprintf(\"bad in '%s': %v\", in, err)\n\t\t\t\t\t\t\treturn core.NewExecution(bs.Extend(\"error\", msg)), nil\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tx, have = bs[\"?id\"]\n\t\t\t\t\t\tif !have {\n\t\t\t\t\t\t\treturn core.NewExecution(bs.Extend(\"error\", \"no id\")), nil\n\t\t\t\t\t\t}\n\t\t\t\t\t\tid, is := x.(string)\n\t\t\t\t\t\tif !is {\n\t\t\t\t\t\t\treturn core.NewExecution(bs.Extend(\"error\", fmt.Sprintf(\"non-string id: %T %#v\", x, x))), nil\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tmsg, have := bs[\"?msg\"]\n\t\t\t\t\t\tif !have {\n\t\t\t\t\t\t\treturn core.NewExecution(bs.Extend(\"error\", \"no message\")), nil\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tif err = c.timers.Add(ctx, id, msg, d); err != nil {\n\t\t\t\t\t\t\treturn core.NewExecution(bs.Extend(\"error\", err.Error())), nil\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tc.timers.changed()\n\n\t\t\t\t\t\treturn core.NewExecution(onlyTimers(bs)), nil\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tBranches: &core.Branches{\n\t\t\t\t\tType: \"bindings\",\n\t\t\t\t\tBranches: []*core.Branch{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tTarget: \"start\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t\"cancel\": {\n\t\t\t\tDoc: \"Try to delete the timer.\",\n\t\t\t\tAction: &core.FuncAction{\n\t\t\t\t\tF: func(ctx context.Context, bs match.Bindings, props core.StepProps) (*core.Execution, error) {\n\t\t\t\t\t\tx, have := bs[\"?id\"]\n\t\t\t\t\t\tif !have {\n\t\t\t\t\t\t\treturn core.NewExecution(bs.Extend(\"error\", \"no id\")), nil\n\t\t\t\t\t\t}\n\t\t\t\t\t\tid, is := x.(string)\n\t\t\t\t\t\tif !is {\n\t\t\t\t\t\t\treturn core.NewExecution(bs.Extend(\"error\", fmt.Sprintf(\"non-string id: %T %#v\", x, x))), nil\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tif err := c.timers.Cancel(ctx, id); err != nil {\n\t\t\t\t\t\t\treturn core.NewExecution(bs.Extend(\"error\", err.Error())), nil\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tc.timers.changed()\n\n\t\t\t\t\t\treturn core.NewExecution(onlyTimers(bs)), nil\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tBranches: &core.Branches{\n\t\t\t\t\tType: \"bindings\",\n\t\t\t\t\tBranches: []*core.Branch{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tTarget: \"start\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\treturn spec\n}", "func TestCronBindingTrigger(t *testing.T) {\n\tconst appName = \"cronapp\"\n\tconst sidecarName = \"cron-sidecar\"\n\n\ttestMatrix := []cronTest{\n\t\t{\n\t\t\tcronName: \"cron1s\",\n\t\t\tschedule: \"@every 1s\", // Test macro cron format\n\t\t\texpectedTriggerCount: 10,\n\t\t\tstep: time.Second / 5,\n\t\t\ttestDuration: time.Second * 10,\n\t\t},\n\t\t{\n\t\t\tcronName: \"cron3s\",\n\t\t\tschedule: \"*/3 * * * * *\", // Test non-standard crontab format\n\t\t\texpectedTriggerCount: 10,\n\t\t\tstep: time.Second,\n\t\t\ttestDuration: time.Second * 30,\n\t\t},\n\t\t{\n\t\t\tcronName: \"cron15m\",\n\t\t\tschedule: \"*/15 * * * *\", // Test standard crontab 
format\n\t\t\texpectedTriggerCount: 12,\n\t\t\tstep: 30 * time.Second,\n\t\t\ttestDuration: time.Hour * 3,\n\t\t},\n\t\t{\n\t\t\tcronName: \"cron6h\",\n\t\t\tschedule: \"0 0 */6 ? * *\", // Test quartz cron format\n\t\t\texpectedTriggerCount: 12,\n\t\t\tstep: time.Minute,\n\t\t\ttestDuration: time.Hour * 24 * 3,\n\t\t},\n\t}\n\n\tfor _, cronTest := range testMatrix {\n\t\tt.Run(cronTest.cronName, func(t *testing.T) {\n\t\t\tcronTest.clk = clocktesting.NewFakeClock(startTime)\n\n\t\t\tports, _ := dapr_testing.GetFreePorts(3)\n\t\t\tgrpcPort := ports[0]\n\t\t\thttpPort := ports[1]\n\t\t\tappPort := ports[2]\n\n\t\t\ttestFn, triggeredCb := testerFn(cronTest.clk, cronTest.testDuration, cronTest.expectedTriggerCount, cronTest.step)\n\n\t\t\tflow.New(t, \"test cron trigger with different schedules\").\n\t\t\t\tStep(app.Run(appName,\n\t\t\t\t\tfmt.Sprintf(\":%d\", appPort),\n\t\t\t\t\tappWithTriggerCounter(t, cronTest.clk, cronTest.cronName, triggeredCb),\n\t\t\t\t)).\n\t\t\t\tStep(sidecar.Run(sidecarName,\n\t\t\t\t\tappend(componentRuntimeOptions(cronTest.clk),\n\t\t\t\t\t\tembedded.WithResourcesPath(\"./components\"),\n\t\t\t\t\t\tembedded.WithDaprGRPCPort(strconv.Itoa(grpcPort)),\n\t\t\t\t\t\tembedded.WithAppProtocol(protocol.HTTPProtocol, strconv.Itoa(appPort)),\n\t\t\t\t\t\tembedded.WithDaprHTTPPort(strconv.Itoa(httpPort)),\n\t\t\t\t\t)...,\n\t\t\t\t)).\n\t\t\t\tStep(\"run test\", testFn).\n\t\t\t\tStep(\"stop sidecar\", sidecar.Stop(sidecarName)).\n\t\t\t\tStep(\"stop app\", app.Stop(appName)).\n\t\t\t\tRun()\n\t\t})\n\t}\n}", "func (cli *Client) CronCreate(ctx context.Context, name string, sv types.Cron) (types.Cron, error) {\n\tvar cron types.Cron\n\tvar r = url.Values{}\n\tr.Set(\"name\", name)\n\tresp, err := cli.post(ctx, \"/crons/create\", r, sv, nil)\n\tif err != nil {\n\t\treturn cron, err\n\t}\n\terr = json.NewDecoder(resp.body).Decode(&cron)\n\tensureReaderClosed(resp)\n\treturn cron, err\n}", "func NewWebhookSpec(spec *job.WebhookSpec) *WebhookSpec {\n\treturn &WebhookSpec{\n\t\tCreatedAt: spec.CreatedAt,\n\t\tUpdatedAt: spec.UpdatedAt,\n\t}\n}", "func New(pool *redis.Pool) *Cron {\n\treturn NewWithLocation(time.Now().Location(), pool)\n}", "func (j *AuroraJob) CronSchedule(cron string) Job {\n\tj.jobConfig.CronSchedule = &cron\n\treturn j\n}", "func NewCrontabConfig(fields []FieldConfig) *CrontabConfig {\n\tq := &CrontabConfig{\n\t\tFields: fields,\n\t\tFieldUnits: map[string][]int{},\n\t}\n\tunms := map[string]struct{}{}\n\tfor i := 0; i < len(q.Fields); i++ {\n\t\tf := fields[i]\n\t\tu := f.unit\n\t\tunm := u.String()\n\t\tq.FieldUnits[unm] = append(q.FieldUnits[unm], i)\n\t\t_, ok := unms[unm]\n\t\tif !ok {\n\t\t\tunms[unm] = struct{}{}\n\t\t\tq.Units = append(q.Units, u)\n\t\t}\n\t}\n\tSortUnits(q.Units)\n\t// fields are sorted in unit order\n\treturn q\n}", "func Schedule(spec string, job *Job, buildWrapper BuildWrapperFunc) (cron.EntryID, error) {\n\tsched, err := cron.ParseStandard(spec)\n\tif err != nil {\n\t\treturn InvalidEntryID, err\n\t}\n\n\tjob.entryID = mainCron.Schedule(sched, buildWrapper(job))\n\tjob.typ = JobTypeOnce\n\treturn addJob(job), nil\n}", "func ByCronSchedule(schedule string) ScheduleFunc {\n\ts, err := cron.Parse(schedule)\n\tif err != nil {\n\t\tpanic(\"parse cron spec fatal error: \" + err.Error())\n\t}\n\n\treturn func(ctx context.Context, job Job) Job {\n\t\treturn func(ctx context.Context) {\n\t\t\tnow := time.Now()\n\t\t\ttimer := time.NewTimer(s.Next(now).Sub(now))\n\t\t\tdefer timer.Stop()\n\n\t\t\tfor {\n\t\t\t\tselect {\n\t\t\t\tcase 
<-ctx.Done():\n\t\t\t\t\treturn\n\t\t\t\tcase <-timer.C:\n\t\t\t\t\tjob(ctx)\n\t\t\t\t\tnow = time.Now()\n\t\t\t\t\ttimer.Reset(s.Next(now).Sub(now))\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}", "func New(cfg *config.Config) *Schedule {\n\ts := Schedule{\n\t\tconfig: cfg,\n\t\tcron: cron.New(),\n\t}\n\n\treturn &s\n}", "func NewCrontab() *Crontab {\n\tctb := &Crontab{\n\t\tjobs: make(map[string]*Job),\n\t\tcontrol: make(chan *crontabControl),\n\t\tticker: time.NewTicker(1e9),\n\t}\n\n\tgo ctb.backend()\n\n\treturn ctb\n}", "func (p cron) Process(appMeta helmify.AppMetadata, obj *unstructured.Unstructured) (bool, helmify.Template, error) {\n\tif obj.GroupVersionKind() != cronGVC {\n\t\treturn false, nil, nil\n\t}\n\tmeta, err := processor.ProcessObjMeta(appMeta, obj)\n\tif err != nil {\n\t\treturn true, nil, err\n\t}\n\tname := appMeta.TrimName(obj.GetName())\n\tnameCamelCase := strcase.ToLowerCamel(name)\n\n\tjobObj := batchv1.CronJob{}\n\terr = runtime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, &jobObj)\n\tif err != nil {\n\t\treturn true, nil, errors.Wrap(err, \"unable to cast to Job\")\n\t}\n\tspec := jobObj.Spec\n\tspecMap, exists, err := unstructured.NestedMap(obj.Object, \"spec\")\n\tif err != nil {\n\t\treturn true, nil, errors.Wrap(err, \"unable to get job spec\")\n\t}\n\tif !exists {\n\t\treturn true, nil, errors.New(\"no job spec presented\")\n\t}\n\n\tvalues := helmify.Values{}\n\n\t// process job spec params:\n\tif spec.Schedule != \"\" {\n\t\terr := templateSpecVal(spec.Schedule, &values, specMap, nameCamelCase, \"schedule\")\n\t\tif err != nil {\n\t\t\treturn true, nil, err\n\t\t}\n\t}\n\n\tif spec.Suspend != nil {\n\t\terr := templateSpecVal(*spec.Suspend, &values, specMap, nameCamelCase, \"suspend\")\n\t\tif err != nil {\n\t\t\treturn true, nil, err\n\t\t}\n\t}\n\n\tif spec.FailedJobsHistoryLimit != nil {\n\t\terr := templateSpecVal(*spec.FailedJobsHistoryLimit, &values, specMap, nameCamelCase, \"failedJobsHistoryLimit\")\n\t\tif err != nil {\n\t\t\treturn true, nil, err\n\t\t}\n\t}\n\n\tif spec.StartingDeadlineSeconds != nil {\n\t\terr := templateSpecVal(*spec.StartingDeadlineSeconds, &values, specMap, nameCamelCase, \"startingDeadlineSeconds\")\n\t\tif err != nil {\n\t\t\treturn true, nil, err\n\t\t}\n\t}\n\n\tif spec.TimeZone != nil {\n\t\terr := templateSpecVal(*spec.TimeZone, &values, specMap, nameCamelCase, \"timeZone\")\n\t\tif err != nil {\n\t\t\treturn true, nil, err\n\t\t}\n\t}\n\n\tif spec.SuccessfulJobsHistoryLimit != nil {\n\t\terr := templateSpecVal(*spec.SuccessfulJobsHistoryLimit, &values, specMap, nameCamelCase, \"successfulJobsHistoryLimit\")\n\t\tif err != nil {\n\t\t\treturn true, nil, err\n\t\t}\n\t}\n\n\t// process job pod template:\n\tpodSpecMap, podValues, err := pod.ProcessSpec(nameCamelCase, appMeta, jobObj.Spec.JobTemplate.Spec.Template.Spec)\n\tif err != nil {\n\t\treturn true, nil, err\n\t}\n\terr = values.Merge(podValues)\n\tif err != nil {\n\t\treturn true, nil, err\n\t}\n\n\terr = unstructured.SetNestedMap(specMap, podSpecMap, \"jobTemplate\", \"spec\", \"template\", \"spec\")\n\tif err != nil {\n\t\treturn true, nil, fmt.Errorf(\"%w: unable to template job spec\", err)\n\t}\n\n\tspecStr, err := yamlformat.Marshal(map[string]interface{}{\"spec\": specMap}, 0)\n\tif err != nil {\n\t\treturn true, nil, err\n\t}\n\tspecStr = strings.ReplaceAll(specStr, \"'\", \"\")\n\n\treturn true, &resultCron{\n\t\tname: name + \".yaml\",\n\t\tdata: struct {\n\t\t\tMeta string\n\t\t\tSpec string\n\t\t}{Meta: meta, Spec: specStr},\n\t\tvalues: 
values,\n\t}, nil\n}", "func (r *ReconcileDescheduler) createJob(descheduler *deschedulerv1alpha1.Descheduler) (*batch.Job, error) {\n\tactiveDeadline := int64(100)\n\tlog.Printf(\"Creating descheduler job\")\n\tjob := &batch.Job{\n\t\tTypeMeta: metav1.TypeMeta{\n\t\t\tKind: \"Job\",\n\t\t\tAPIVersion: batch.SchemeGroupVersion.String(),\n\t\t},\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: descheduler.Name,\n\t\t\tNamespace: descheduler.Namespace,\n\t\t},\n\t\tSpec: batch.JobSpec{\n\t\t\tActiveDeadlineSeconds: &activeDeadline,\n\t\t\tTemplate: v1.PodTemplateSpec{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tName: \"descheduler-job-spec\",\n\t\t\t\t},\n\t\t\t\tSpec: v1.PodSpec{\n\t\t\t\t\tVolumes: []v1.Volume{{\n\t\t\t\t\t\tName: \"policy-volume\",\n\t\t\t\t\t\tVolumeSource: v1.VolumeSource{\n\t\t\t\t\t\t\tConfigMap: &v1.ConfigMapVolumeSource{\n\t\t\t\t\t\t\t\tLocalObjectReference: v1.LocalObjectReference{\n\t\t\t\t\t\t\t\t\tName: descheduler.Name,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tRestartPolicy: \"Never\",\n\t\t\t\t\tContainers: []v1.Container{{\n\t\t\t\t\t\tName: \"openshift-descheduler\",\n\t\t\t\t\t\tImage: \"registry.svc.ci.openshift.org/openshift/origin-v4.0:descheduler\", // TODO: Make this configurable too.\n\t\t\t\t\t\tPorts: []v1.ContainerPort{{ContainerPort: 80}},\n\t\t\t\t\t\tResources: v1.ResourceRequirements{\n\t\t\t\t\t\t\tLimits: v1.ResourceList{\n\t\t\t\t\t\t\t\tv1.ResourceCPU: resource.MustParse(\"100m\"),\n\t\t\t\t\t\t\t\tv1.ResourceMemory: resource.MustParse(\"500Mi\"),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tRequests: v1.ResourceList{\n\t\t\t\t\t\t\t\tv1.ResourceCPU: resource.MustParse(\"100m\"),\n\t\t\t\t\t\t\t\tv1.ResourceMemory: resource.MustParse(\"500Mi\"),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tCommand: []string{\"/bin/descheduler\", \"--policy-config-file\", \"/policy-dir/policy.yaml\"},\n\t\t\t\t\t\tVolumeMounts: []v1.VolumeMount{{\n\t\t\t\t\t\t\tMountPath: \"/policy-dir\",\n\t\t\t\t\t\t\tName: \"policy-volume\",\n\t\t\t\t\t\t}},\n\t\t\t\t\t}},\n\t\t\t\t\tServiceAccountName: \"openshift-descheduler\", // TODO: This is hardcoded as of now, find a way to reference it from rbac.yaml.\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\terr := controllerutil.SetControllerReference(descheduler, job, r.scheme)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Error setting owner references %v\", err)\n\t}\n\treturn job, nil\n}", "func (c *Cron) schedule(schedule Schedule, cmd Job, spec string) {\n\tentry := &Entry{\n\t\tSchedule: schedule,\n\t\tJob: cmd,\n\t\tSpec: spec,\n\t}\n\tif !c.running {\n\t\tc.entries = append(c.entries, entry)\n\t\treturn\n\t}\n\n\tc.add <- entry\n}", "func newCronFederatedHPAs(c *AutoscalingV1alpha1Client, namespace string) *cronFederatedHPAs {\n\treturn &cronFederatedHPAs{\n\t\tclient: c.RESTClient(),\n\t\tns: namespace,\n\t}\n}", "func MockCronJob() batchv1beta1.CronJob {\n\tp := MockPod()\n\treturn batchv1beta1.CronJob{\n\t\tSpec: batchv1beta1.CronJobSpec{\n\t\t\tJobTemplate: batchv1beta1.JobTemplateSpec{\n\t\t\t\tSpec: batchv1.JobSpec{\n\t\t\t\t\tTemplate: corev1.PodTemplateSpec{Spec: p.Spec},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}", "func TestIssue279CronJobDeletion(t *testing.T) {\n\tctx := framework.NewTestCtx(t)\n\tdefer ctx.Cleanup()\n\n\tnamespace, err := ctx.GetOperatorNamespace()\n\tif err != nil {\n\t\tt.Fatalf(\"could not get namespace: %v\", err)\n\t}\n\tif err = SetupRbacInNamespace(namespace); err != nil {\n\t\tt.Error(err)\n\t}\n\n\tdefer DumpJobsLogsOnError(t, 
framework.Global, namespace)\n\terr = framework.AddToFrameworkScheme(apis.AddToScheme, &gitopsv1alpha1.GitOpsConfigList{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\teunomiaURI, found := os.LookupEnv(\"EUNOMIA_URI\")\n\tif !found {\n\t\teunomiaURI = \"https://github.com/kohlstechnology/eunomia\"\n\t}\n\teunomiaRef, found := os.LookupEnv(\"EUNOMIA_REF\")\n\tif !found {\n\t\teunomiaRef = \"master\"\n\t}\n\n\t// Step 1: create a CR with a Periodic trigger\n\n\tgitops := &gitopsv1alpha1.GitOpsConfig{\n\t\tTypeMeta: metav1.TypeMeta{\n\t\t\tKind: \"GitOpsConfig\",\n\t\t\tAPIVersion: \"eunomia.kohls.io/v1alpha1\",\n\t\t},\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: \"gitops-issue279\",\n\t\t\tNamespace: namespace,\n\t\t\tFinalizers: []string{\n\t\t\t\t\"gitopsconfig.eunomia.kohls.io/finalizer\",\n\t\t\t},\n\t\t},\n\t\tSpec: gitopsv1alpha1.GitOpsConfigSpec{\n\t\t\tTemplateSource: gitopsv1alpha1.GitConfig{\n\t\t\t\tURI: eunomiaURI,\n\t\t\t\tRef: eunomiaRef,\n\t\t\t\tContextDir: \"test/e2e/testdata/empty-directory\",\n\t\t\t},\n\t\t\tParameterSource: gitopsv1alpha1.GitConfig{\n\t\t\t\tURI: eunomiaURI,\n\t\t\t\tRef: eunomiaRef,\n\t\t\t\tContextDir: \"test/e2e/testdata/empty-yaml\",\n\t\t\t},\n\t\t\tTriggers: []gitopsv1alpha1.GitOpsTrigger{\n\t\t\t\t{\n\t\t\t\t\tType: \"Periodic\",\n\t\t\t\t\tCron: \"*/1 * * * *\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tTemplateProcessorImage: \"quay.io/kohlstechnology/eunomia-base:dev\",\n\t\t\tResourceHandlingMode: \"Apply\",\n\t\t\tResourceDeletionMode: \"Delete\",\n\t\t\tServiceAccountRef: \"eunomia-operator\",\n\t\t},\n\t}\n\tgitops.Annotations = map[string]string{\"gitopsconfig.eunomia.kohls.io/initialized\": \"true\"}\n\n\terr = framework.Global.Client.Create(context.TODO(), gitops, &framework.CleanupOptions{TestContext: ctx, Timeout: timeout, RetryInterval: retryInterval})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Step 2: Wait until CronJob creates a Job, whose pod succeeds\n\n\t// Two minutes timeout to give cronjob enough time to kick off\n\terr = wait.Poll(retryInterval, time.Second*120, func() (done bool, err error) {\n\t\tconst name = \"gitopsconfig-gitops-issue279-\"\n\t\tpod, err := GetPod(t, namespace, name, \"quay.io/kohlstechnology/eunomia-base:dev\", framework.Global.KubeClient)\n\t\tswitch {\n\t\tcase apierrors.IsNotFound(err):\n\t\t\tt.Logf(\"Waiting for availability of %s pod\", name)\n\t\t\treturn false, nil\n\t\tcase err != nil:\n\t\t\treturn false, err\n\t\tcase pod != nil && pod.Status.Phase == \"Succeeded\":\n\t\t\treturn true, nil\n\t\tcase pod != nil:\n\t\t\tt.Logf(\"Waiting for pod %s to succeed\", pod.Name)\n\t\t\treturn false, nil\n\t\tdefault:\n\t\t\tt.Logf(\"Waiting for pod %s\", name)\n\t\t\treturn false, nil\n\t\t}\n\t})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Step 3: Change the Periodic trigger to a Webhook one in GitOpsConfig, and apply it\n\n\terr = framework.Global.Client.Get(context.TODO(), util.NN{Name: \"gitops-issue279\", Namespace: namespace}, gitops)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tgitops.Spec.Triggers = []gitopsv1alpha1.GitOpsTrigger{{Type: \"Webhook\"}}\n\terr = framework.Global.Client.Update(context.TODO(), gitops)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tt.Logf(\"GitOpsConfig successfully updated\")\n\n\t// Step 4: Wait for CronJob to be deleted\n\n\terr = wait.Poll(retryInterval, timeout, func() (done bool, err error) {\n\t\tconst name = \"gitopsconfig-gitops-issue279-\"\n\t\tcronJob, err := GetCronJob(namespace, name, framework.Global.KubeClient)\n\t\tswitch {\n\t\tcase 
apierrors.IsNotFound(err) || cronJob == nil:\n\t\t\tt.Logf(\"CronJob %s successfully deleted\", name)\n\t\t\treturn true, nil\n\t\tcase err != nil:\n\t\t\treturn false, err\n\t\tcase cronJob != nil:\n\t\t\tt.Logf(\"Waiting for cronJob %s to be deleted\", cronJob.Name)\n\t\t\treturn false, nil\n\t\tdefault:\n\t\t\tt.Logf(\"Waiting for cronJob %s to be deleted\", name)\n\t\t\treturn false, nil\n\t\t}\n\t})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n}", "func NewDrupalCronSyncer(droplet *drupal.Drupal, c client.Client, scheme *runtime.Scheme) syncer.Interface {\n\tobjLabels := droplet.ComponentLabels(drupal.DrupalCron)\n\n\tobj := &batchv1beta1.CronJob{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: droplet.ComponentName(drupal.DrupalCron),\n\t\t\tNamespace: droplet.Namespace,\n\t\t},\n\t}\n\n\tvar (\n\t\tcronStartingDeadlineSeconds int64 = 10\n\t\tbackoffLimit int32\n\t\tactiveDeadlineSeconds int64 = 10\n\t\tsuccessfulJobsHistoryLimit int32 = 3\n\t\tfailedJobsHistoryLimit int32 = 1\n\t)\n\n\treturn syncer.NewObjectSyncer(\"DrupalCron\", droplet.Unwrap(), obj, c, scheme, func(existing runtime.Object) error {\n\t\tout := existing.(*batchv1beta1.CronJob)\n\n\t\tout.Labels = labels.Merge(labels.Merge(out.Labels, objLabels), common.ControllerLabels)\n\n\t\tout.Spec.Schedule = \"* * * * *\"\n\t\tout.Spec.ConcurrencyPolicy = \"Forbid\"\n\t\tout.Spec.StartingDeadlineSeconds = &cronStartingDeadlineSeconds\n\t\tout.Spec.SuccessfulJobsHistoryLimit = &successfulJobsHistoryLimit\n\t\tout.Spec.FailedJobsHistoryLimit = &failedJobsHistoryLimit\n\n\t\tout.Spec.JobTemplate.ObjectMeta.Labels = labels.Merge(objLabels, common.ControllerLabels)\n\t\tout.Spec.JobTemplate.Spec.BackoffLimit = &backoffLimit\n\t\tout.Spec.JobTemplate.Spec.ActiveDeadlineSeconds = &activeDeadlineSeconds\n\n\t\tcmd := []string{\"crond\", \"-f\", \"-d\", \"8\"}\n\t\ttemplate := droplet.JobPodTemplateSpec(cmd...)\n\n\t\tout.Spec.JobTemplate.Spec.Template.ObjectMeta = template.ObjectMeta\n\n\t\terr := mergo.Merge(&out.Spec.JobTemplate.Spec.Template.Spec, template.Spec, mergo.WithTransformers(transformers.PodSpec))\n\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t})\n}", "func New() Generator {\n\tspec := rspec.Spec{\n\t\tVersion: rspec.Version,\n\t\tPlatform: rspec.Platform{\n\t\t\tOS: runtime.GOOS,\n\t\t\tArch: runtime.GOARCH,\n\t\t},\n\t\tRoot: rspec.Root{\n\t\t\tPath: \"\",\n\t\t\tReadonly: false,\n\t\t},\n\t\tProcess: rspec.Process{\n\t\t\tTerminal: false,\n\t\t\tUser: rspec.User{},\n\t\t\tArgs: []string{\n\t\t\t\t\"sh\",\n\t\t\t},\n\t\t\tEnv: []string{\n\t\t\t\t\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\n\t\t\t\t\"TERM=xterm\",\n\t\t\t},\n\t\t\tCwd: \"/\",\n\t\t\tCapabilities: []string{\n\t\t\t\t\"CAP_CHOWN\",\n\t\t\t\t\"CAP_DAC_OVERRIDE\",\n\t\t\t\t\"CAP_FSETID\",\n\t\t\t\t\"CAP_FOWNER\",\n\t\t\t\t\"CAP_MKNOD\",\n\t\t\t\t\"CAP_NET_RAW\",\n\t\t\t\t\"CAP_SETGID\",\n\t\t\t\t\"CAP_SETUID\",\n\t\t\t\t\"CAP_SETFCAP\",\n\t\t\t\t\"CAP_SETPCAP\",\n\t\t\t\t\"CAP_NET_BIND_SERVICE\",\n\t\t\t\t\"CAP_SYS_CHROOT\",\n\t\t\t\t\"CAP_KILL\",\n\t\t\t\t\"CAP_AUDIT_WRITE\",\n\t\t\t},\n\t\t\tRlimits: []rspec.Rlimit{\n\t\t\t\t{\n\t\t\t\t\tType: \"RLIMIT_NOFILE\",\n\t\t\t\t\tHard: uint64(1024),\n\t\t\t\t\tSoft: uint64(1024),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tHostname: \"mrsdalloway\",\n\t\tMounts: []rspec.Mount{\n\t\t\t{\n\t\t\t\tDestination: \"/proc\",\n\t\t\t\tType: \"proc\",\n\t\t\t\tSource: \"proc\",\n\t\t\t\tOptions: nil,\n\t\t\t},\n\t\t\t{\n\t\t\t\tDestination: \"/dev\",\n\t\t\t\tType: 
\"tmpfs\",\n\t\t\t\tSource: \"tmpfs\",\n\t\t\t\tOptions: []string{\"nosuid\", \"strictatime\", \"mode=755\", \"size=65536k\"},\n\t\t\t},\n\t\t\t{\n\t\t\t\tDestination: \"/dev/pts\",\n\t\t\t\tType: \"devpts\",\n\t\t\t\tSource: \"devpts\",\n\t\t\t\tOptions: []string{\"nosuid\", \"noexec\", \"newinstance\", \"ptmxmode=0666\", \"mode=0620\", \"gid=5\"},\n\t\t\t},\n\t\t\t{\n\t\t\t\tDestination: \"/dev/shm\",\n\t\t\t\tType: \"tmpfs\",\n\t\t\t\tSource: \"shm\",\n\t\t\t\tOptions: []string{\"nosuid\", \"noexec\", \"nodev\", \"mode=1777\", \"size=65536k\"},\n\t\t\t},\n\t\t\t{\n\t\t\t\tDestination: \"/dev/mqueue\",\n\t\t\t\tType: \"mqueue\",\n\t\t\t\tSource: \"mqueue\",\n\t\t\t\tOptions: []string{\"nosuid\", \"noexec\", \"nodev\"},\n\t\t\t},\n\t\t\t{\n\t\t\t\tDestination: \"/sys\",\n\t\t\t\tType: \"sysfs\",\n\t\t\t\tSource: \"sysfs\",\n\t\t\t\tOptions: []string{\"nosuid\", \"noexec\", \"nodev\", \"ro\"},\n\t\t\t},\n\t\t},\n\t\tLinux: &rspec.Linux{\n\t\t\tResources: &rspec.Resources{\n\t\t\t\tDevices: []rspec.DeviceCgroup{\n\t\t\t\t\t{\n\t\t\t\t\t\tAllow: false,\n\t\t\t\t\t\tAccess: strPtr(\"rwm\"),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tNamespaces: []rspec.Namespace{\n\t\t\t\t{\n\t\t\t\t\tType: \"pid\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: \"network\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: \"ipc\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: \"uts\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: \"mount\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tDevices: []rspec.Device{},\n\t\t},\n\t}\n\treturn Generator{\n\t\tspec: &spec,\n\t}\n}", "func MakeSpec(\n\tconn, unique string,\n\tneedsUpdate func(db.Specifier, db.Specifier) bool,\n\tnewDBSpec db.Specifier,\n\tnewDBFunc DBMaker,\n\tnewDBError error,\n\tupdateFunc Updater,\n\tupdateErr error,\n) db.Specifier {\n\treturn &Spec{\n\t\tConn: conn,\n\t\tUnique: unique,\n\t\tUpdateNeeded: needsUpdate,\n\t\tNewDBSpec: newDBSpec,\n\t\tNewDBFunc: newDBFunc,\n\t\tNewDBError: newDBError,\n\t\tUpdateFunc: updateFunc,\n\t\tUpdateErr: updateErr,\n\t}\n}", "func InitCronExpr(ctx context.Context, duration time.Duration) error {\n\tif duration < 0 || duration > 12*time.Hour {\n\t\treturn moerr.NewNotSupported(ctx, \"export cron expr not support cycle: %v\", duration)\n\t}\n\tif duration < 5*time.Minute {\n\t\tMergeTaskCronExpr = fmt.Sprintf(\"@every %.0fs\", duration.Seconds())\n\t} else if duration < time.Hour {\n\t\tconst unit = 5 * time.Minute\n\t\tduration = (duration + unit - 1) / unit * unit\n\t\tswitch duration {\n\t\tcase 5 * time.Minute:\n\t\t\tMergeTaskCronExpr = MergeTaskCronExprEvery05Min\n\t\tcase 15 * time.Minute:\n\t\t\tMergeTaskCronExpr = MergeTaskCronExprEvery15Min\n\t\tdefault:\n\t\t\tMergeTaskCronExpr = fmt.Sprintf(\"@every %.0fm\", duration.Minutes())\n\t\t}\n\t} else {\n\t\tminHour := duration / time.Hour\n\t\tswitch minHour {\n\t\tcase 1:\n\t\t\tMergeTaskCronExpr = MergeTaskCronExprEvery1Hour\n\t\tcase 2:\n\t\t\tMergeTaskCronExpr = MergeTaskCronExprEvery2Hour\n\t\tcase 4:\n\t\t\tMergeTaskCronExpr = MergeTaskCronExprEvery4Hour\n\t\tdefault:\n\t\t\tvar hours = make([]string, 0, 12)\n\t\t\tfor h := minHour; h < 24; h += minHour {\n\t\t\t\thours = append(hours, strconv.Itoa(int(h)))\n\t\t\t}\n\t\t\tMergeTaskCronExpr = fmt.Sprintf(\"0 0 %s * * *\", strings.Join(hours, \",\"))\n\t\t}\n\t}\n\treturn nil\n}", "func (ci *CronImpl) Parse(spec string) (cron.Schedule, error) {\n\treturn cron.Parse(spec)\n}", "func CronJobReconciler(reportName string, mrc *kubermaticv1.MeteringReportConfiguration, caBundleName string, getRegistry registry.ImageRewriter, seed 
*kubermaticv1.Seed) reconciling.NamedCronJobReconcilerFactory {\n\treturn func() (string, reconciling.CronJobReconciler) {\n\t\treturn reportName, func(job *batchv1.CronJob) (*batchv1.CronJob, error) {\n\t\t\tvar args []string\n\t\t\targs = append(args, fmt.Sprintf(\"--ca-bundle=%s\", \"/opt/ca-bundle/ca-bundle.pem\"))\n\t\t\targs = append(args, fmt.Sprintf(\"--prometheus-api=http://%s.%s.svc\", prometheus.Name, seed.Namespace))\n\t\t\targs = append(args, fmt.Sprintf(\"--output-dir=%s\", reportName))\n\t\t\targs = append(args, fmt.Sprintf(\"--output-prefix=%s\", seed.Name))\n\n\t\t\tif mrc.Monthly {\n\t\t\t\targs = append(args, \"--last-month\")\n\t\t\t} else {\n\t\t\t\targs = append(args, fmt.Sprintf(\"--last-number-of-days=%d\", mrc.Interval))\n\t\t\t}\n\n\t\t\t// needs to be last\n\t\t\targs = append(args, mrc.Types...)\n\n\t\t\tif job.Labels == nil {\n\t\t\t\tjob.Labels = make(map[string]string)\n\t\t\t}\n\t\t\tjob.Labels[common.NameLabel] = reportName\n\t\t\tjob.Labels[common.ComponentLabel] = meteringName\n\n\t\t\tjob.Spec.Schedule = mrc.Schedule\n\t\t\tjob.Spec.JobTemplate.Spec.Parallelism = pointer.Int32(1)\n\t\t\tjob.Spec.JobTemplate.Spec.Template.Spec.ServiceAccountName = \"\"\n\t\t\tjob.Spec.JobTemplate.Spec.Template.Spec.DeprecatedServiceAccount = \"\"\n\t\t\tjob.Spec.JobTemplate.Spec.Template.Spec.RestartPolicy = corev1.RestartPolicyOnFailure\n\t\t\tjob.Spec.JobTemplate.Spec.Template.Spec.ImagePullSecrets = []corev1.LocalObjectReference{{Name: resources.ImagePullSecretName}}\n\n\t\t\tjob.Spec.JobTemplate.Spec.Template.Spec.Containers = []corev1.Container{\n\t\t\t\t{\n\t\t\t\t\tName: reportName,\n\t\t\t\t\tImage: getMeteringImage(getRegistry),\n\t\t\t\t\tImagePullPolicy: corev1.PullIfNotPresent,\n\t\t\t\t\tCommand: []string{\"/metering\"},\n\t\t\t\t\tArgs: args,\n\t\t\t\t\tEnv: []corev1.EnvVar{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: \"S3_ENDPOINT\",\n\t\t\t\t\t\t\tValueFrom: &corev1.EnvVarSource{\n\t\t\t\t\t\t\t\tSecretKeyRef: &corev1.SecretKeySelector{\n\t\t\t\t\t\t\t\t\tLocalObjectReference: corev1.LocalObjectReference{\n\t\t\t\t\t\t\t\t\t\tName: SecretName,\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\tKey: Endpoint,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: \"S3_BUCKET\",\n\t\t\t\t\t\t\tValueFrom: &corev1.EnvVarSource{\n\t\t\t\t\t\t\t\tSecretKeyRef: &corev1.SecretKeySelector{\n\t\t\t\t\t\t\t\t\tLocalObjectReference: corev1.LocalObjectReference{\n\t\t\t\t\t\t\t\t\t\tName: SecretName,\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\tKey: Bucket,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: \"ACCESS_KEY_ID\",\n\t\t\t\t\t\t\tValueFrom: &corev1.EnvVarSource{\n\t\t\t\t\t\t\t\tSecretKeyRef: &corev1.SecretKeySelector{\n\t\t\t\t\t\t\t\t\tLocalObjectReference: corev1.LocalObjectReference{\n\t\t\t\t\t\t\t\t\t\tName: SecretName,\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\tKey: AccessKey,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: \"SECRET_ACCESS_KEY\",\n\t\t\t\t\t\t\tValueFrom: &corev1.EnvVarSource{\n\t\t\t\t\t\t\t\tSecretKeyRef: &corev1.SecretKeySelector{\n\t\t\t\t\t\t\t\t\tLocalObjectReference: corev1.LocalObjectReference{\n\t\t\t\t\t\t\t\t\t\tName: SecretName,\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\tKey: SecretKey,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tVolumeMounts: []corev1.VolumeMount{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: \"ca-bundle\",\n\t\t\t\t\t\t\tMountPath: \"/opt/ca-bundle/\",\n\t\t\t\t\t\t\tReadOnly: 
true,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}\n\n\t\t\tjob.Spec.JobTemplate.Spec.Template.Spec.Volumes = []corev1.Volume{\n\t\t\t\t{\n\t\t\t\t\tName: \"ca-bundle\",\n\t\t\t\t\tVolumeSource: corev1.VolumeSource{\n\t\t\t\t\t\tConfigMap: &corev1.ConfigMapVolumeSource{\n\t\t\t\t\t\t\tLocalObjectReference: corev1.LocalObjectReference{\n\t\t\t\t\t\t\t\tName: caBundleName,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}\n\t\t\treturn job, nil\n\t\t}\n\t}\n}", "func (c *Controller) initCron() error {\n\tcronJob = cron.New()\n\tif err := cronJob.AddFunc(Schedule, c.Run); err != nil {\n\t\treturn err\n\t}\n\n\tcronJob.Start()\n\n\tc.ready <- struct{}{}\n\n\treturn nil\n}", "func New(\n\tmetrics []string,\n\tconsumer func(string),\n\trpm uint,\n) (*Generator, error) {\n\tc := cron.New()\n\tp := make(chan string, 1000)\n\tg := &Generator{\n\t\tcron: c,\n\t\tmetrics: metrics,\n\t\tprovide: p,\n\t\tlastProvide: 0,\n\t\trpm: rpm,\n\t}\n\n\t_, err := c.AddFunc(\"@every 10s\", func() { g.feed() })\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t_, err = c.AddFunc(\"@every 1m\", func() { g.consume(consumer) })\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn g, nil\n}", "func newCmdJobSchedule(ctx api.Context) *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse: \"schedule\",\n\t\tShort: \"Manage schedules of jobs\",\n\t}\n\n\tcmd.AddCommand(\n\t\tnewCmdJobScheduleAdd(ctx),\n\t\tnewCmdJobScheduleRemove(ctx),\n\t\tnewCmdJobScheduleShow(ctx),\n\t\tnewCmdJobScheduleUpdate(ctx),\n\t)\n\n\treturn cmd\n}", "func Test_AddTask(t *testing.T) {\n\tcron := NewCron()\n\tgo cron.Start()\n\n\tcron.AddTask(&Task{\n\t\tJob:FuncJob(func() {\n\t\t\tfmt.Println(\"hello cron\")\n\t\t}),\n\t\tRunTime:time.Now().UnixNano()+int64(time.Second*2),\n\t})\n\n\n\tcron.AddTask(&Task{\n\t\tJob:FuncJob(func() {\n\t\t\tfmt.Println(\"hello cron1\")\n\t\t}),\n\t\tRunTime:time.Now().UnixNano()+int64(time.Second*3),\n\t})\n\n\tcron.AddTask(&Task{\n\t\tJob: FuncJob(func() {\n\t\t\tfmt.Println(\"hello cron2 loop\")\n\t\t}),\n\t\tRunTime: time.Now().UnixNano() + int64(time.Second*4),\n\t\tSpacing: 1,\n\t\tEndTime: time.Now().UnixNano() + 9*(int64(time.Second)),\n\t})\n\n\ttimer := time.NewTimer(10 * time.Second)\n\tfor {\n\t\tselect {\n\t\tcase <-timer.C:\n\t\t fmt.Println(\"over\")\n\t\t}\n\t\tbreak\n\t}\n}", "func New() (scheduler *Scheduler) {\n\n\tscheduler = &Scheduler{}\n\tscheduler.tasks = make(map[string]Entry)\n\n\tscheduler.cron = cron.New(\n\t\tcron.WithLogger(cron.DefaultLogger),\n\t\tcron.WithChain(\n\t\t\tcron.Recover(cron.DefaultLogger),\n\t\t),\n\t)\n\n\treturn scheduler\n}", "func createSpec(name string) (*TestSpec, error) {\n\t// File must be a yaml file.\n\tif filepath.Ext(name) != \".yml\" {\n\t\treturn nil, fmt.Errorf(\"Cannot parse non-yaml file: %s\", name)\n\t}\n\n\t// Read testspec yaml file contents.\n\tcontents, err := ioutil.ReadFile(name)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Unable to read yaml test spec: %s. Error: %v\", name, err)\n\t}\n\n\t// Unmarshals testspec yaml file contents into struct.\n\ttest := &TestSpec{}\n\tif err = yaml.Unmarshal(contents, &test); err != nil {\n\t\treturn nil, fmt.Errorf(\"Unable to unmarshal yaml test spec: %s. 
Error: %v\", name, err)\n\t}\n\n\t// Instanstiates cache for templating.\n\ttest.Cache = make(map[string]string)\n\n\t// Assigns default values for commands.\n\tfor i := range test.Commands {\n\t\t// Skip command by removing from command list.\n\t\tif test.Commands[i].Skip == true {\n\t\t\ttest.Commands = append(test.Commands[:i], test.Commands[i+1:]...)\n\t\t}\n\n\t\t// Default commandspec timeout.\n\t\tif test.Commands[i].Timeout == \"\" {\n\t\t\ttest.Commands[i].Timeout = test.CmdTimeout\n\t\t}\n\t}\n\treturn test, nil\n}", "func NewWorkflowTemplateSpec() cronworkflowv1.WorkflowTemplateSpec {\n\treturn cronworkflowv1.WorkflowTemplateSpec{\n\t\tSpec: workflowv1.WorkflowSpec{\n\t\t\tSteps: []workflowv1.WorkflowStep{\n\t\t\t\t{\n\t\t\t\t\tName: \"one\",\n\t\t\t\t\tJobTemplate: ValidFakeTemplateSpec(),\n\t\t\t\t},\n\t\t\t},\n\t\t\tSelector: &metav1.LabelSelector{\n\t\t\t\tMatchLabels: map[string]string{\"foo\": \"bar\"},\n\t\t\t},\n\t\t},\n\t}\n}", "func NewCronScheduler() *CronScheduler {\n\treturn &CronScheduler{\n\t\tc: cron.New(),\n\t}\n}", "func newSchedule() *schedule {\n\treturn &schedule{}\n}", "func generateModularDailyCronSchedule(input []byte) string {\n\ta := big.NewInt(0).SetBytes(input)\n\tvar hi, mi big.Int\n\tm := mi.Mod(a, big.NewInt(60))\n\th := hi.Mod(a, big.NewInt(24))\n\treturn fmt.Sprintf(\"%d %d * * *\", m.Int64(), h.Int64())\n}", "func NewTimespec(t time.Time) Timespec {\n\treturn Timespec{t.Unix(), int64(t.Nanosecond())}\n}", "func Parse(spec string) (cron.Schedule, error) {\n\treturn standaloneParser.Parse(spec)\n}", "func (b *RepairCronJob) configureDesired() {\n\tb.desired = &batchv1beta1.CronJob{\n\t\tTypeMeta: GetCronJobTypeMeta(),\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: b.buildCronJobName(),\n\t\t\tNamespace: b.cluster.GetNamespace(),\n\t\t\tLabels: b.buildLabels(),\n\t\t},\n\t\tSpec: batchv1beta1.CronJobSpec{\n\t\t\tSchedule: b.cluster.Spec.Repair.Schedule,\n\t\t\tConcurrencyPolicy: batchv1beta1.ForbidConcurrent,\n\t\t\tSuccessfulJobsHistoryLimit: &successfulJobsHistoryLimit,\n\t\t\tFailedJobsHistoryLimit: &failedJobsHistoryLimit,\n\t\t\tJobTemplate: batchv1beta1.JobTemplateSpec{\n\t\t\t\tSpec: v1.JobSpec{\n\t\t\t\t\tBackoffLimit: &backoffLimit,\n\t\t\t\t\tTemplate: b.buildCronJobPodTemplateSpec(),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\tb.setOwner(asOwner(b.cluster))\n}", "func BuildJobSpec(pod *podtemplatespec.Builder) *jobspec.Builder {\n\tjobSpecObj := jobspec.NewBuilder().\n\t\tWithPodTemplateSpecBuilder(pod)\n\t_, err := jobSpecObj.Build()\n\tif err != nil {\n\t\tlog.Errorln(err)\n\t}\n\treturn jobSpecObj\n}", "func NewWithLocation(location *time.Location, pool *redis.Pool) *Cron {\n\treturn &Cron{\n\t\tentries: nil,\n\t\tadd: make(chan *Entry),\n\t\tstop: make(chan struct{}),\n\t\tsnapshot: make(chan []*Entry),\n\t\trunning: false,\n\t\tErrorLog: nil,\n\t\tlocation: location,\n\t\tpool: pool,\n\t\tkeyPrefix: DefaultKeyPrefix,\n\t\tkeyCleared: DefaultKeyCleared,\n\t}\n}", "func NewBootstrapSpec(spec *job.BootstrapSpec) *BootstrapSpec {\n\treturn &BootstrapSpec{\n\t\tContractID: spec.ContractID,\n\t\tRelay: spec.Relay,\n\t\tRelayConfig: spec.RelayConfig,\n\t\tBlockchainTimeout: spec.BlockchainTimeout,\n\t\tContractConfigTrackerPollInterval: spec.ContractConfigTrackerPollInterval,\n\t\tContractConfigConfirmations: spec.ContractConfigConfirmations,\n\t\tCreatedAt: spec.CreatedAt,\n\t\tUpdatedAt: spec.UpdatedAt,\n\t}\n}", "func New() plugin.Plugin {\n\treturn &cronPlugin{}\n}", "func (j *ScheduledJob) Create(ctx context.Context, ex 
sqlutil.InternalExecutor, txn *kv.Txn) error {\n\tif j.rec.ScheduleID != 0 {\n\t\treturn errors.New(\"cannot specify schedule id when creating new cron job\")\n\t}\n\n\tif !j.isDirty() {\n\t\treturn errors.New(\"no settings specified for scheduled job\")\n\t}\n\n\tcols, qargs, err := j.marshalChanges()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\trow, retCols, err := ex.QueryRowExWithCols(ctx, \"sched-create\", txn,\n\t\tsessiondata.InternalExecutorOverride{User: security.RootUserName()},\n\t\tfmt.Sprintf(\"INSERT INTO %s (%s) VALUES(%s) RETURNING schedule_id\",\n\t\t\tj.env.ScheduledJobsTableName(), strings.Join(cols, \",\"), generatePlaceholders(len(qargs))),\n\t\tqargs...,\n\t)\n\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"failed to create new schedule\")\n\t}\n\tif row == nil {\n\t\treturn errors.New(\"failed to create new schedule\")\n\t}\n\n\treturn j.InitFromDatums(row, retCols)\n}", "func (r *BatchV1Beta1CronJobRule) createRule(cronjob *batchV1beta1.CronJob, ydr *YamlDerivedResource) *rule {\n\trule := &rule{\n\t\tID: r.ID,\n\t\tPrereqs: r.Prereqs,\n\t\tCondition: func() bool {\n\t\t\tif r.Condition == nil {\n\t\t\t\treturn true\n\t\t\t}\n\t\t\treturn r.Condition(cronjob)\n\t\t},\n\t\tMessage: r.Message,\n\t\tLevel: r.Level,\n\t\tResources: []*YamlDerivedResource{ydr},\n\t\tFix: func() bool {\n\t\t\tif r.Fix == nil {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\treturn r.Fix(cronjob)\n\t\t},\n\t\tFixDescription: func() string {\n\t\t\tif r.FixDescription == nil {\n\t\t\t\treturn \"\"\n\t\t\t}\n\t\t\treturn r.FixDescription(cronjob)\n\t\t},\n\t}\n\treturn rule\n}", "func New(apiSchedule guestApi.Schedule) (schedule HumanReadableSchedule){\n\n for _, period := range apiSchedule.Periods {\n\n // specify format for parsing time strings\n form := \"15:04:00\"\n\n startDay := period.Open.Day\n startTime, _ := time.Parse(form, period.Open.Time)\n\n endDay := period.Close.Day\n endTime, _ := time.Parse(form, period.Close.Time)\n\n // Determine if the period spans multiple days\n // Handle wrap around from sat to sunday\n var daySpan int\n if (endDay >= startDay){\n daySpan = (endDay - startDay)\n } else {\n daySpan = (7 - startDay + endDay)\n }\n\n // Determine if we need to split this period into multiple daily hours\n // We allow the end times to go past midnight for a given day (i.e. M:10pm - 1am)\n // but if we go past 4am we will consider that the start of a new day\n startOfNewDay, _ := time.Parse(form, \"04:00:00\")\n\n // split periods if necessary until we are no longer spanning multiple days\n var openHours OpenHours\n for daySpan >=0 {\n if ( daySpan < 1 || (daySpan == 1 && endTime.Before(startOfNewDay))){\n // this is the normal case where the start and end times are on the same day\n // or the end time is \"late night\" hours on the next day\n openHours = OpenHours{startTime, endTime}\n daySpan = 0\n } else {\n // since the period spans past the start of a new day we will split it up\n // and carry the remainder of the period over to a new day\n openHours = OpenHours{startTime, startOfNewDay}\n }\n\n schedule[startDay] = append(schedule[startDay], openHours)\n\n startTime = startOfNewDay\n startDay = (startDay + 1) % 7 // wrap sat to sun\n daySpan -= 1\n }\n }\n\n // sort the open hours for each day by start time\n for _, dailyOpenHours := range schedule {\n sort.Sort(dailyOpenHours)\n }\n\n return\n}", "func NewScheduler(cronLabelName string, metadataClient *metadata.Client, cattleClient *cattle.Client) (*Scheduler, error) {\n\tschedules := make(model.Schedules)\n\n\treturn 
&Scheduler{\n\t\tCattleClient: cattleClient,\n\t\tMetadataClient: metadataClient,\n\t\tCronLabelName: cronLabelName,\n\t\tSchedules: &schedules,\n\t}, nil\n}", "func New(s Servicer, b telegram.TBWrapBot, r *reminder.Reminder) func() {\n\treturn func() {\n\t\tbuttons := NewButtons()\n\t\tvar inlineKeys [][]telebot.InlineButton\n\t\tvar inlineButtons []telebot.InlineButton\n\n\t\tsnoozeBtn := *buttons[SnoozeBtn]\n\t\tsnoozeBtn.Data = strconv.Itoa(r.ID)\n\t\tinlineButtons = append(\n\t\t\tinlineButtons,\n\t\t\tsnoozeBtn,\n\t\t)\n\n\t\t// if repeatable job add button to complete it\n\t\tif !r.Job.RunOnlyOnce || (r.Job.RunOnlyOnce && r.Job.RepeatSchedule != nil) {\n\t\t\tcompleteBtn := *buttons[CompleteBtn]\n\t\t\tcompleteBtn.Data = strconv.Itoa(r.ID)\n\t\t\tinlineButtons = append(inlineButtons, completeBtn)\n\t\t}\n\t\tinlineKeys = append(inlineKeys, inlineButtons)\n\n\t\tmessageWithIcon := fmt.Sprintf(\"🗓 %s\", r.Data.Message)\n\t\t_, err := b.Send(&tb.Chat{ID: int64(r.Data.RecipientID)}, messageWithIcon, &telebot.ReplyMarkup{\n\t\t\tInlineKeyboard: inlineKeys,\n\t\t})\n\t\tif err != nil {\n\t\t\tlog.Printf(\"NewReminderCronFunc err: %q\", err)\n\t\t\treturn\n\t\t}\n\n\t\tif !r.Job.RunOnlyOnce {\n\t\t\treturn\n\t\t}\n\n\t\tif r.Job.RepeatSchedule != nil {\n\t\t\tupdateErr := s.UpdateReminderWithRepeatSchedule(r)\n\t\t\tif updateErr != nil {\n\t\t\t\tlog.Printf(\"NewReminderCronFunc UpdateReminderWithRepeatSchedule err: %q\", updateErr)\n\t\t\t\treturn\n\t\t\t}\n\t\t\treturn\n\t\t}\n\n\t\terr = s.Complete(r)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"NewReminderCronFunc complete err: %q\", err)\n\t\t\treturn\n\t\t}\n\t}\n}", "func newScheduledJobs(c *BatchClient, namespace string) *scheduledJobs {\n\treturn &scheduledJobs{c, namespace}\n}", "func newDeployment(t *testing.T, procUpdates func(ProcessUpdate), kubeClient kubernetes.Interface) *Deployment {\n\tcompList, err := config.NewComponentList(\"../test/data/componentlist.yaml\")\n\tassert.NoError(t, err)\n\tconfig := &config.Config{\n\t\tCancelTimeout: cancelTimeout,\n\t\tQuitTimeout: quitTimeout,\n\t\tBackoffInitialIntervalSeconds: 1,\n\t\tBackoffMaxElapsedTimeSeconds: 1,\n\t\tLog: logger.NewLogger(true),\n\t\tComponentList: compList,\n\t}\n\tcore := newCore(config, &overrides.Builder{}, kubeClient, procUpdates)\n\treturn &Deployment{core}\n}", "func Make(t *v2alpha2.TaskSpec) (core.Task, error) {\n\tif *t.Task != TaskName {\n\t\treturn nil, fmt.Errorf(\"task need to be '%s'\", TaskName)\n\t}\n\tvar jsonBytes []byte\n\tvar task Task\n\t// convert t to jsonBytes\n\tjsonBytes, err := json.Marshal(t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t// convert jsonString to ExecTask\n\ttask = Task{}\n\terr = json.Unmarshal(jsonBytes, &task)\n\treturn &task, err\n}", "func (*CronTrigger) Descriptor() ([]byte, []int) {\n\treturn file_toit_model_job_proto_rawDescGZIP(), []int{13}\n}", "func CreateCronExpression(interval int) string {\n\tunit := \"minutes\"\n\tif interval == 1 {\n\t\tunit = \"minute\"\n\t}\n\treturn fmt.Sprintf(\"rate(%d %s)\", interval, unit)\n}", "func newPodForCR(cr *cnatv1alpha1.At) *corev1.Pod {\n\tlabels := map[string]string{\n\t\t\"app\": cr.Name,\n\t}\n\treturn &corev1.Pod{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: cr.Name + \"-pod\",\n\t\t\tNamespace: cr.Namespace,\n\t\t\tLabels: labels,\n\t\t},\n\t\tSpec: corev1.PodSpec{\n\t\t\tContainers: []corev1.Container{\n\t\t\t\t{\n\t\t\t\t\tName: \"busybox\",\n\t\t\t\t\tImage: \"busybox\",\n\t\t\t\t\tCommand: strings.Split(cr.Spec.Command, \" 
\"),\n\t\t\t\t},\n\t\t\t},\n\t\t\tRestartPolicy: corev1.RestartPolicyOnFailure,\n\t\t},\n\t}\n}", "func makePodSpecPatch(\n\tcontainer *pipelinespec.PipelineDeploymentConfig_PipelineContainerSpec,\n\tcomponentSpec *pipelinespec.ComponentSpec,\n\texecutorInput *pipelinespec.ExecutorInput,\n\texecutionID int64,\n\tpipelineName string,\n\trunID string,\n) (string, error) {\n\texecutorInputJSON, err := protojson.Marshal(executorInput)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"failed to make podSpecPatch: %w\", err)\n\t}\n\tcomponentJSON, err := protojson.Marshal(componentSpec)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"failed to make podSpecPatch: %w\", err)\n\t}\n\n\tuserCmdArgs := make([]string, 0, len(container.Command)+len(container.Args))\n\tuserCmdArgs = append(userCmdArgs, container.Command...)\n\tuserCmdArgs = append(userCmdArgs, container.Args...)\n\tlauncherCmd := []string{\n\t\t// TODO(Bobgy): workaround argo emissary executor bug, after we upgrade to an argo version with the bug fix, we can remove the following line.\n\t\t// Reference: https://github.com/argoproj/argo-workflows/issues/7406\n\t\t\"/var/run/argo/argoexec\", \"emissary\", \"--\",\n\t\tcomponent.KFPLauncherPath,\n\t\t// TODO(Bobgy): no need to pass pipeline_name and run_id, these info can be fetched via pipeline context and pipeline run context which have been created by root DAG driver.\n\t\t\"--pipeline_name\", pipelineName,\n\t\t\"--run_id\", runID,\n\t\t\"--execution_id\", fmt.Sprintf(\"%v\", executionID),\n\t\t\"--executor_input\", string(executorInputJSON),\n\t\t\"--component_spec\", string(componentJSON),\n\t\t\"--pod_name\",\n\t\tfmt.Sprintf(\"$(%s)\", component.EnvPodName),\n\t\t\"--pod_uid\",\n\t\tfmt.Sprintf(\"$(%s)\", component.EnvPodUID),\n\t\t\"--mlmd_server_address\",\n\t\tfmt.Sprintf(\"$(%s)\", component.EnvMetadataHost),\n\t\t\"--mlmd_server_port\",\n\t\tfmt.Sprintf(\"$(%s)\", component.EnvMetadataPort),\n\t\t\"--\", // separater before user command and args\n\t}\n\tres := k8score.ResourceRequirements{\n\t\tLimits: map[k8score.ResourceName]k8sres.Quantity{},\n\t}\n\tmemoryLimit := container.GetResources().GetMemoryLimit()\n\tif memoryLimit != 0 {\n\t\tq, err := k8sres.ParseQuantity(fmt.Sprintf(\"%vG\", memoryLimit))\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tres.Limits[k8score.ResourceMemory] = q\n\t}\n\tcpuLimit := container.GetResources().GetCpuLimit()\n\tif cpuLimit != 0 {\n\t\tq, err := k8sres.ParseQuantity(fmt.Sprintf(\"%v\", cpuLimit))\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tres.Limits[k8score.ResourceCPU] = q\n\t}\n\taccelerator := container.GetResources().GetAccelerator()\n\tif accelerator != nil {\n\t\treturn \"\", fmt.Errorf(\"accelerator resources are not supported yet: https://github.com/kubeflow/pipelines/issues/7043\")\n\t}\n\tpodSpec := &k8score.PodSpec{\n\t\tContainers: []k8score.Container{{\n\t\t\tName: \"main\", // argo task user container is always called \"main\"\n\t\t\tCommand: launcherCmd,\n\t\t\tArgs: userCmdArgs,\n\t\t\tImage: container.Image,\n\t\t\tResources: res,\n\t\t}},\n\t}\n\tpodSpecPatchBytes, err := json.Marshal(podSpec)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"JSON marshaling pod spec patch: %w\", err)\n\t}\n\treturn string(podSpecPatchBytes), nil\n}", "func testSchedule() *library.Schedule {\n\treturn &library.Schedule{\n\t\tID: new(int64),\n\t\tRepoID: new(int64),\n\t\tActive: new(bool),\n\t\tName: new(string),\n\t\tEntry: new(string),\n\t\tCreatedAt: new(int64),\n\t\tCreatedBy: new(string),\n\t\tUpdatedAt: 
new(int64),\n\t\tUpdatedBy: new(string),\n\t\tScheduledAt: new(int64),\n\t\tBranch: new(string),\n\t}\n}", "func newExecPodSpec(ns, generateName string) *api.Pod {\n\timmediate := int64(0)\n\tpod := &api.Pod{\n\t\tObjectMeta: api.ObjectMeta{\n\t\t\tGenerateName: generateName,\n\t\t\tNamespace: ns,\n\t\t},\n\t\tSpec: api.PodSpec{\n\t\t\tTerminationGracePeriodSeconds: &immediate,\n\t\t\tContainers: []api.Container{\n\t\t\t\t{\n\t\t\t\t\tName: \"exec\",\n\t\t\t\t\tImage: \"gcr.io/google_containers/busybox:1.24\",\n\t\t\t\t\tCommand: []string{\"sh\", \"-c\", \"while true; do sleep 5; done\"},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\treturn pod\n}", "func newTaskBuilder(b *jobBuilder, name string) *taskBuilder {\n\tparts, err := b.jobNameSchema.ParseJobName(name)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\treturn &taskBuilder{\n\t\tjobBuilder: b,\n\t\tparts: parts,\n\t\tName: name,\n\t\tSpec: &specs.TaskSpec{},\n\t\trecipeProperties: map[string]string{},\n\t}\n}", "func CreatePlanSpec(spec *PlanSpec) *plan.Spec {\n\treturn createPlanSpec(spec.Nodes, spec.Edges, spec.Resources, spec.Now)\n}", "func NewSpec(yamlConfig string) (*Spec, error) {\n\ts := &Spec{\n\t\tyamlConfig: yamlConfig,\n\t}\n\n\tmeta := &MetaSpec{}\n\terr := yaml.Unmarshal([]byte(yamlConfig), meta)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unmarshal failed: %v\", err)\n\t}\n\tvr := v.Validate(meta, []byte(yamlConfig))\n\tif !vr.Valid() {\n\t\treturn nil, fmt.Errorf(\"validate metadata failed: \\n%s\", vr)\n\t}\n\n\trootObject, exists := objectRegistry[meta.Kind]\n\tif !exists {\n\t\treturn nil, fmt.Errorf(\"kind %s not found\", meta.Kind)\n\t}\n\n\ts.meta, s.objectSpec = meta, rootObject.DefaultSpec()\n\n\terr = yaml.Unmarshal([]byte(yamlConfig), s.objectSpec)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unmarshal failed: %v\", err)\n\t}\n\tvr = v.Validate(s.objectSpec, []byte(yamlConfig))\n\tif !vr.Valid() {\n\t\treturn nil, fmt.Errorf(\"validate spec failed: \\n%s\", vr)\n\t}\n\n\treturn s, nil\n}", "func NewRepairCronJob(cc *v1alpha1.CassandraCluster) *RepairCronJob {\n\treturn &RepairCronJob{\n\t\tcluster: cc,\n\t}\n}", "func createPatch(patches []k8sutils.JSONPatchOp, schedPath string) ([]byte, error) {\n\tallPatches := append(patches, k8sutils.JSONPatchOp{\n\t\tOperation: \"replace\",\n\t\tPath: schedPath,\n\t\tValue: []byte(strconv.Quote(storkScheduler)),\n\t})\n\tpatchBytes, err := json.Marshal(&allPatches)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to marshal the patch object: %w\", err)\n\t}\n\treturn patchBytes, nil\n}", "func (c *FakeTZCronJobs) Create(tZCronJob *v1alpha1.TZCronJob) (result *v1alpha1.TZCronJob, err error) {\n\tobj, err := c.Fake.\n\t\tInvokes(testing.NewCreateAction(tzcronjobsResource, c.ns, tZCronJob), &v1alpha1.TZCronJob{})\n\n\tif obj == nil {\n\t\treturn nil, err\n\t}\n\treturn obj.(*v1alpha1.TZCronJob), err\n}", "func CreateCronTask(ctx context.Context, executorID task.TaskCode, taskService taskservice.TaskService) error {\n\tvar err error\n\tctx, span := trace.Start(ctx, \"MetricCreateCronTask\")\n\tdefer span.End()\n\tlogger := runtime.ProcessLevelRuntime().Logger().WithContext(ctx).Named(LoggerName)\n\tlogger.Debug(fmt.Sprintf(\"init metric task with CronExpr: %s\", StorageUsageTaskCronExpr))\n\tif err = taskService.CreateCronTask(ctx, TaskMetadata(StorageUsageCronTask, executorID), StorageUsageTaskCronExpr); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func InitializeCronEngine() *cron.Cron {\n\tc := cron.New()\n\tc.Start()\n\treturn c\n}", "func Fabricate() 
*InAppCron {\n\tctx, cancelFunc := context.WithCancel(context.Background())\n\treturn &InAppCron{\n\t\tcronList: []provider.InAppCronAdapter{},\n\t\tctx: ctx,\n\t\tcancelFunc: cancelFunc,\n\t\twg: &sync.WaitGroup{},\n\t}\n}", "func makeJob(c context.Context, j *engine.Job) *schedulerJob {\n\ttraits, err := presentation.GetJobTraits(c, config(c).Catalog, j)\n\tif err != nil {\n\t\tlogging.WithError(err).Warningf(c, \"Failed to get task traits for %s\", j.JobID)\n\t}\n\n\tnow := clock.Now(c).UTC()\n\tnextRun := \"\"\n\tswitch ts := j.State.TickTime; {\n\tcase ts == schedule.DistantFuture:\n\t\tnextRun = \"-\"\n\tcase !ts.IsZero():\n\t\tnextRun = humanize.RelTime(ts, now, \"ago\", \"from now\")\n\tdefault:\n\t\tnextRun = \"not scheduled yet\"\n\t}\n\n\t// Internal state names aren't very user friendly. Introduce some aliases.\n\tstate := presentation.GetPublicStateKind(j, traits)\n\tlabelClass := stateToLabelClass[state]\n\tif j.State.State == engine.JobStateSlowQueue {\n\t\t// Job invocation is still in the task queue, but new invocation should be\n\t\t// starting now (so the queue is lagging for some reason).\n\t\tlabelClass = \"label-warning\"\n\t}\n\t// Put triggers after regular jobs.\n\tsortGroup := \"A\"\n\tif j.Flavor == catalog.JobFlavorTrigger {\n\t\tsortGroup = \"B\"\n\t}\n\n\treturn &schedulerJob{\n\t\tProjectID: j.ProjectID,\n\t\tJobName: j.GetJobName(),\n\t\tSchedule: j.Schedule,\n\t\tDefinition: taskToText(j.Task),\n\t\tRevision: j.Revision,\n\t\tRevisionURL: j.RevisionURL,\n\t\tState: string(state),\n\t\tOverruns: j.State.Overruns,\n\t\tNextRun: nextRun,\n\t\tPaused: j.Paused,\n\t\tLabelClass: labelClass,\n\t\tJobFlavorIcon: flavorToIconClass[j.Flavor],\n\t\tJobFlavorTitle: flavorToTitle[j.Flavor],\n\n\t\tsortGroup: sortGroup,\n\t\tnow: now,\n\t\ttraits: traits,\n\t}\n}", "func Equal(current, desired *batchv1.CronJob) bool {\n\treturn equality.Semantic.DeepEqual(current, desired)\n}", "func CreateJobFromCronjob(cronJob *batchv1beta1.CronJob) (*batchv1.Job, error) {\n\t// This duplicates the logic used by kubectl to create a Job from a CronJob\n\tannotations := make(map[string]string)\n\tannotations[\"cronjob.kubernetes.io/instantiate\"] = \"manual\"\n\tfor k, v := range cronJob.Spec.JobTemplate.Annotations {\n\t\tannotations[k] = v\n\t}\n\n\tjobDef := &batchv1.Job{\n\t\tTypeMeta: metav1.TypeMeta{APIVersion: batchv1.SchemeGroupVersion.String(), Kind: \"Job\"},\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: fmt.Sprintf(\"%s-%d\", cronJob.ObjectMeta.Name, time.Now().Unix()),\n\t\t\tAnnotations: annotations,\n\t\t\tLabels: cronJob.Spec.JobTemplate.Labels,\n\t\t\tOwnerReferences: []metav1.OwnerReference{\n\t\t\t\t*metav1.NewControllerRef(cronJob, appsv1.SchemeGroupVersion.WithKind(\"CronJob\")),\n\t\t\t},\n\t\t},\n\t\tSpec: cronJob.Spec.JobTemplate.Spec,\n\t}\n\n\tif job, err := client.BatchV1().Jobs(cronJob.ObjectMeta.Namespace).Create(jobDef); err != nil {\n\t\treturn nil, err\n\t} else {\n\t\treturn job, nil\n\t}\n}", "func Make(t *v2alpha2.TaskSpec) (core.Task, error) {\n\tif *t.Task != TaskName {\n\t\treturn nil, fmt.Errorf(\"library and task need to be '%s'\", TaskName)\n\t}\n\tvar jsonBytes []byte\n\tvar task Task\n\t// convert t to jsonBytes\n\tjsonBytes, err := json.Marshal(t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t// convert jsonString to task\n\ttask = Task{}\n\terr = json.Unmarshal(jsonBytes, &task)\n\treturn &task, err\n}", "func newScheduler(config infra.Config, logger *log.Logger) (*sched.Scheduler, error) {\n\tvar (\n\t\terr error\n\t\ttdb 
taskdb.TaskDB\n\t\trepo reflow.Repository\n\t\tlimit int\n\t)\n\tif err = config.Instance(&tdb); err != nil {\n\t\tif !strings.HasPrefix(err.Error(), \"no providers for type taskdb.TaskDB\") {\n\t\t\treturn nil, err\n\t\t}\n\t\tlogger.Debug(err)\n\t}\n\tif err = config.Instance(&repo); err != nil {\n\t\treturn nil, err\n\t}\n\tif limit, err = transferLimit(config); err != nil {\n\t\treturn nil, err\n\t}\n\ttransferer := &repository.Manager{\n\t\tStatus: nil,\n\t\tPendingTransfers: repository.NewLimits(limit),\n\t\tStat: repository.NewLimits(statLimit),\n\t\tLog: logger,\n\t}\n\tif repo != nil {\n\t\ttransferer.PendingTransfers.Set(repo.URL().String(), int(^uint(0)>>1))\n\t}\n\tscheduler := sched.New()\n\n\tscheduler.Transferer = transferer\n\tscheduler.Log = logger.Tee(nil, \"scheduler: \")\n\tscheduler.TaskDB = tdb\n\tscheduler.ExportStats()\n\n\treturn scheduler, nil\n}", "func newPodForCR(cr *devopsv1.Server) *corev1.Pod {\n\tlabels := map[string]string{\n\t\t\"app\": cr.Name,\n\t}\n\treturn &corev1.Pod{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: cr.Name + \"-pod\",\n\t\t\tNamespace: cr.Namespace,\n\t\t\tLabels: labels,\n\t\t},\n\t\tSpec: corev1.PodSpec{\n\t\t\tContainers: []corev1.Container{\n\t\t\t\t{\n\t\t\t\t\tName: \"busybox\",\n\t\t\t\t\tImage: \"busybox\",\n\t\t\t\t\tCommand: []string{\"sleep\", \"3600\"},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}", "func newDeploymentForCR(cr *tpokkiv1alpha1.GatlingTask, cm *corev1.ConfigMap) *appsv1.Deployment {\n\tlabels := map[string]string{\n\t\t\"app\": \"gatling\",\n\t\t\"gatling_cr\": cr.Name,\n\t}\n\n\tvolumeName := \"configmap-simulations\"\n\t// location must be /input, see https://github.com/tpokki/gatling-image\n\tvolumePath := \"/input\"\n\n\treturn &appsv1.Deployment{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: cr.Name,\n\t\t\tNamespace: cr.Namespace,\n\t\t},\n\t\tSpec: appsv1.DeploymentSpec{\n\t\t\tReplicas: &cr.Spec.Replicas,\n\t\t\tSelector: &metav1.LabelSelector{\n\t\t\t\tMatchLabels: labels,\n\t\t\t},\n\t\t\tTemplate: corev1.PodTemplateSpec{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tLabels: labels,\n\t\t\t\t\tAnnotations: map[string]string{\n\t\t\t\t\t\t\"prometheus.io/scrape\": \"true\",\n\t\t\t\t\t\t\"prometheus.io/port\": \"9102\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tSpec: corev1.PodSpec{\n\t\t\t\t\tVolumes: []corev1.Volume{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: volumeName,\n\t\t\t\t\t\t\tVolumeSource: corev1.VolumeSource{\n\t\t\t\t\t\t\t\tConfigMap: &corev1.ConfigMapVolumeSource{\n\t\t\t\t\t\t\t\t\tLocalObjectReference: corev1.LocalObjectReference{\n\t\t\t\t\t\t\t\t\t\tName: cm.ObjectMeta.Name,\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tRestartPolicy: cr.Spec.RestartPolicy,\n\t\t\t\t\tContainers: []corev1.Container{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: \"gatling\",\n\t\t\t\t\t\t\tImage: \"quay.io/tpokki/gatling:0.0.1-3.3.1-prometheus\",\n\t\t\t\t\t\t\tArgs: []string{\"-nr\", \"-s\", cr.Spec.ScenarioSpec.Name},\n\t\t\t\t\t\t\tResources: cr.Spec.ResourceRequirements,\n\t\t\t\t\t\t\tVolumeMounts: []corev1.VolumeMount{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tName: volumeName,\n\t\t\t\t\t\t\t\t\tMountPath: volumePath,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}", "func New(executor *mesos.ExecutorInfo, scheduleFunc PodScheduleFunc, client *client.Client, helper tools.EtcdHelper, sr service.Registry) *KubernetesScheduler {\n\treturn 
&KubernetesScheduler{\n\t\tnew(sync.RWMutex),\n\t\thelper,\n\t\texecutor,\n\t\tnil,\n\t\tnil,\n\t\tnil,\n\t\tfalse,\n\t\tmake(map[string]*mesos.Offer),\n\t\tmake(map[string]*Slave),\n\t\tmake(map[string]string),\n\t\tmake(map[string]*PodTask),\n\t\tmake(map[string]*PodTask),\n\t\tring.New(defaultFinishedTasksSize),\n\t\tmake(map[string]string),\n\t\tscheduleFunc,\n\t\tclient,\n\t\tcache.NewFIFO(),\n\t\tsr,\n\t}\n}", "func NewStableScheduling(kubeCli kubernetes.Interface, cli versioned.Interface) Predicate {\n\tp := &stableScheduling{\n\t\tkubeCli: kubeCli,\n\t\tcli: cli,\n\t}\n\treturn p\n}", "func newBatchSpecExecutionResetter(s *store.Store, observationContext *observation.Context, metrics batchChangesMetrics) *dbworker.Resetter {\n\tworkerStore := NewExecutorStore(s, observationContext)\n\n\toptions := dbworker.ResetterOptions{\n\t\tName: \"batch_spec_executor_resetter\",\n\t\tInterval: 1 * time.Minute,\n\t\tMetrics: metrics.executionResetterMetrics,\n\t}\n\n\tresetter := dbworker.NewResetter(workerStore, options)\n\treturn resetter\n}", "func NewForTest(t *testing.T) (context.Context, *pgxpool.Pool, *testutils.GitBuilder, []string, provider.Provider, *config.InstanceConfig) {\n\tctx := cipd_git.UseGitFinder(context.Background())\n\tctx, cancel := context.WithCancel(ctx)\n\n\t// Create a git repo for testing purposes.\n\tgb := testutils.GitInit(t, ctx)\n\thashes := []string{}\n\thashes = append(hashes, gb.CommitGenAt(ctx, \"foo.txt\", StartTime))\n\thashes = append(hashes, gb.CommitGenAt(ctx, \"foo.txt\", StartTime.Add(time.Minute)))\n\thashes = append(hashes, gb.CommitGenAt(ctx, \"foo.txt\", StartTime.Add(2*time.Minute)))\n\thashes = append(hashes, gb.CommitGenAt(ctx, \"bar.txt\", StartTime.Add(3*time.Minute)))\n\thashes = append(hashes, gb.CommitGenAt(ctx, \"foo.txt\", StartTime.Add(4*time.Minute)))\n\thashes = append(hashes, gb.CommitGenAt(ctx, \"foo.txt\", StartTime.Add(5*time.Minute)))\n\thashes = append(hashes, gb.CommitGenAt(ctx, \"bar.txt\", StartTime.Add(6*time.Minute)))\n\thashes = append(hashes, gb.CommitGenAt(ctx, \"foo.txt\", StartTime.Add(7*time.Minute)))\n\n\thashes = append(hashes, gb.CommitGenAt(ctx, \"foo.txt\", StartTime.Add(8*time.Minute)))\n\thashes = append(hashes, gb.CommitGenAt(ctx, \"foo.txt\", StartTime.Add(9*time.Minute)))\n\thashes = append(hashes, gb.CommitGenAt(ctx, \"foo.txt\", StartTime.Add(10*time.Minute)))\n\thashes = append(hashes, gb.CommitGenAt(ctx, \"foo.txt\", StartTime.Add(11*time.Minute)))\n\thashes = append(hashes, gb.CommitGenAt(ctx, \"foo.txt\", StartTime.Add(12*time.Minute)))\n\thashes = append(hashes, gb.CommitGenAt(ctx, \"foo.txt\", StartTime.Add(13*time.Minute)))\n\thashes = append(hashes, gb.CommitGenAt(ctx, \"foo.txt\", StartTime.Add(14*time.Minute)))\n\thashes = append(hashes, gb.CommitGenAt(ctx, \"foo.txt\", StartTime.Add(15*time.Minute)))\n\n\thashes = append(hashes, gb.CommitGenAt(ctx, \"foo.txt\", StartTime.Add(16*time.Minute)))\n\thashes = append(hashes, gb.CommitGenAt(ctx, \"foo.txt\", StartTime.Add(17*time.Minute)))\n\thashes = append(hashes, gb.CommitGenAt(ctx, \"foo.txt\", StartTime.Add(18*time.Minute)))\n\thashes = append(hashes, gb.CommitGenAt(ctx, \"foo.txt\", StartTime.Add(19*time.Minute)))\n\thashes = append(hashes, gb.CommitGenAt(ctx, \"foo.txt\", StartTime.Add(20*time.Minute)))\n\thashes = append(hashes, gb.CommitGenAt(ctx, \"foo.txt\", StartTime.Add(21*time.Minute)))\n\thashes = append(hashes, gb.CommitGenAt(ctx, \"foo.txt\", StartTime.Add(22*time.Minute)))\n\thashes = append(hashes, gb.CommitGenAt(ctx, \"foo.txt\", 
StartTime.Add(23*time.Minute)))\n\n\t// Init our sql database.\n\tdb := sqltest.NewCockroachDBForTests(t, \"dbgit\")\n\n\t// Get tmp dir to use for repo checkout.\n\ttmpDir, err := ioutil.TempDir(\"\", \"git\")\n\trequire.NoError(t, err)\n\n\t// Create the cleanup function.\n\tt.Cleanup(func() {\n\t\tcancel()\n\t\terr = os.RemoveAll(tmpDir)\n\t\tassert.NoError(t, err)\n\t\tgb.Cleanup()\n\t})\n\n\tinstanceConfig := &config.InstanceConfig{\n\t\tGitRepoConfig: config.GitRepoConfig{\n\t\t\tURL: gb.Dir(),\n\t\t\tDir: filepath.Join(tmpDir, \"checkout\"),\n\t\t},\n\t}\n\tgp, err := git_checkout.New(ctx, instanceConfig)\n\trequire.NoError(t, err)\n\treturn ctx, db, gb, hashes, gp, instanceConfig\n}", "func (c *FakeTZCronJobs) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.TZCronJob, err error) {\n\tobj, err := c.Fake.\n\t\tInvokes(testing.NewPatchSubresourceAction(tzcronjobsResource, c.ns, name, pt, data, subresources...), &v1alpha1.TZCronJob{})\n\n\tif obj == nil {\n\t\treturn nil, err\n\t}\n\treturn obj.(*v1alpha1.TZCronJob), err\n}" ]
[ "0.74659383", "0.6873497", "0.6515948", "0.64865166", "0.64812636", "0.6299038", "0.62634075", "0.62484765", "0.60576093", "0.5970894", "0.59279096", "0.5879235", "0.58560973", "0.57060903", "0.56286716", "0.5588584", "0.55848086", "0.557354", "0.5539054", "0.55289936", "0.5469673", "0.5434201", "0.5419338", "0.5414887", "0.5413893", "0.54083306", "0.53968894", "0.5376731", "0.53531736", "0.53328025", "0.5332086", "0.5331331", "0.5329141", "0.53289694", "0.53056407", "0.5299275", "0.5281698", "0.5267399", "0.52539843", "0.524159", "0.52378047", "0.52332574", "0.52303743", "0.52049136", "0.5202128", "0.5191517", "0.5160455", "0.51468104", "0.5134734", "0.512492", "0.5123512", "0.5123282", "0.5081838", "0.50481004", "0.50430995", "0.50180244", "0.50153846", "0.5007241", "0.49952987", "0.49814224", "0.4967694", "0.4956651", "0.49556556", "0.4945683", "0.49270326", "0.49177584", "0.49147636", "0.49079958", "0.4903076", "0.4884675", "0.48834944", "0.48761004", "0.48675334", "0.48579696", "0.4831237", "0.4826623", "0.4821324", "0.48176172", "0.47973087", "0.47880724", "0.47871295", "0.47713703", "0.47606194", "0.4760487", "0.47449833", "0.47292814", "0.47247574", "0.47143298", "0.47136325", "0.47086516", "0.46872997", "0.4679424", "0.46768615", "0.46763888", "0.4669908", "0.4669156", "0.4666709", "0.46538162", "0.46529752", "0.46525213" ]
0.81789845
0
NewBlockhashStoreSpec creates a new BlockhashStoreSpec for the given parameters.
func NewBlockhashStoreSpec(spec *job.BlockhashStoreSpec) *BlockhashStoreSpec {
	return &BlockhashStoreSpec{
		CoordinatorV1Address:           spec.CoordinatorV1Address,
		CoordinatorV2Address:           spec.CoordinatorV2Address,
		CoordinatorV2PlusAddress:       spec.CoordinatorV2PlusAddress,
		WaitBlocks:                     spec.WaitBlocks,
		LookbackBlocks:                 spec.LookbackBlocks,
		BlockhashStoreAddress:          spec.BlockhashStoreAddress,
		TrustedBlockhashStoreAddress:   spec.TrustedBlockhashStoreAddress,
		TrustedBlockhashStoreBatchSize: spec.TrustedBlockhashStoreBatchSize,
		PollPeriod:                     spec.PollPeriod,
		RunTimeout:                     spec.RunTimeout,
		EVMChainID:                     spec.EVMChainID,
		FromAddresses:                  spec.FromAddresses,
	}
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func newMockBlockHeaderStore() *mockBlockHeaderStore {\n\treturn &mockBlockHeaderStore{\n\t\theaders: make(map[chainhash.Hash]wire.BlockHeader),\n\t\theights: make(map[uint32]wire.BlockHeader),\n\t}\n}", "func newChainStore(r repo.Repo, genTS *types.TipSet) *CborBlockStore {\n\ttempBlock := r.Datastore()\n\tcborStore := cbor.NewCborStore(tempBlock)\n\treturn &CborBlockStore{\n\t\tStore: chain.NewStore(r.ChainDatastore(), tempBlock, genTS.At(0).Cid(), chain.NewMockCirculatingSupplyCalculator()),\n\t\tcborStore: cborStore,\n\t}\n}", "func (in *SecretStoreSpec) DeepCopy() *SecretStoreSpec {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(SecretStoreSpec)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func newBlockStorage(c *Config, b *Block) *blockStorage {\n\treturn &blockStorage{\n\t\tc: c,\n\t\tblock: b,\n\t\tsigs: make(map[int][]byte),\n\t\tpub: share.NewPubPoly(G2, G2.Point().Base(), c.Public),\n\t}\n}", "func newBlock(prevHash [32]byte, prevHashWithoutTx [32]byte, commitmentProof [crypto.COMM_PROOF_LENGTH]byte, height uint32) *protocol.Block {\n\tblock := new(protocol.Block)\n\tblock.PrevHash = prevHash\n\tblock.PrevHashWithoutTx = prevHashWithoutTx\n\tblock.CommitmentProof = commitmentProof\n\tblock.Height = height\n\tblock.StateCopy = make(map[[32]byte]*protocol.Account)\n\tblock.Aggregated = false\n\n\treturn block\n}", "func newBlockstoreManager(bs bstore.Blockstore, workerCount int) *blockstoreManager {\n\treturn &blockstoreManager{\n\t\tbs: bs,\n\t\tworkerCount: workerCount,\n\t\tjobs: make(chan func()),\n\t}\n}", "func NewMockStore(blocksWritten map[ipld.Link][]byte) (ipldbridge.Loader, ipldbridge.Storer) {\n\tvar storeLk sync.RWMutex\n\tstorer := func(lnkCtx ipldbridge.LinkContext) (io.Writer, ipldbridge.StoreCommitter, error) {\n\t\tvar buffer bytes.Buffer\n\t\tcommitter := func(lnk ipld.Link) error {\n\t\t\tstoreLk.Lock()\n\t\t\tblocksWritten[lnk] = buffer.Bytes()\n\t\t\tstoreLk.Unlock()\n\t\t\treturn nil\n\t\t}\n\t\treturn &buffer, committer, nil\n\t}\n\tloader := func(lnk ipld.Link, lnkCtx ipldbridge.LinkContext) (io.Reader, error) {\n\t\tstoreLk.RLock()\n\t\tdata, ok := blocksWritten[lnk]\n\t\tstoreLk.RUnlock()\n\t\tif ok {\n\t\t\treturn bytes.NewReader(data), nil\n\t\t}\n\t\treturn nil, fmt.Errorf(\"unable to load block\")\n\t}\n\n\treturn loader, storer\n}", "func New(fn string, hashWidth, ptrBytes int) (s *Store, err error) {\n\ts = &Store{\n\t\tHashWidth: min(max(hashWidth, 8), 29),\n\t\tPtrBytes: min(max(ptrBytes, 4), 7),\n\t}\n\n\tdefer func() {\n\t\tif e := recover(); e != nil {\n\t\t\ts = nil\n\t\t\terr = e.(error)\n\t\t}\n\t}()\n\n\tif s.accessor, err = storage.NewFile(fn, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0666); err != nil {\n\t\treturn nil, fmt.Errorf(\"%s: %s\", me(), err)\n\t}\n\n\tb := []byte{\n\t\t'Z', 'O', 'N', 'E', 'D', 'B',\n\t\tbyte(s.HashWidth),\n\t\tbyte(s.PtrBytes),\n\t\t0, 0, 0, 0, 0, 0, 0, 0,\n\t}\n\tif n, err := s.accessor.WriteAt(b, 0); n != len(b) || err != nil {\n\t\treturn nil, fmt.Errorf(\"%s: %s\", me(), err)\n\t}\n\n\tif s.Store, err = hdb.New(&hdbAccessor{s.delta(), s.accessor}); err != nil {\n\t\treturn nil, fmt.Errorf(\"%s: %s\", me(), err)\n\t}\n\n\treturn\n}", "func NewBlock(hash string) *pfs.Block {\n\treturn &pfs.Block{\n\t\tHash: hash,\n\t}\n}", "func (in *StoreSpec) DeepCopy() *StoreSpec {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(StoreSpec)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func NewBlockStorage(blockstore *app.BlockStore, dir string) *BlockStorage {\n\treturn &BlockStorage{\n\t\tblockstore: 
blockstore,\n\t\tdir: dir,\n\t}\n}", "func NewStore(b backend.Backend, le lease.Lessor, ig ConsistentIndexGetter) *store {\n\ts := &store{\n\t\tb: b,\n\t\tig: ig,\n\t\tkvindex: newTreeIndex(),\n\n\t\tle: le,\n\n\t\tcurrentRev: revision{main: 1},\n\t\tcompactMainRev: -1,\n\n\t\tbytesBuf8: make([]byte, 8, 8),\n\t\tfifoSched: schedule.NewFIFOScheduler(),\n\n\t\tstopc: make(chan struct{}),\n\t}\n\n\tif s.le != nil {\n\t\ts.le.SetRangeDeleter(s)\n\t}\n\n\ttx := s.b.BatchTx()\n\ttx.Lock()\n\ttx.UnsafeCreateBucket(keyBucketName)\n\ttx.UnsafeCreateBucket(metaBucketName)\n\ttx.Unlock()\n\ts.b.ForceCommit()\n\n\tif err := s.restore(); err != nil {\n\t\t// TODO: return the error instead of panic here?\n\t\tpanic(\"failed to recover store from backend\")\n\t}\n\n\treturn s\n}", "func newBlockfileMgr(id string, conf *Conf, indexConfig *blkstorage.IndexConfig, indexStore *leveldbhelper.DBHandle) *blockfileMgr {\n\tlogger.Debugf(\"newBlockfileMgr() initializing file-based block storage for ledger: %s \", id)\n\tvar rwMutexs []*sync.RWMutex\n\n\t//Determine the root directory for the blockfile storage, if it does not exist create it\n\trootDir := conf.getLedgerBlockDir(id)\n\t_, err := util.CreateDirIfMissing(rootDir)\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"Error: %s\", err))\n\t}\n\t// Instantiate the manager, i.e. blockFileMgr structure\n\tmgr := &blockfileMgr{rootDir: rootDir, conf: conf, db: indexStore, rwMutexs: rwMutexs}\n\n\t// cp = checkpointInfo, retrieve from the database the file suffix or number of where blocks were stored.\n\t// It also retrieves the current size of that file and the last block number that was written to that file.\n\t// At init checkpointInfo:latestFileChunkSuffixNum=[0], latestFileChunksize=[0], lastBlockNumber=[0]\n\tcpInfo, err := mgr.loadCurrentInfo()\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"Could not get block file info for current block file from db: %s\", err))\n\t}\n\tif cpInfo == nil {\n\t\tlogger.Info(`Getting block information from block storage`)\n\t\tif cpInfo, err = constructCheckpointInfoFromBlockFiles(rootDir); err != nil {\n\t\t\tpanic(fmt.Sprintf(\"Could not build checkpoint info from block files: %s\", err))\n\t\t}\n\t\tlogger.Debugf(\"Info constructed by scanning the blocks dir = %s\", spew.Sdump(cpInfo))\n\t} else {\n\t\tlogger.Debug(`Synching block information from block storage (if needed)`)\n\t\tsyncCPInfoFromFS(rootDir, cpInfo)\n\t}\n\terr = mgr.saveCurrentInfo(cpInfo, true)\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"Could not save next block file info to db: %s\", err))\n\t}\n\n\tmgr.oldestFileChunkSuffixNum = syncOldestFileNum(rootDir)\n\t//If start up is a restart of an existing storage,new the rwMutex for the files\n\tif conf.dumpConf.Enabled {\n\t\tfor i := 0; i <= cpInfo.latestFileChunkSuffixNum; i++ {\n\t\t\trwMutex := new(sync.RWMutex)\n\t\t\tmgr.rwMutexs = append(mgr.rwMutexs, rwMutex)\n\t\t}\n\t}\n\tmgr.dumpMutex = new(sync.Mutex)\n\n\t//Open a writer to the file identified by the number and truncate it to only contain the latest block\n\t// that was completely saved (file system, index, cpinfo, etc)\n\tcurrentFileWriter, err := newBlockfileWriter(deriveBlockfilePath(rootDir, cpInfo.latestFileChunkSuffixNum))\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"Could not open writer to current file: %s\", err))\n\t}\n\t//Truncate the file to remove excess past last block\n\terr = currentFileWriter.truncateFile(cpInfo.latestFileChunksize)\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"Could not truncate current file to known size in db: %s\", 
err))\n\t}\n\n\t// Create a new KeyValue store database handler for the blocks index in the keyvalue database\n\tmgr.index = newBlockIndex(indexConfig, indexStore)\n\n\t// Update the manager with the checkpoint info and the file writer\n\tmgr.cpInfo = cpInfo\n\tmgr.currentFileWriter = currentFileWriter\n\t// Create a checkpoint condition (event) variable, for the goroutine waiting for\n\t// or announcing the occurrence of an event.\n\tmgr.cpInfoCond = sync.NewCond(&sync.Mutex{})\n\n\t// init BlockchainInfo for external API's\n\tbcInfo := &common.BlockchainInfo{\n\t\tHeight: 0,\n\t\tCurrentBlockHash: nil,\n\t\tPreviousBlockHash: nil}\n\n\tif !cpInfo.isChainEmpty {\n\t\t//If start up is a restart of an existing storage, sync the index from block storage and update BlockchainInfo for external API's\n\t\tmgr.syncIndex()\n\t\tlastBlockHeader, err := mgr.retrieveBlockHeaderByNumber(cpInfo.lastBlockNumber)\n\t\tif err != nil {\n\t\t\tpanic(fmt.Sprintf(\"Could not retrieve header of the last block form file: %s\", err))\n\t\t}\n\t\tlastBlockHash := lastBlockHeader.Hash()\n\t\tpreviousBlockHash := lastBlockHeader.PreviousHash\n\t\tbcInfo = &common.BlockchainInfo{\n\t\t\tHeight: cpInfo.lastBlockNumber + 1,\n\t\t\tCurrentBlockHash: lastBlockHash,\n\t\t\tPreviousBlockHash: previousBlockHash}\n\t}\n\tmgr.bcInfo.Store(bcInfo)\n\treturn mgr\n}", "func getBlockStoreProvider(fsPath string) (*blkstorage.BlockStoreProvider, error) {\n\t// Format path to block store\n\tblockStorePath := kvledger.BlockStorePath(filepath.Join(fsPath, ledgersDataDirName))\n\tisEmpty, err := fileutil.DirEmpty(blockStorePath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif isEmpty {\n\t\treturn nil, errors.Errorf(\"provided path %s is empty. Aborting identifytxs\", fsPath)\n\t}\n\t// Default fields for block store provider\n\tconf := blkstorage.NewConf(blockStorePath, 0)\n\tindexConfig := &blkstorage.IndexConfig{\n\t\tAttrsToIndex: []blkstorage.IndexableAttr{\n\t\t\tblkstorage.IndexableAttrBlockNum,\n\t\t\tblkstorage.IndexableAttrBlockHash,\n\t\t\tblkstorage.IndexableAttrTxID,\n\t\t\tblkstorage.IndexableAttrBlockNumTranNum,\n\t\t},\n\t}\n\tmetricsProvider := &disabled.Provider{}\n\t// Create new block store provider\n\tblockStoreProvider, err := blkstorage.NewProvider(conf, indexConfig, metricsProvider)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn blockStoreProvider, nil\n}", "func NewStore(c meta.Config) *Store {\n\ts := &Store{\n\t\tStore: meta.NewStore(c),\n\t}\n\ts.Logger = log.New(&s.Stderr, \"\", log.LstdFlags)\n\ts.SetHashPasswordFn(mockHashPassword)\n\treturn s\n}", "func generateNewBlock(oldBlock Block, dataPayload string) (Block, error) {\n\n\tvar newBlock Block\n\ttimeNow := time.Now()\n\n\tnewBlock.Index = oldBlock.Index + 1\n\tnewBlock.Timestamp = timeNow.String()\n\n\tnewEvent, err := dataPayloadtoServiceEvent(dataPayload)\n\n\tif err != nil {\n\t\tlog.Println(\"ERROR: Unable to convert data payload into ServiceEvent for new block generation.\")\n\t}\n\n\tnewBlock.Event = newEvent\n\tnewBlock.PrevHash = oldBlock.Hash\n\tnewBlock.Hash = calculateHash(newBlock)\n\n\treturn newBlock, nil\n}", "func (in *MetadataStoreSpec) DeepCopy() *MetadataStoreSpec {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(MetadataStoreSpec)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func NewBlockFilesystem(store *persistent.AppStorage, numPtrs, dataSize int64, splitPtrs bool) (*BlockFilesystem, error) {\n\tif numPtrs < 1 {\n\t\treturn nil, fmt.Errorf(\"blockfs: number of pointers must be greater than zero\")\n\t} else if 
dataSize < 1 || dataSize >= (1<<24) {\n\t\treturn nil, fmt.Errorf(\"blockfs: size of data block must be greater zero and less than %v\", 1<<24)\n\t}\n\n\treturn &BlockFilesystem{\n\t\tstore: store,\n\n\t\tnumPtrs: numPtrs,\n\t\tdataSize: dataSize,\n\t\tsplitPtrs: splitPtrs,\n\t}, nil\n}", "func NewBlockchain(t *testing.T) (*bind.TransactOpts, *backends.SimulatedBackend) {\n\tkey, err := crypto.GenerateKey()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tauth := bind.NewKeyedTransactor(key)\n\t// https://medium.com/coinmonks/unit-testing-solidity-contracts-on-ethereum-with-go-3cc924091281\n\tbalance := new(big.Int).Mul(big.NewInt(999999999999999), big.NewInt(999999999999999))\n\tgAlloc := map[common.Address]core.GenesisAccount{\n\t\tauth.From: {Balance: balance},\n\t}\n\tsim := backends.NewSimulatedBackend(gAlloc, 8000000)\n\treturn auth, sim\n}", "func New(genesisHeader util.BlockHeader, db database.ChainStore) (*BlockChain, error) {\n\t// Init genesis header\n\t_, err := db.Headers().GetBest()\n\tif err != nil {\n\t\tstoreHeader := &util.Header{BlockHeader: genesisHeader, TotalWork: new(big.Int)}\n\t\tif err := db.Headers().Put(storeHeader, true); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treturn &BlockChain{db: db}, nil\n}", "func NewStore(lg *zap.Logger, b backend.Backend, le lease.Lessor, cfg StoreConfig) *store {\n\tif lg == nil {\n\t\tlg = zap.NewNop()\n\t}\n\tif cfg.CompactionBatchLimit == 0 {\n\t\tcfg.CompactionBatchLimit = defaultCompactBatchLimit\n\t}\n\ts := &store{\n\t\tcfg: cfg,\n\t\tb: b,\n\t\tkvindex: newTreeIndex(lg),\n\n\t\tle: le,\n\n\t\tcurrentRev: 1,\n\t\tcompactMainRev: -1,\n\n\t\tfifoSched: schedule.NewFIFOScheduler(),\n\n\t\tstopc: make(chan struct{}),\n\n\t\tlg: lg,\n\t}\n\ts.hashes = newHashStorage(lg, s)\n\ts.ReadView = &readView{s}\n\ts.WriteView = &writeView{s}\n\tif s.le != nil {\n\t\ts.le.SetRangeDeleter(func() lease.TxnDelete { return s.Write(traceutil.TODO()) })\n\t}\n\n\ttx := s.b.BatchTx()\n\ttx.LockOutsideApply()\n\ttx.UnsafeCreateBucket(buckets.Key)\n\ttx.UnsafeCreateBucket(buckets.Meta)\n\ttx.Unlock()\n\ts.b.ForceCommit()\n\n\ts.mu.Lock()\n\tdefer s.mu.Unlock()\n\tif err := s.restore(); err != nil {\n\t\t// TODO: return the error instead of panic here?\n\t\tpanic(\"failed to recover store from backend\")\n\t}\n\n\treturn s\n}", "func NewBlock() (*Block, error) {\n\tn, err := findLast()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\th, err := ftoh(n)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfmt.Println(\"Hash: \" + h)\n\n\treturn &Block{Number: n + 1, PreviousHash: h}, nil\n}", "func New(\n\tdatabaseContext model.DBManager,\n\n\tdifficultyManager model.DifficultyManager,\n\tpastMedianTimeManager model.PastMedianTimeManager,\n\tcoinbaseManager model.CoinbaseManager,\n\tconsensusStateManager model.ConsensusStateManager,\n\tghostdagManager model.GHOSTDAGManager,\n\ttransactionValidator model.TransactionValidator,\n\n\tacceptanceDataStore model.AcceptanceDataStore,\n\tblockRelationStore model.BlockRelationStore,\n\tmultisetStore model.MultisetStore,\n\tghostdagDataStore model.GHOSTDAGDataStore,\n) model.BlockBuilder {\n\n\treturn &blockBuilder{\n\t\tdatabaseContext: databaseContext,\n\t\tdifficultyManager: difficultyManager,\n\t\tpastMedianTimeManager: pastMedianTimeManager,\n\t\tcoinbaseManager: coinbaseManager,\n\t\tconsensusStateManager: consensusStateManager,\n\t\tghostdagManager: ghostdagManager,\n\t\ttransactionValidator: transactionValidator,\n\n\t\tacceptanceDataStore: acceptanceDataStore,\n\t\tblockRelationStore: 
blockRelationStore,\n\t\tmultisetStore: multisetStore,\n\t\tghostdagDataStore: ghostdagDataStore,\n\t}\n}", "func New(storeConfig config.Store) (*Store, error) {\n\tdb, err := bolt.Open(storeConfig.DBPath, 0644, &bolt.Options{Timeout: 1 * time.Second})\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"could not open bolt DB database\")\n\t}\n\tbucketName := []byte(\"shorted\")\n\terr = db.Update(func(tx *bolt.Tx) error {\n\t\t_, err := tx.CreateBucketIfNotExists(bucketName)\n\t\treturn err\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Store{\n\t\tdb: db,\n\t\tidLength: storeConfig.ShortedIDLength,\n\t\tbucketName: bucketName,\n\t}, nil\n}", "func BlockHash(v string) predicate.Block {\n\treturn predicate.Block(func(s *sql.Selector) {\n\t\ts.Where(sql.EQ(s.C(FieldBlockHash), v))\n\t})\n}", "func NewStoreEncryptionSpec(value string) (StoreEncryptionSpec, error) {\n\tconst pathField = \"path\"\n\tvar es StoreEncryptionSpec\n\tes.RotationPeriod = DefaultRotationPeriod\n\n\tused := make(map[string]struct{})\n\tfor _, split := range strings.Split(value, \",\") {\n\t\tif len(split) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tsubSplits := strings.SplitN(split, \"=\", 2)\n\t\tif len(subSplits) == 1 {\n\t\t\treturn StoreEncryptionSpec{}, fmt.Errorf(\"field not in the form <key>=<value>: %s\", split)\n\t\t}\n\t\tfield := strings.ToLower(subSplits[0])\n\t\tvalue := subSplits[1]\n\t\tif _, ok := used[field]; ok {\n\t\t\treturn StoreEncryptionSpec{}, fmt.Errorf(\"%s field was used twice in encryption definition\", field)\n\t\t}\n\t\tused[field] = struct{}{}\n\n\t\tif len(field) == 0 {\n\t\t\treturn StoreEncryptionSpec{}, fmt.Errorf(\"empty field\")\n\t\t}\n\t\tif len(value) == 0 {\n\t\t\treturn StoreEncryptionSpec{}, fmt.Errorf(\"no value specified for %s\", field)\n\t\t}\n\n\t\tswitch field {\n\t\tcase pathField:\n\t\t\tvar err error\n\t\t\tes.Path, err = base.GetAbsoluteStorePath(pathField, value)\n\t\t\tif err != nil {\n\t\t\t\treturn StoreEncryptionSpec{}, err\n\t\t\t}\n\t\tcase \"key\":\n\t\t\tif value == plaintextFieldValue {\n\t\t\t\tes.KeyPath = plaintextFieldValue\n\t\t\t} else {\n\t\t\t\tvar err error\n\t\t\t\tes.KeyPath, err = base.GetAbsoluteStorePath(\"key\", value)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn StoreEncryptionSpec{}, err\n\t\t\t\t}\n\t\t\t}\n\t\tcase \"old-key\":\n\t\t\tif value == plaintextFieldValue {\n\t\t\t\tes.OldKeyPath = plaintextFieldValue\n\t\t\t} else {\n\t\t\t\tvar err error\n\t\t\t\tes.OldKeyPath, err = base.GetAbsoluteStorePath(\"old-key\", value)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn StoreEncryptionSpec{}, err\n\t\t\t\t}\n\t\t\t}\n\t\tcase \"rotation-period\":\n\t\t\tvar err error\n\t\t\tes.RotationPeriod, err = time.ParseDuration(value)\n\t\t\tif err != nil {\n\t\t\t\treturn StoreEncryptionSpec{}, errors.Wrapf(err, \"could not parse rotation-duration value: %s\", value)\n\t\t\t}\n\t\tdefault:\n\t\t\treturn StoreEncryptionSpec{}, fmt.Errorf(\"%s is not a valid enterprise-encryption field\", field)\n\t\t}\n\t}\n\n\t// Check that all fields are set.\n\tif es.Path == \"\" {\n\t\treturn StoreEncryptionSpec{}, fmt.Errorf(\"no path specified\")\n\t}\n\tif es.KeyPath == \"\" {\n\t\treturn StoreEncryptionSpec{}, fmt.Errorf(\"no key specified\")\n\t}\n\tif es.OldKeyPath == \"\" {\n\t\treturn StoreEncryptionSpec{}, fmt.Errorf(\"no old-key specified\")\n\t}\n\n\treturn es, nil\n}", "func (in *SharedMemoryStoreSpec) DeepCopy() *SharedMemoryStoreSpec {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(SharedMemoryStoreSpec)\n\tin.DeepCopyInto(out)\n\treturn 
out\n}", "func (d *Delegate) ServicesForSpec(jb job.Job) ([]job.ServiceCtx, error) {\n\tif jb.BlockhashStoreSpec == nil {\n\t\treturn nil, errors.Errorf(\n\t\t\t\"blockhashstore.Delegate expects a BlockhashStoreSpec to be present, got %+v\", jb)\n\t}\n\n\tchain, err := d.chains.Get(jb.BlockhashStoreSpec.EVMChainID.ToInt())\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\n\t\t\t\"getting chain ID %d: %w\", jb.BlockhashStoreSpec.EVMChainID.ToInt(), err)\n\t}\n\n\tif jb.BlockhashStoreSpec.WaitBlocks < int32(chain.Config().EvmFinalityDepth()) {\n\t\treturn nil, fmt.Errorf(\n\t\t\t\"waitBlocks must be greater than or equal to chain's finality depth (%d), currently %d\",\n\t\t\tchain.Config().EvmFinalityDepth(), jb.BlockhashStoreSpec.WaitBlocks)\n\t}\n\n\tkeys, err := d.ks.SendingKeys(chain.ID())\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"getting sending keys\")\n\t}\n\tfromAddress := keys[0].Address\n\tif jb.BlockhashStoreSpec.FromAddress != nil {\n\t\tfromAddress = *jb.BlockhashStoreSpec.FromAddress\n\t}\n\n\tbhs, err := blockhash_store.NewBlockhashStore(\n\t\tjb.BlockhashStoreSpec.BlockhashStoreAddress.Address(), chain.Client())\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"building BHS\")\n\t}\n\n\tvar coordinators []Coordinator\n\tif jb.BlockhashStoreSpec.CoordinatorV1Address != nil {\n\t\tvar c *v1.VRFCoordinator\n\t\tif c, err = v1.NewVRFCoordinator(\n\t\t\tjb.BlockhashStoreSpec.CoordinatorV1Address.Address(), chain.Client()); err != nil {\n\n\t\t\treturn nil, errors.Wrap(err, \"building V1 coordinator\")\n\t\t}\n\t\tcoordinators = append(coordinators, NewV1Coordinator(c))\n\t}\n\tif jb.BlockhashStoreSpec.CoordinatorV2Address != nil {\n\t\tvar c *v2.VRFCoordinatorV2\n\t\tif c, err = v2.NewVRFCoordinatorV2(\n\t\t\tjb.BlockhashStoreSpec.CoordinatorV2Address.Address(), chain.Client()); err != nil {\n\n\t\t\treturn nil, errors.Wrap(err, \"building V2 coordinator\")\n\t\t}\n\t\tcoordinators = append(coordinators, NewV2Coordinator(c))\n\t}\n\n\tbpBHS, err := NewBulletproofBHS(chain.Config(), fromAddress.Address(), chain.TxManager(), bhs)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"building bulletproof bhs\")\n\t}\n\n\tlog := d.logger.Named(\"BHS Feeder\").With(\"jobID\", jb.ID, \"externalJobID\", jb.ExternalJobID)\n\tfeeder := NewFeeder(\n\t\tlog,\n\t\tNewMultiCoordinator(coordinators...),\n\t\tbpBHS,\n\t\tint(jb.BlockhashStoreSpec.WaitBlocks),\n\t\tint(jb.BlockhashStoreSpec.LookbackBlocks),\n\t\tfunc(ctx context.Context) (uint64, error) {\n\t\t\thead, err := chain.Client().HeadByNumber(ctx, nil)\n\t\t\tif err != nil {\n\t\t\t\treturn 0, errors.Wrap(err, \"getting chain head\")\n\t\t\t}\n\t\t\treturn uint64(head.Number), nil\n\t\t})\n\n\treturn []job.ServiceCtx{&service{\n\t\tfeeder: feeder,\n\t\tpollPeriod: jb.BlockhashStoreSpec.PollPeriod,\n\t\trunTimeout: jb.BlockhashStoreSpec.RunTimeout,\n\t\tlogger: log,\n\t\tstop: make(chan struct{}),\n\t\tdone: make(chan struct{}),\n\t}}, nil\n}", "func newFsStore(root string) Backend {\n\treturn &fsStore{\n\t\troot: root,\n\t\tkinds: map[string]bool{},\n\t\tcheckDuration: defaultDuration,\n\t\tdata: map[Key]*resource{},\n\t\tdonec: make(chan struct{}),\n\t\tProbe: probe.NewProbe(),\n\t}\n}", "func InitChain(address string) *BlockChain {\n\n\tvar lastHash []byte\n\n\tif DBExist() {\n\t\tfmt.Println(\"Blockchain Already Exist\")\n\t\truntime.Goexit()\n\t}\n\n\topts := badger.DefaultOptions\n\topts.Dir = dbpath\n\topts.ValueDir = dbpath\n\n\tdb, err := badger.Open(opts)\n\tHandle(err)\n\n\terr = db.Update(func(txn 
*badger.Txn) error {\n\t\t// create transaction with address and genesis data\n\t\tcbtx := CoinbaseTx(address, genesisData)\n\n\t\t// create first block with genesis transaction\n\t\tgenesis := Genesis(cbtx)\n\n\t\tfmt.Println(\"Genesis Block Created \")\n\n\t\terr := txn.Set([]byte(\"lasthash\"), genesis.Hash)\n\t\tHandle(err)\n\n\t\terr = txn.Set(genesis.Hash, genesis.Serialize())\n\n\t\tlastHash = genesis.Hash\n\n\t\treturn err\n\t})\n\n\t// // Checking and also updating database with blockchain\n\t// err = db.Update(func(txn *badger.Txn) error {\n\t// \t// check if there is a blockchain via lasthash key\n\t// \t// if not\n\t// \tif _, err := txn.Get([]byte(\"lasthash\")); err == badger.ErrKeyNotFound {\n\t// \t\tfmt.Println(\"No Blockchain Exist. Creating One ...\")\n\t// \t\t// create genesis block\n\t// \t\tgenesis := Genesis()\n\n\t// \t\t// save hash to database with lasthash key --> to disk\n\t// \t\t// the purpose of this is later when we want to get blocks from block chain we can get from deserialized this block\n\t// \t\terr := txn.Set(genesis.Hash, genesis.Serialize())\n\t// \t\tHandle(err)\n\n\t// \t\t// save serialize block to database with it's hash key --> to disk\n\t// \t\terr = txn.Set([]byte(\"lasthash\"), genesis.Hash)\n\n\t// \t\t// set lasthash with genesis hash so we get lasthash in memory --> in memory\n\t// \t\tlastHash = genesis.Hash\n\n\t// \t\treturn err\n\n\t// \t} else {\n\t// \t\t// else if there is a block chain\n\t// \t\t// get lasthash value\n\t// \t\titem, err := txn.Get([]byte(\"lasthash\"))\n\t// \t\tHandle(err)\n\n\t// \t\t// set lasthash to lasthash in memory --> in memory\n\t// \t\tlastHash, err = item.Value()\n\n\t// \t\treturn err\n\t// \t}\n\t// })\n\n\tHandle(err)\n\n\tblockchain := BlockChain{lastHash, db}\n\n\treturn &blockchain\n\n}", "func TestBlock(t *testing.T) {\n\tb := btcutil.NewBlock(&Block100000)\n\n\t// Ensure we get the same data back out.\n\tif msgBlock := b.MsgBlock(); !reflect.DeepEqual(msgBlock, &Block100000) {\n\t\tt.Errorf(\"MsgBlock: mismatched MsgBlock - got %v, want %v\",\n\t\t\tspew.Sdump(msgBlock), spew.Sdump(&Block100000))\n\t}\n\n\t// Ensure block height set and get work properly.\n\twantHeight := int32(100000)\n\tb.SetHeight(wantHeight)\n\tif gotHeight := b.Height(); gotHeight != wantHeight {\n\t\tt.Errorf(\"Height: mismatched height - got %v, want %v\",\n\t\t\tgotHeight, wantHeight)\n\t}\n\n\t// Hash for block 100,000.\n\twantHashStr := \"3ba27aa200b1cecaad478d2b00432346c3f1f3986da1afd33e506\"\n\twantHash, err := chainhash.NewHashFromStr(wantHashStr)\n\tif err != nil {\n\t\tt.Errorf(\"NewHashFromStr: %v\", err)\n\t}\n\n\t// Request the hash multiple times to test generation and caching.\n\tfor i := 0; i < 2; i++ {\n\t\thash := b.Hash()\n\t\tif !hash.IsEqual(wantHash) {\n\t\t\tt.Errorf(\"Hash #%d mismatched hash - got %v, want %v\",\n\t\t\t\ti, hash, wantHash)\n\t\t}\n\t}\n\n\t// Hashes for the transactions in Block100000.\n\twantTxHashes := []string{\n\t\t\"8c14f0db3df150123e6f3dbbf30f8b955a8249b62ac1d1ff16284aefa3d06d87\",\n\t\t\"fff2525b8931402dd09222c50775608f75787bd2b87e56995a7bdd30f79702c4\",\n\t\t\"6359f0868171b1d194cbee1af2f16ea598ae8fad666d9b012c8ed2b79a236ec4\",\n\t\t\"e9a66845e05d5abc0ad04ec80f774a7e585c6e8db975962d069a522137b80c1d\",\n\t}\n\n\t// Create a new block to nuke all cached data.\n\tb = btcutil.NewBlock(&Block100000)\n\n\t// Request hash for all transactions one at a time via Tx.\n\tfor i, txHash := range wantTxHashes {\n\t\twantHash, err := chainhash.NewHashFromStr(txHash)\n\t\tif err != nil 
{\n\t\t\tt.Errorf(\"NewHashFromStr: %v\", err)\n\t\t}\n\n\t\t// Request the hash multiple times to test generation and\n\t\t// caching.\n\t\tfor j := 0; j < 2; j++ {\n\t\t\ttx, err := b.Tx(i)\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"Tx #%d: %v\", i, err)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\thash := tx.Hash()\n\t\t\tif !hash.IsEqual(wantHash) {\n\t\t\t\tt.Errorf(\"Hash #%d mismatched hash - got %v, \"+\n\t\t\t\t\t\"want %v\", j, hash, wantHash)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t}\n\n\t// Create a new block to nuke all cached data.\n\tb = btcutil.NewBlock(&Block100000)\n\n\t// Request slice of all transactions multiple times to test generation\n\t// and caching.\n\tfor i := 0; i < 2; i++ {\n\t\ttransactions := b.Transactions()\n\n\t\t// Ensure we get the expected number of transactions.\n\t\tif len(transactions) != len(wantTxHashes) {\n\t\t\tt.Errorf(\"Transactions #%d mismatched number of \"+\n\t\t\t\t\"transactions - got %d, want %d\", i,\n\t\t\t\tlen(transactions), len(wantTxHashes))\n\t\t\tcontinue\n\t\t}\n\n\t\t// Ensure all of the hashes match.\n\t\tfor j, tx := range transactions {\n\t\t\twantHash, err := chainhash.NewHashFromStr(wantTxHashes[j])\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"NewHashFromStr: %v\", err)\n\t\t\t}\n\n\t\t\thash := tx.Hash()\n\t\t\tif !hash.IsEqual(wantHash) {\n\t\t\t\tt.Errorf(\"Transactions #%d mismatched hashes \"+\n\t\t\t\t\t\"- got %v, want %v\", j, hash, wantHash)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t}\n\n\t// Serialize the test block.\n\tvar block100000Buf bytes.Buffer\n\terr = Block100000.Serialize(&block100000Buf)\n\tif err != nil {\n\t\tt.Errorf(\"Serialize: %v\", err)\n\t}\n\tblock100000Bytes := block100000Buf.Bytes()\n\n\t// Request serialized bytes multiple times to test generation and\n\t// caching.\n\tfor i := 0; i < 2; i++ {\n\t\tserializedBytes, err := b.Bytes()\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Bytes: %v\", err)\n\t\t\tcontinue\n\t\t}\n\t\tif !bytes.Equal(serializedBytes, block100000Bytes) {\n\t\t\tt.Errorf(\"Bytes #%d wrong bytes - got %v, want %v\", i,\n\t\t\t\tspew.Sdump(serializedBytes),\n\t\t\t\tspew.Sdump(block100000Bytes))\n\t\t\tcontinue\n\t\t}\n\t}\n\n\t// Transaction offsets and length for the transaction in Block100000.\n\twantTxLocs := []wire.TxLoc{\n\t\t{TxStart: 81, TxLen: 144},\n\t\t{TxStart: 225, TxLen: 259},\n\t\t{TxStart: 484, TxLen: 257},\n\t\t{TxStart: 741, TxLen: 225},\n\t}\n\n\t// Ensure the transaction location information is accurate.\n\ttxLocs, err := b.TxLoc()\n\tif err != nil {\n\t\tt.Errorf(\"TxLoc: %v\", err)\n\t\treturn\n\t}\n\tif !reflect.DeepEqual(txLocs, wantTxLocs) {\n\t\tt.Errorf(\"TxLoc: mismatched transaction location information \"+\n\t\t\t\"- got %v, want %v\", spew.Sdump(txLocs),\n\t\t\tspew.Sdump(wantTxLocs))\n\t}\n}", "func NewBlockChain(address string) *BlockChain {\n\n\tvar tip []byte\n\n\t//db, err := bolt.Open(dbFile, 0600, &bolt.Options{Timeout: 1 * time.Minute})\n\tdb, err := bolt.Open(dbFile, 0600, nil)\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\terr = db.Update(func(tx *bolt.Tx) error {\n\t\tb := tx.Bucket([]byte(blocksBucket))\n\n\t\t// if we dont already have bucket with blocks\n\t\tif b == nil {\n\t\t\tfmt.Println(\"Creating New Blockchain...\")\n\t\t\tnewTx := NewCoinBase(address, genesisCoinBaseData)\n\t\t\tbl := NewGenesisBlock(newTx)\n\n\t\t\tbckt, _ := tx.CreateBucket([]byte(blocksBucket))\n\t\t\tif newerr := bckt.Put(bl.Hash, bl.Serialize()); err != nil {\n\t\t\t\treturn newerr\n\t\t\t}\n\t\t\tif newerr := bckt.Put([]byte(\"l\"), bl.Hash); err != nil {\n\t\t\t\treturn 
newerr\n\t\t\t}\n\t\t\ttip = bl.Hash\n\t\t} else {\n\t\t\t// set tip to last existing hash\n\t\t\ttip = b.Get([]byte(\"l\"))\n\t\t}\n\t\treturn nil\n\t})\n\n\tif err != nil {\n\t\tlog.Fatal(\"NewBlockChain :\", err)\n\t}\n\treturn &BlockChain{tip, db}\n}", "func NewStore() *Store {\n\tstore := Store{\n\t\ttxs: make(map[blockTx]*btcutil.Tx),\n\t\tsorted: list.New(),\n\t\tsigned: make(map[blockTx]*signedTx),\n\t\trecv: make(map[blockOutPoint]*recvTxOut),\n\t\tunspent: make(map[btcwire.OutPoint]*recvTxOut),\n\t}\n\treturn &store\n}", "func NewBlockChain(address string) *BlockChain {\n\t//return \t&BlockClain{\n\t//\t[]*Block{genesisBlock},\n\t//}\n\n\tvar lastHash []byte\n\tdb, err := bolt.Open(BlockChainDB, 0600, nil)\n\t//defer db.Close()\n\tif err != nil {\n\t\tlog.Fatal(\"create database failed\")\n\t}\n\terr = db.Update(func(tx *bolt.Tx) error {\n\t\tbucket := tx.Bucket([]byte(BlockBucket))\n\t\tif bucket == nil{\n\t\t\tbucket,err = tx.CreateBucket([]byte(BlockBucket))\n\t\t\tif err != nil{\n\t\t\t\tlog.Fatal(\"create bucket failed\")\n\t\t\t}\n\n\t\t\t//Create genesis block\n\t\t\tgenesisBlock := GenesisBlock(address)\n\n\t\t\t//Write message into database\n\t\t\tbucket.Put(genesisBlock.Hash,genesisBlock.Serialize())\n\t\t\tbucket.Put([]byte(\"LastHashKey\"),genesisBlock.Hash)\n\t\t\tlastHash = genesisBlock.Hash\n\t\t}else{\n\t\t\tlastHash = bucket.Get([]byte(\"LastHashKey\"))\n\t\t}\n\n\t\treturn nil\n\t})\n\treturn &BlockChain{db,lastHash}\n}", "func NewBlock(index uint64, ordered Events) *Block {\n\tevents := make(hash.EventsSlice, len(ordered))\n\tfor i, e := range ordered {\n\t\tevents[i] = e.Hash()\n\t}\n\n\treturn &Block{\n\t\tIndex: index,\n\t\tEvents: events,\n\t}\n}", "func newStore(c *Config, httpAddr, raftAddr string) *store {\n\tinternalData := meta.Data{\n\t\tIndex: 1,\n\t}\n\ts := store{\n\t\tdata: &Data{\n\t\t\tData: internalData,\n\t\t},\n\t\tclosing: make(chan struct{}),\n\t\tdataChanged: make(chan struct{}),\n\t\tpath: c.Dir,\n\t\tconfig: c,\n\t\thttpAddr: httpAddr,\n\t\traftAddr: raftAddr,\n\t\tlogger: zap.New(zap.NullEncoder()),\n\t}\n\n\treturn &s\n}", "func NewStore()(*Store) {\n m := &Store{\n Entity: *iadcd81124412c61e647227ecfc4449d8bba17de0380ddda76f641a29edf2b242.NewEntity(),\n }\n return m\n}", "func newBlockDAO(kvstore db.KVStore, writeIndex bool, compressBlock bool, maxCacheSize int) *blockDAO {\n\tblockDAO := &blockDAO{\n\t\twriteIndex: writeIndex,\n\t\tcompressBlock: compressBlock,\n\t\tkvstore: kvstore,\n\t}\n\tif maxCacheSize > 0 {\n\t\tblockDAO.headerCache = cache.NewThreadSafeLruCache(maxCacheSize)\n\t\tblockDAO.bodyCache = cache.NewThreadSafeLruCache(maxCacheSize)\n\t\tblockDAO.footerCache = cache.NewThreadSafeLruCache(maxCacheSize)\n\t}\n\ttimerFactory, err := prometheustimer.New(\n\t\t\"iotex_block_dao_perf\",\n\t\t\"Performance of block DAO\",\n\t\t[]string{\"type\"},\n\t\t[]string{\"default\"},\n\t)\n\tif err != nil {\n\t\treturn nil\n\t}\n\tblockDAO.timerFactory = timerFactory\n\tblockDAO.lifecycle.Add(kvstore)\n\treturn blockDAO\n}", "func New(ctx context.Context, alias, path string, cfgdir string) (*Store, error) {\n\tpath = fsutil.CleanPath(path)\n\ts := &Store{\n\t\talias: alias,\n\t\tpath: path,\n\t\tsync: gitmock.New(),\n\t}\n\n\t// init store backend\n\tswitch backend.GetStoreBackend(ctx) {\n\tcase backend.FS:\n\t\ts.store = fs.New(path)\n\t\tout.Debug(ctx, \"Using Store Backend: fs\")\n\tcase backend.KVMock:\n\t\ts.store = kvmock.New()\n\t\tout.Debug(ctx, \"Using Store Backend: kvmock\")\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"Unknown 
store backend\")\n\t}\n\n\t// init sync backend\n\tswitch backend.GetSyncBackend(ctx) {\n\tcase backend.GoGit:\n\t\tout.Cyan(ctx, \"WARNING: Using experimental sync backend 'go-git'\")\n\t\tgit, err := gogit.Open(path)\n\t\tif err != nil {\n\t\t\tout.Debug(ctx, \"Failed to initialize sync backend 'gogit': %s\", err)\n\t\t} else {\n\t\t\ts.sync = git\n\t\t\tout.Debug(ctx, \"Using Sync Backend: go-git\")\n\t\t}\n\tcase backend.GitCLI:\n\t\tgpgBin, _ := gpgcli.Binary(ctx, \"\")\n\t\tgit, err := gitcli.Open(path, gpgBin)\n\t\tif err != nil {\n\t\t\tout.Debug(ctx, \"Failed to initialize sync backend 'git': %s\", err)\n\t\t} else {\n\t\t\ts.sync = git\n\t\t\tout.Debug(ctx, \"Using Sync Backend: git-cli\")\n\t\t}\n\tcase backend.GitMock:\n\t\t// no-op\n\t\tout.Debug(ctx, \"Using Sync Backend: git-mock\")\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"Unknown Sync Backend\")\n\t}\n\n\t// init crypto backend\n\tswitch backend.GetCryptoBackend(ctx) {\n\tcase backend.GPGCLI:\n\t\tgpg, err := gpgcli.New(ctx, gpgcli.Config{\n\t\t\tUmask: fsutil.Umask(),\n\t\t\tArgs: gpgcli.GPGOpts(),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\ts.crypto = gpg\n\t\tout.Debug(ctx, \"Using Crypto Backend: gpg-cli\")\n\tcase backend.XC:\n\t\t//out.Red(ctx, \"WARNING: Using highly experimental crypto backend!\")\n\t\tcrypto, err := xc.New(cfgdir, client.New(cfgdir))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\ts.crypto = crypto\n\t\tout.Debug(ctx, \"Using Crypto Backend: xc\")\n\tcase backend.GPGMock:\n\t\t//out.Red(ctx, \"WARNING: Using no-op crypto backend (NO ENCRYPTION)!\")\n\t\ts.crypto = gpgmock.New()\n\t\tout.Debug(ctx, \"Using Crypto Backend: gpg-mock\")\n\tcase backend.OpenPGP:\n\t\tcrypto, err := openpgp.New(ctx)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\ts.crypto = crypto\n\t\tout.Debug(ctx, \"Using Crypto Backend: openpgp\")\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"no valid crypto backend selected\")\n\t}\n\n\treturn s, nil\n}", "func startNewStateAndWaitForBlock(t *testing.T, consensusReplayConfig *cfg.Config,\n\tlastBlockHeight int64, blockDB dbm.DB, stateStore sm.Store,\n) {\n\tlogger := log.TestingLogger()\n\tstate, _ := stateStore.LoadFromDBOrGenesisFile(consensusReplayConfig.GenesisFile())\n\tprivValidator := loadPrivValidator(consensusReplayConfig)\n\tcs := newStateWithConfigAndBlockStore(\n\t\tconsensusReplayConfig,\n\t\tstate,\n\t\tprivValidator,\n\t\tkvstore.NewApplication(),\n\t\tblockDB,\n\t)\n\tcs.SetLogger(logger)\n\n\tbytes, _ := os.ReadFile(cs.config.WalFile())\n\tt.Logf(\"====== WAL: \\n\\r%X\\n\", bytes)\n\n\terr := cs.Start()\n\trequire.NoError(t, err)\n\tdefer func() {\n\t\tif err := cs.Stop(); err != nil {\n\t\t\tt.Error(err)\n\t\t}\n\t}()\n\n\t// This is just a signal that we haven't halted; its not something contained\n\t// in the WAL itself. 
Assuming the consensus state is running, replay of any\n\t// WAL, including the empty one, should eventually be followed by a new\n\t// block, or else something is wrong.\n\tnewBlockSub, err := cs.eventBus.Subscribe(context.Background(), testSubscriber, types.EventQueryNewBlock)\n\trequire.NoError(t, err)\n\tselect {\n\tcase <-newBlockSub.Out():\n\tcase <-newBlockSub.Cancelled():\n\t\tt.Fatal(\"newBlockSub was cancelled\")\n\tcase <-time.After(120 * time.Second):\n\t\tt.Fatal(\"Timed out waiting for new block (see trace above)\")\n\t}\n}", "func dbStoreBlock(dbTx database.Tx, block *asiutil.Block) error {\n\tblockKey := database.NewNormalBlockKey(block.Hash())\n\thasBlock, err := dbTx.HasBlock(blockKey)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif hasBlock {\n\t\treturn nil\n\t}\n\n\tblockBytes, err := block.Bytes()\n\tif err != nil {\n\t\tstr := fmt.Sprintf(\"failed to get serialized bytes for block %s\",\n\t\t\tblock.Hash())\n\t\treturn ruleError(ErrFailedSerializedBlock, str)\n\t}\n\tlog.Infof(\"save block, height=%d, hash=%s\", block.Height(), block.Hash())\n\treturn dbTx.StoreBlock(blockKey, blockBytes)\n}", "func (b *Block) NewBlock(height int32, parentHash string, value p1.MerklePatriciaTrie) {\n\n var header Header\n mptAsBytes := getBytes(value)\n\n header.Height = height\n header.Timestamp = int64(time.Now().Unix())\n header.ParentHash = parentHash\n header.Size = int32(len(mptAsBytes))\n header.Nonce = \"\"\n hashString := string(header.Height) + string(header.Timestamp) + header.ParentHash + value.Root + string(header.Size)\n sum := sha3.Sum256([]byte(hashString))\n header.Hash = hex.EncodeToString(sum[:])\n\n b.Header = header\n b.Mpt = value\n}", "func NewStore(log logrus.FieldLogger, db *bolt.DB) (*Store, error) {\n\tif db == nil {\n\t\treturn nil, errors.New(\"new exchange Store failed, db is nil\")\n\t}\n\n\tif err := db.Update(func(tx *bolt.Tx) error {\n\n\t\t// create deposit status bucket if not exist\n\t\tif _, err := tx.CreateBucketIfNotExists(DepositInfoBkt); err != nil {\n\t\t\treturn dbutil.NewCreateBucketFailedErr(DepositInfoBkt, err)\n\t\t}\n\n\t\t// create bind address bucket if not exist\n\t\tfor _, ct := range scanner.GetCoinTypes() {\n\t\t\tbktName := MustGetBindAddressBkt(ct)\n\t\t\tif _, err := tx.CreateBucketIfNotExists(bktName); err != nil {\n\t\t\t\treturn dbutil.NewCreateBucketFailedErr(bktName, err)\n\t\t\t}\n\t\t}\n\n\t\tif _, err := tx.CreateBucketIfNotExists(KittyDepositSeqsIndexBkt); err != nil {\n\t\t\treturn dbutil.NewCreateBucketFailedErr(KittyDepositSeqsIndexBkt, err)\n\t\t}\n\n\t\tif _, err := tx.CreateBucketIfNotExists(TxsBkt); err != nil {\n\t\t\treturn dbutil.NewCreateBucketFailedErr(TxsBkt, err)\n\t\t}\n\n\t\tif _, err := tx.CreateBucketIfNotExists(DepositTrackBkt); err != nil {\n\t\t\treturn dbutil.NewCreateBucketFailedErr(DepositTrackBkt, err)\n\t\t}\n\n\t\treturn nil\n\t}); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &Store{\n\t\tdb: db,\n\t\tlog: log.WithField(\"prefix\", \"exchange.Store\"),\n\t}, nil\n}", "func (s Store) StoreBlock (block types.FullSignedBlock) error {\r\n\r\n\t// Open badger\r\n\tstor, err := badger.Open(badger.DefaultOptions(s.StorFileLocation))\r\n\tif err != nil {\r\n\t\tpanic(err)\r\n\t}\r\n\r\n\tdefer stor.Close()\r\n\r\n\t// Serialize all the parts: block in json\r\n\tbytes, err := json.Marshal(block)\r\n\r\n\terr = stor.Update(func(txn *badger.Txn) error {\r\n\r\n\t\tvar txErr error\r\n\t\t// Store the hash as a key. 
This is the main register\r\n\t\tif txErr = storeStringIndex(txn, block.Hash, bytes, HashKeyPrefix); txErr == nil {\r\n\t\t\t// And now store the indexes. Using this indexes it is possible to retrieve the hash, and next the block\r\n\t\t\tif txErr = storeUIntIndex(txn, block.Timestamp, []byte(block.Hash), TimestampKeyPrefix); txErr != nil { // By timestamp\r\n\t\t\t\treturn txErr\r\n\t\t\t}\r\n\r\n\t\t\tif txErr = storeUIntIndex(txn, block.Height, []byte(block.Hash), HeightKeyPrefix); txErr != nil { // By block Height\r\n\t\t\t\treturn txErr\r\n\t\t\t}\r\n\t\t} \r\n\r\n\t\t return txErr\r\n\t})\r\n\r\n\treturn err\r\n}", "func NewBlockchain(address string) (chain *Blockchain, err error) {\n\terr = os.Remove(DbFilePath)\n\tif err != nil {\n\t\treturn\n\t}\n\tdb, err := bolt.Open(DbFilePath, 0600, nil)\n\tif err != nil {\n\t\treturn\n\t}\n\tvar tail []byte\n\terr = db.Update(func(tx *bolt.Tx) error {\n\t\tgenesis := NewGenesisBlock([]*Transaction{\n\t\t\tNewGenesisTransaction(address),\n\t\t})\n\t\tbucket, err := tx.CreateBucket([]byte(BucketName))\n\t\tbytes, err := json.Marshal(genesis)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\terr = bucket.Put(genesis.Hash, bytes)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\terr = bucket.Put([]byte(LastBlockKey), genesis.Hash)\n\t\tif err != nil {\n\t\t\t_ = bucket.Delete(genesis.Hash)\n\t\t\treturn err\n\t\t}\n\t\ttail = genesis.Hash\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn\n\t}\n\tchain = &Blockchain{tail, db}\n\treturn\n}", "func NewStore() *Store {\n\treturn &Store{\n\t\tstore: make(map[workloadmeta.Kind]map[string]workloadmeta.Entity),\n\t}\n}", "func NewStore(c Config) *Store {\n\treturn &Store{\n\t\tpath: c.Dir,\n\t\thost: c.Hostname,\n\t\taddr: c.BindAddress,\n\t\tdata: &Data{},\n\t\tHeartbeatTimeout: time.Duration(c.HeartbeatTimeout),\n\t\tElectionTimeout: time.Duration(c.ElectionTimeout),\n\t\tLeaderLeaseTimeout: time.Duration(c.LeaderLeaseTimeout),\n\t\tCommitTimeout: time.Duration(c.CommitTimeout),\n\t\tLogger: log.New(os.Stderr, \"\", log.LstdFlags),\n\t}\n}", "func NewBlock(_data string, _prevHash []byte) *Block {\n\t_block := &Block{\n\t\tTimestamp: time.Now().Unix(),\n\t\tData: []byte(_data),\n\t\tPrevHash: _prevHash,\n\t\tHash: []byte{},\n\t}\n\n\tpow := NewProofOfWork(_block)\n\tnonce, hash := pow.Run()\n\n\t_block.Nonce = nonce\n\t_block.Hash = hash[:]\n\n\treturn _block\n}", "func makeBlock(txns []map[string]int, chain []map[string]interface{}) map[string]interface{} {\n\tparentBlock := chain[len(chain)-1]\n\tparentHash := parentBlock[\"hash\"]\n\tcontents := parentBlock[\"contents\"].(map[string]interface{})\n\tblockNumber := contents[\"blockNumber\"].(int)\n\ttxnCount := len(txns)\n\tblockContents := map[string]interface{}{\"blockNumber\": blockNumber + 1, \"parentHash\": parentHash, \"txnCount\": txnCount, \"txns\": txns}\n\tblockhash := hashme(blockContents)\n\tblock := map[string]interface{}{\"hash\": blockhash, \"contents\": blockContents}\n\treturn block\n}", "func NewStore(\n\tctx context.Context, cfg StoreConfig, eng storage.Engine, nodeDesc *roachpb.NodeDescriptor,\n) *Store {\n\t// TODO(tschottdorf): find better place to set these defaults.\n\tcfg.SetDefaults()\n\n\tif !cfg.Valid() {\n\t\tlog.Fatalf(ctx, \"invalid store configuration: %+v\", &cfg)\n\t}\n\ts := &Store{\n\t\tcfg: cfg,\n\t\tdb: cfg.DB, // TODO(tschottdorf): remove redundancy.\n\t\tengine: eng,\n\t\tnodeDesc: nodeDesc,\n\t\tmetrics: newStoreMetrics(cfg.HistogramWindowInterval),\n\t\tctSender: cfg.ClosedTimestampSender,\n\t}\n\tif 
cfg.RPCContext != nil {\n\t\ts.allocator = MakeAllocator(cfg.StorePool, cfg.RPCContext.RemoteClocks.Latency)\n\t} else {\n\t\ts.allocator = MakeAllocator(cfg.StorePool, func(string) (time.Duration, bool) {\n\t\t\treturn 0, false\n\t\t})\n\t}\n\ts.replRankings = newReplicaRankings()\n\n\ts.draining.Store(false)\n\ts.scheduler = newRaftScheduler(s.metrics, s, storeSchedulerConcurrency)\n\n\ts.raftEntryCache = raftentry.NewCache(cfg.RaftEntryCacheSize)\n\ts.metrics.registry.AddMetricStruct(s.raftEntryCache.Metrics())\n\n\ts.coalescedMu.Lock()\n\ts.coalescedMu.heartbeats = map[roachpb.StoreIdent][]RaftHeartbeat{}\n\ts.coalescedMu.heartbeatResponses = map[roachpb.StoreIdent][]RaftHeartbeat{}\n\ts.coalescedMu.Unlock()\n\n\ts.mu.Lock()\n\ts.mu.replicaPlaceholders = map[roachpb.RangeID]*ReplicaPlaceholder{}\n\ts.mu.replicasByKey = newStoreReplicaBTree()\n\ts.mu.uninitReplicas = map[roachpb.RangeID]*Replica{}\n\ts.mu.Unlock()\n\n\ts.unquiescedReplicas.Lock()\n\ts.unquiescedReplicas.m = map[roachpb.RangeID]struct{}{}\n\ts.unquiescedReplicas.Unlock()\n\n\ts.rangefeedReplicas.Lock()\n\ts.rangefeedReplicas.m = map[roachpb.RangeID]struct{}{}\n\ts.rangefeedReplicas.Unlock()\n\n\ts.tsCache = tscache.New(cfg.Clock)\n\ts.metrics.registry.AddMetricStruct(s.tsCache.Metrics())\n\n\ts.txnWaitMetrics = txnwait.NewMetrics(cfg.HistogramWindowInterval)\n\ts.metrics.registry.AddMetricStruct(s.txnWaitMetrics)\n\ts.snapshotApplySem = make(chan struct{}, cfg.concurrentSnapshotApplyLimit)\n\n\ts.renewableLeasesSignal = make(chan struct{})\n\n\ts.limiters.BulkIOWriteRate = rate.NewLimiter(rate.Limit(bulkIOWriteLimit.Get(&cfg.Settings.SV)), bulkIOWriteBurst)\n\tbulkIOWriteLimit.SetOnChange(&cfg.Settings.SV, func() {\n\t\ts.limiters.BulkIOWriteRate.SetLimit(rate.Limit(bulkIOWriteLimit.Get(&cfg.Settings.SV)))\n\t})\n\ts.limiters.ConcurrentImportRequests = limit.MakeConcurrentRequestLimiter(\n\t\t\"importRequestLimiter\", int(importRequestsLimit.Get(&cfg.Settings.SV)),\n\t)\n\timportRequestsLimit.SetOnChange(&cfg.Settings.SV, func() {\n\t\ts.limiters.ConcurrentImportRequests.SetLimit(int(importRequestsLimit.Get(&cfg.Settings.SV)))\n\t})\n\ts.limiters.ConcurrentExportRequests = limit.MakeConcurrentRequestLimiter(\n\t\t\"exportRequestLimiter\", int(ExportRequestsLimit.Get(&cfg.Settings.SV)),\n\t)\n\n\t// The snapshot storage is usually empty at this point since it is cleared\n\t// after each snapshot application, except when the node crashed right before\n\t// it can clean it up. 
If this fails it's not a correctness issue since the\n\t// storage is also cleared before receiving a snapshot.\n\ts.sstSnapshotStorage = NewSSTSnapshotStorage(s.engine, s.limiters.BulkIOWriteRate)\n\tif err := s.sstSnapshotStorage.Clear(); err != nil {\n\t\tlog.Warningf(ctx, \"failed to clear snapshot storage: %v\", err)\n\t}\n\ts.protectedtsCache = cfg.ProtectedTimestampCache\n\n\t// On low-CPU instances, a default limit value may still allow ExportRequests\n\t// to tie up all cores so cap limiter at cores-1 when setting value is higher.\n\texportCores := runtime.GOMAXPROCS(0) - 1\n\tif exportCores < 1 {\n\t\texportCores = 1\n\t}\n\tExportRequestsLimit.SetOnChange(&cfg.Settings.SV, func() {\n\t\tlimit := int(ExportRequestsLimit.Get(&cfg.Settings.SV))\n\t\tif limit > exportCores {\n\t\t\tlimit = exportCores\n\t\t}\n\t\ts.limiters.ConcurrentExportRequests.SetLimit(limit)\n\t})\n\ts.limiters.ConcurrentAddSSTableRequests = limit.MakeConcurrentRequestLimiter(\n\t\t\"addSSTableRequestLimiter\", int(addSSTableRequestLimit.Get(&cfg.Settings.SV)),\n\t)\n\taddSSTableRequestLimit.SetOnChange(&cfg.Settings.SV, func() {\n\t\ts.limiters.ConcurrentAddSSTableRequests.SetLimit(int(addSSTableRequestLimit.Get(&cfg.Settings.SV)))\n\t})\n\ts.limiters.ConcurrentRangefeedIters = limit.MakeConcurrentRequestLimiter(\n\t\t\"rangefeedIterLimiter\", int(concurrentRangefeedItersLimit.Get(&cfg.Settings.SV)),\n\t)\n\tconcurrentRangefeedItersLimit.SetOnChange(&cfg.Settings.SV, func() {\n\t\ts.limiters.ConcurrentRangefeedIters.SetLimit(\n\t\t\tint(concurrentRangefeedItersLimit.Get(&cfg.Settings.SV)))\n\t})\n\n\ts.tenantRateLimiters = tenantrate.NewLimiterFactory(cfg.Settings, &cfg.TestingKnobs.TenantRateKnobs)\n\ts.metrics.registry.AddMetricStruct(s.tenantRateLimiters.Metrics())\n\n\ts.systemConfigUpdateQueueRateLimiter = quotapool.NewRateLimiter(\n\t\t\"SystemConfigUpdateQueue\",\n\t\tquotapool.Limit(queueAdditionOnSystemConfigUpdateRate.Get(&cfg.Settings.SV)),\n\t\tqueueAdditionOnSystemConfigUpdateBurst.Get(&cfg.Settings.SV))\n\tupdateSystemConfigUpdateQueueLimits := func() {\n\t\ts.systemConfigUpdateQueueRateLimiter.UpdateLimit(\n\t\t\tquotapool.Limit(queueAdditionOnSystemConfigUpdateRate.Get(&cfg.Settings.SV)),\n\t\t\tqueueAdditionOnSystemConfigUpdateBurst.Get(&cfg.Settings.SV))\n\t}\n\tqueueAdditionOnSystemConfigUpdateRate.SetOnChange(&cfg.Settings.SV,\n\t\tupdateSystemConfigUpdateQueueLimits)\n\tqueueAdditionOnSystemConfigUpdateBurst.SetOnChange(&cfg.Settings.SV,\n\t\tupdateSystemConfigUpdateQueueLimits)\n\n\tif s.cfg.Gossip != nil {\n\t\t// Add range scanner and configure with queues.\n\t\ts.scanner = newReplicaScanner(\n\t\t\ts.cfg.AmbientCtx, s.cfg.Clock, cfg.ScanInterval,\n\t\t\tcfg.ScanMinIdleTime, cfg.ScanMaxIdleTime, newStoreReplicaVisitor(s),\n\t\t)\n\t\ts.gcQueue = newGCQueue(s, s.cfg.Gossip)\n\t\ts.mergeQueue = newMergeQueue(s, s.db, s.cfg.Gossip)\n\t\ts.splitQueue = newSplitQueue(s, s.db, s.cfg.Gossip)\n\t\ts.replicateQueue = newReplicateQueue(s, s.cfg.Gossip, s.allocator)\n\t\ts.replicaGCQueue = newReplicaGCQueue(s, s.db, s.cfg.Gossip)\n\t\ts.raftLogQueue = newRaftLogQueue(s, s.db, s.cfg.Gossip)\n\t\ts.raftSnapshotQueue = newRaftSnapshotQueue(s, s.cfg.Gossip)\n\t\ts.consistencyQueue = newConsistencyQueue(s, s.cfg.Gossip)\n\t\t// NOTE: If more queue types are added, please also add them to the list of\n\t\t// queues on the EnqueueRange debug page as defined in\n\t\t// pkg/ui/src/views/reports/containers/enqueueRange/index.tsx\n\t\ts.scanner.AddQueues(\n\t\t\ts.gcQueue, s.mergeQueue, s.splitQueue, 
s.replicateQueue, s.replicaGCQueue,\n\t\t\ts.raftLogQueue, s.raftSnapshotQueue, s.consistencyQueue)\n\t\ttsDS := s.cfg.TimeSeriesDataStore\n\t\tif s.cfg.TestingKnobs.TimeSeriesDataStore != nil {\n\t\t\ttsDS = s.cfg.TestingKnobs.TimeSeriesDataStore\n\t\t}\n\t\tif tsDS != nil {\n\t\t\ts.tsMaintenanceQueue = newTimeSeriesMaintenanceQueue(\n\t\t\t\ts, s.db, s.cfg.Gossip, tsDS,\n\t\t\t)\n\t\t\ts.scanner.AddQueues(s.tsMaintenanceQueue)\n\t\t}\n\t}\n\n\tif cfg.TestingKnobs.DisableGCQueue {\n\t\ts.setGCQueueActive(false)\n\t}\n\tif cfg.TestingKnobs.DisableMergeQueue {\n\t\ts.setMergeQueueActive(false)\n\t}\n\tif cfg.TestingKnobs.DisableRaftLogQueue {\n\t\ts.setRaftLogQueueActive(false)\n\t}\n\tif cfg.TestingKnobs.DisableReplicaGCQueue {\n\t\ts.setReplicaGCQueueActive(false)\n\t}\n\tif cfg.TestingKnobs.DisableReplicateQueue {\n\t\ts.SetReplicateQueueActive(false)\n\t}\n\tif cfg.TestingKnobs.DisableSplitQueue {\n\t\ts.setSplitQueueActive(false)\n\t}\n\tif cfg.TestingKnobs.DisableTimeSeriesMaintenanceQueue {\n\t\ts.setTimeSeriesMaintenanceQueueActive(false)\n\t}\n\tif cfg.TestingKnobs.DisableRaftSnapshotQueue {\n\t\ts.setRaftSnapshotQueueActive(false)\n\t}\n\tif cfg.TestingKnobs.DisableConsistencyQueue {\n\t\ts.setConsistencyQueueActive(false)\n\t}\n\tif cfg.TestingKnobs.DisableScanner {\n\t\ts.setScannerActive(false)\n\t}\n\n\treturn s\n}", "func newStore(config *Config) *store {\n\treturn &store{\n\t\thashMap: hash.NewUnsafeHash(config.Capacity),\n\t\texpireHeap: newTimeHeap(config.Capacity),\n\t\texpireTimer: new(refreshTimer),\n\t}\n}", "func NewBlockchain(config Config, params params.ChainParams, db blockdb.DB, ip state.InitializationParameters) (Blockchain, error) {\n\tstate, err := NewStateService(config.Log, ip, params, db)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar genesisTime time.Time\n\n\terr = db.Update(func(tx blockdb.DBUpdateTransaction) error {\n\t\tgenesisTime, err = tx.GetGenesisTime()\n\t\tif err != nil {\n\t\t\tconfig.Log.Infof(\"using genesis time %d from params\", ip.GenesisTime.Unix())\n\t\t\tgenesisTime = ip.GenesisTime\n\t\t\tif err := tx.SetGenesisTime(ip.GenesisTime); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t} else {\n\t\t\tconfig.Log.Infof(\"using genesis time %d from db\", genesisTime.Unix())\n\t\t}\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ttxidx, err := txindex.NewTxIndex(config.Datadir)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tch := &blockchain{\n\t\tlog: config.Log,\n\t\tconfig: config,\n\t\tparams: params,\n\t\ttxidx: txidx,\n\t\tdb: db,\n\t\tstate: state,\n\t\tnotifees: make(map[BlockchainNotifee]struct{}),\n\t\tgenesisTime: genesisTime,\n\t}\n\treturn ch, db.Update(func(txn blockdb.DBUpdateTransaction) error {\n\t\treturn ch.UpdateChainHead(txn, state.Tip().Hash)\n\t})\n}", "func newBlock(lastBlock Block, seed int, npeer string, transactions []SignedTransaction) Block {\n\tvar newBlock Block\n\n\tnewBlock.Seed = seed\n\tnewBlock.Index = lastBlock.Index + 1\n\tnewBlock.LastHash = lastBlock.Hash\n\tnewBlock.Peer = npeer\n\tnewBlock.SpecialAccounts = lastBlock.SpecialAccounts\n\tnewBlock.Transactions = transactions\n\tnewBlock.Hash = blockHash(newBlock)\n\treturn newBlock\n}", "func NewBlock(oldBlock Block, data string) Block {\n\t// fmt.Println(\"******TODO: IMPLEMENT NewBlock!******\")\n\tblock := Block{Data: data, Timestamp: time.Now().Unix(), PrevHash: oldBlock.Hash, Hash: []byte{}}\n\tblock.Hash = block.calculateHash()\n\t// fmt.Println(\"data: \" + block.Data)\n\t// fmt.Printf(\"timestamp: %d\", 
block.Timestamp)\n\t// fmt.Println()\n\t// fmt.Printf(\"preHash: %x\", block.PrevHash)\n\t// fmt.Println()\n\t// fmt.Printf(\"currentHash: %x\", block.Hash)\n\t// fmt.Println()\n\t// fmt.Println(\"******TODO: END NewBlock!******\")\n\t// fmt.Println()\n\t// fmt.Println()\n\t// fmt.Println()\n\treturn block\n}", "func TestHash(t *testing.T) {\n\t// Hash of block 234439.\n\tblockHashStr := \"14a0810ac680a3eb3f82edc878cea25ec41d6b790744e5daeef\"\n\tblockHash, err := NewHashFromStr(blockHashStr)\n\tassert.NoError(t, err)\n\n\t// Hash of block 234440 as byte slice.\n\tbuf := []byte{\n\t\t0x79, 0xa6, 0x1a, 0xdb, 0xc6, 0xe5, 0xa2, 0xe1,\n\t\t0x39, 0xd2, 0x71, 0x3a, 0x54, 0x6e, 0xc7, 0xc8,\n\t\t0x75, 0x63, 0x2e, 0x75, 0xf1, 0xdf, 0x9c, 0x3f,\n\t\t0xa6, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n\t}\n\n\thash, err := NewHash(buf)\n\tassert.NoError(t, err)\n\n\t// Ensure proper size.\n\tassert.Equal(t, HashSize, len(hash))\n\n\t// Ensure contents match.\n\tassert.Equal(t, buf[:], hash[:])\n\n\t// Ensure contents of hash of block 234440 don't match 234439.\n\tassert.False(t, hash.IsEqual(blockHash))\n\n\t// Set hash from byte slice and ensure contents match.\n\terr = hash.SetBytes(blockHash.CloneBytes())\n\tassert.NoError(t, err)\n\tassert.True(t, hash.IsEqual(blockHash))\n\n\t// Ensure nil hashes are handled properly.\n\tassert.True(t, (*Hash)(nil).IsEqual(nil))\n\n\tassert.False(t, hash.IsEqual(nil))\n\n\t// Invalid size for SetBytes.\n\terr = hash.SetBytes([]byte{0x00})\n\tassert.NotNil(t, err)\n\n\t// Invalid size for NewHash.\n\tinvalidHash := make([]byte, HashSize+1)\n\t_, err = NewHash(invalidHash)\n\tassert.NotNil(t, err)\n\n\tif err == nil {\n\t\tt.Errorf(\"NewHash: failed to received expected err - got: nil\")\n\t}\n}", "func newStore(c *Config) (*Store, error) {\n\tif c == nil {\n\t\tc = defaultConfig()\n\t}\n\tmutex := &sync.RWMutex{}\n\tstore := new(Store)\n\tstartTime := time.Now().UTC()\n\tfileWatcher, err := newWatcher(\".\")\n\tif err != nil {\n\t\tlog.Info(fmt.Sprintf(\"unable to init file watcher: %v\", err))\n\t}\n\tif c.Monitoring {\n\t\tmonitoring.Init()\n\t}\n\tstore.fileWatcher = fileWatcher\n\tstore.store = makeStorage(\"\")\n\tstore.keys = []string{}\n\tstore.compression = c.Compression\n\tstore.dbs = make(map[string]*DB)\n\tstore.lock = mutex\n\tstore.stat = new(stats.Statistics)\n\tstore.stat.Start = startTime\n\tstore.indexes = make(map[string]*index)\n\tc.setMissedValues()\n\tstore.config = c\n\tif c.LoadPath != \"\" {\n\t\terrLoad := loadData(store, c.LoadPath)\n\t\tif errLoad != nil {\n\t\t\treturn nil, fmt.Errorf(\"unable to load data: %v\", errLoad)\n\t\t}\n\t}\n\tstore.writer, err = newWriter(c.LoadPath)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unable to create writer: %v\", err)\n\t}\n\treturn store, nil\n}", "func NewStore(storeType string) Store {\n\tns := Store{}\n\tns.Name = \"Shop with no Sign\"\n\towner := character.NewCharacter(\"\", \"\", \"\")\n\tns.Owner = owner.Name\n\tns.Location = \"Heldheim\"\n\tns.StoreType = storeType\n\t//ns.Inventory = generateInventoryForStore()\n\tcp := 0\n\tss := 0\n\tgc := 0\n\tns.Money = NewMoney(cp, ss, gc)\n\n\treturn ns\n}", "func NewBlockHeaderFeederSpec(spec *job.BlockHeaderFeederSpec) *BlockHeaderFeederSpec {\n\treturn &BlockHeaderFeederSpec{\n\t\tCoordinatorV1Address: spec.CoordinatorV1Address,\n\t\tCoordinatorV2Address: spec.CoordinatorV2Address,\n\t\tCoordinatorV2PlusAddress: spec.CoordinatorV2PlusAddress,\n\t\tWaitBlocks: spec.WaitBlocks,\n\t\tLookbackBlocks: spec.LookbackBlocks,\n\t\tBlockhashStoreAddress: 
spec.BlockhashStoreAddress,\n\t\tBatchBlockhashStoreAddress: spec.BatchBlockhashStoreAddress,\n\t\tPollPeriod: spec.PollPeriod,\n\t\tRunTimeout: spec.RunTimeout,\n\t\tEVMChainID: spec.EVMChainID,\n\t\tFromAddresses: spec.FromAddresses,\n\t\tGetBlockhashesBatchSize: spec.GetBlockhashesBatchSize,\n\t\tStoreBlockhashesBatchSize: spec.StoreBlockhashesBatchSize,\n\t}\n}", "func NewDB(sugar *zap.SugaredLogger, db *sqlx.DB, options ...Option) (*BlockInfoStorage, error) {\n\tconst schemaFMT = `\n\tCREATE TABLE IF NOT EXISTS %[1]s\n(\n\tblock bigint NOT NULL,\n\ttime timestamp NOT NULL,\n\tCONSTRAINT %[1]s_pk PRIMARY KEY(block)\n) ;\nCREATE INDEX IF NOT EXISTS %[1]s_time_idx ON %[1]s (time);\n`\n\tvar (\n\t\tlogger = sugar.With(\"func\", caller.GetCurrentFunctionName())\n\t\ttableNames = map[string]string{blockInfoTable: blockInfoTable}\n\t)\n\ths := &BlockInfoStorage{\n\t\tsugar: sugar,\n\t\tdb: db,\n\t\ttableNames: tableNames,\n\t}\n\tfor _, opt := range options {\n\t\tif err := opt(hs); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tquery := fmt.Sprintf(schemaFMT, hs.tableNames[blockInfoTable])\n\tlogger.Debugw(\"initializing database schema\", \"query\", query)\n\tif _, err := hs.db.Exec(query); err != nil {\n\t\treturn nil, err\n\t}\n\tlogger.Debug(\"database schema initialized successfully\")\n\treturn hs, nil\n}", "func NewBlockStorage(ctx context.Context, db Database) *BlockStorage {\n\treturn &BlockStorage{\n\t\tdb: db,\n\t}\n}", "func NewBlock(data string, transactions []*Tx, prevBlockHash []byte) *Block {\n\tblock := &Block{\n\t\tIdentifier: internal.GenerateID(),\n\t\tData: []byte(data),\n\t\tTransactions: transactions,\n\t\tPrevBlockHash: prevBlockHash,\n\t\tTimestamp: time.Now().Unix(),\n\t}\n\n\tpow := NewPow(block)\n\tnonce, hash := pow.Run()\n\n\tblock.Hash = hash[:]\n\tblock.Nonce = nonce\n\treturn block\n}", "func NewStore(ctx context.Context, l log.Logger, db *sqlx.DB, beaconName string) (*Store, error) {\n\tp := Store{\n\t\tlog: l,\n\t\tdb: db,\n\n\t\trequiresPrevious: chain.PreviousRequiredFromContext(ctx),\n\t}\n\n\tid, err := p.AddBeaconID(ctx, beaconName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tp.beaconID = id\n\n\treturn &p, nil\n}", "func NewBlockChain() *Blockchain {\n\tvar tip []byte\n\t// Set up connection to db\n\tbbto := gorocksdb.NewDefaultBlockBasedTableOptions()\n\tbbto.SetBlockCache(gorocksdb.NewLRUCache(3 << 30))\n\topts := gorocksdb.NewDefaultOptions()\n\topts.SetBlockBasedTableFactory(bbto)\n\topts.SetCreateIfMissing(true)\n\tshouldCreateGenesisBlock := false\n\t/* If no existing blockchain create genesis block */\n\tif _, err := os.Stat(dbFile); os.IsNotExist(err) {\n\t\tshouldCreateGenesisBlock = true\n\t}\n\tdb, err := gorocksdb.OpenDb(opts, dbFile)\n\tif err != nil {\n\t\tlog.Panic(\"Could not connect to database:\", err)\n\t}\n\tif shouldCreateGenesisBlock == true {\n\t\tfmt.Println(\"No existing blockchain found. 
Creating a new one...\")\n\t\t// Generate new block\n\t\tgenesis := NewGenesisBlock()\n\t\t// serialise the genesis block\n\t\tserialisedGenesis, err := proto.Marshal(genesis)\n\t\tif err != nil {\n\t\t\tlog.Fatalln(\"Could not serialise genesis block:\", err)\n\t\t}\n\t\t// write to db\n\t\two := gorocksdb.NewDefaultWriteOptions()\n\t\terr = db.Put(wo, genesis.Hash, serialisedGenesis) // save block\n\t\tif err != nil {\n\t\t\tlog.Fatalln(\"Could not store genesis block:\", err)\n\t\t}\n\t\terr = db.Put(wo, []byte(\"l\"), genesis.Hash) // last block of the hash chain\n\t\tif err != nil {\n\t\t\tlog.Fatalln(\"Could not store last hash for genesis block:\", err)\n\t\t}\n\t\ttip = genesis.Hash\n\t} else {\n\t\t// Get existing tip\n\t\tro := gorocksdb.NewDefaultReadOptions()\n\t\ttipRaw, err := db.Get(ro, []byte(\"l\"))\n\t\tdefer tipRaw.Free()\n\t\ttip = tipRaw.Data()\n\t\tfmt.Println(\"Using existing blockchain\")\n\t\tif err != nil {\n\t\t\tlog.Fatalln(\"Could not get tip hash:\", err)\n\t\t}\n\t}\n\treturn &Blockchain{tip, db}\n}", "func dbStoreBlockNode(dbTx database.Tx, node *blockNode) error {\n\t// Serialize block data to be stored.\n\tsize := blockHdrSize + 1\n\tif node.status&statusFirstInRound == statusFirstInRound {\n\t\tsize += roundHdrDeltaSize\n\t}\n\tw := bytes.NewBuffer(make([]byte, 0, size))\n\theader := node.Header()\n\terr := header.Serialize(w)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = w.WriteByte(byte(node.status))\n\tif err != nil {\n\t\treturn err\n\t}\n\tif node.status&statusFirstInRound == statusFirstInRound {\n\t\terr = node.round.Serialize(w)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tvalue := w.Bytes()\n\n\t// Write block header data to block index bucket.\n\tblockIndexBucket := dbTx.Metadata().Bucket(blockIndexBucketName)\n\tkey := blockIndexKey(&node.hash, uint32(node.height))\n\n\treturn blockIndexBucket.Put(key, value)\n}", "func NewBlock(data string, prevBlockHash []byte) *Block {\n\tblock := &Block{time.Now().Unix(), []byte(data), prevBlockHash, []byte{}, 0}\n\tpow := NewProofOfWork(block)\n\n\tnonce, hash := pow.Run()\n\n\tblock.Hash = hash[:]\n\tblock.Nonce = nonce\n\n\treturn block\n}", "func New(rootPath string) (*VolumeStore, error) {\n\tvs := &VolumeStore{\n\t\tlocks: &locker.Locker{},\n\t\tnames: make(map[string]volume.Volume),\n\t\trefs: make(map[string]map[string]struct{}),\n\t\tlabels: make(map[string]map[string]string),\n\t\toptions: make(map[string]map[string]string),\n\t}\n\n\tif rootPath != \"\" {\n\t\t// initialize metadata store\n\t\tvolPath := filepath.Join(rootPath, volumeDataDir)\n\t\tif err := os.MkdirAll(volPath, 750); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tdbPath := filepath.Join(volPath, \"metadata.db\")\n\n\t\tvar err error\n\t\tvs.db, err = bolt.Open(dbPath, 0600, &bolt.Options{Timeout: 1 * time.Second})\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"error while opening volume store metadata database\")\n\t\t}\n\n\t\t// initialize volumes bucket\n\t\tif err := vs.db.Update(func(tx *bolt.Tx) error {\n\t\t\tif _, err := tx.CreateBucketIfNotExists(volumeBucketName); err != nil {\n\t\t\t\treturn errors.Wrap(err, \"error while setting up volume store metadata database\")\n\t\t\t}\n\t\t\treturn nil\n\t\t}); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tvs.restore()\n\n\treturn vs, nil\n}", "func NewBlockHeader(version uint32, prevBlockHash, merkleRootHash *chain.Hash, bits uint32) {\n return &BlockHeader{\n Version: version,\n\t PrevBlock: *prevBlockHash,\n\t MerkleRoot: *merkleRootHash,\n\t 
Timestamp: time.Unix(time.Now().Unix(), 0),\n\t Bits: bits,\n }\n}", "func TestAttrStore_Blocks(t *testing.T) {\n\ts := MustOpenAttrStore()\n\tdefer s.Close()\n\n\t// Set attributes.\n\tif err := s.SetAttrs(1, map[string]interface{}{\"A\": uint64(100)}); err != nil {\n\t\tt.Fatal(err)\n\t} else if err := s.SetAttrs(2, map[string]interface{}{\"A\": uint64(200)}); err != nil {\n\t\tt.Fatal(err)\n\t} else if err := s.SetAttrs(100, map[string]interface{}{\"B\": \"VALUE\"}); err != nil {\n\t\tt.Fatal(err)\n\t} else if err := s.SetAttrs(350, map[string]interface{}{\"C\": \"FOO\"}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Retrieve blocks.\n\tblks0, err := s.Blocks()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t} else if len(blks0) != 3 || blks0[0].ID != 0 || blks0[1].ID != 1 || blks0[2].ID != 3 {\n\t\tt.Fatalf(\"unexpected blocks: %#v\", blks0)\n\t}\n\n\t// Change second block.\n\tif err := s.SetAttrs(100, map[string]interface{}{\"X\": 12}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Ensure second block changed.\n\tblks1, err := s.Blocks()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t} else if !reflect.DeepEqual(blks0[0], blks1[0]) {\n\t\tt.Fatalf(\"block 0 mismatch: %#v != %#v\", blks0[0], blks1[0])\n\t} else if reflect.DeepEqual(blks0[1], blks1[1]) {\n\t\tt.Fatalf(\"block 1 match: %#v \", blks0[0])\n\t} else if !reflect.DeepEqual(blks0[2], blks1[2]) {\n\t\tt.Fatalf(\"block 2 mismatch: %#v != %#v\", blks0[2], blks1[2])\n\t}\n}", "func New(cfg *config.Config, hub *component.ComponentHub) (*SimpleBlockFactory, error) {\n\tconsensus.InitBlockInterval(cfg.Consensus.BlockInterval)\n\n\ts := &SimpleBlockFactory{\n\t\tComponentHub: hub,\n\t\tjobQueue: make(chan interface{}, slotQueueMax),\n\t\tblockInterval: consensus.BlockInterval,\n\t\tmaxBlockBodySize: chain.MaxBlockBodySize(),\n\t\tquit: make(chan interface{}),\n\t}\n\n\ts.txOp = chain.NewCompTxOp(\n\t\tchain.TxOpFn(func(txIn *types.Tx) (*types.BlockState, error) {\n\t\t\tselect {\n\t\t\tcase <-s.quit:\n\t\t\t\treturn nil, chain.ErrQuit\n\t\t\tdefault:\n\t\t\t\treturn nil, nil\n\t\t\t}\n\t\t}),\n\t)\n\n\treturn s, nil\n}", "func NewGetBlockHashCmd(index int64) *GetBlockHashCmd {\n\treturn &GetBlockHashCmd{\n\t\tIndex: index,\n\t}\n}", "func NewStore() *Store {\n\treturn &Store{commands: make(map[string]*Config, 0)}\n}", "func NewBlockManager(ntmgr notify.Notify, indexManager blockchain.IndexManager, db database.DB,\n\ttimeSource blockchain.MedianTimeSource, sigCache *txscript.SigCache,\n\tcfg *config.Config, par *params.Params,\n\tinterrupt <-chan struct{}, events *event.Feed, peerServer *p2p.Service) (*BlockManager, error) {\n\tbm := BlockManager{\n\t\tconfig: cfg,\n\t\tparams: par,\n\t\tnotify: ntmgr,\n\t\tprogressLogger: progresslog.NewBlockProgressLogger(\"Processed\", log),\n\t\tmsgChan: make(chan interface{}, cfg.MaxPeers*3),\n\t\theaderList: list.New(),\n\t\tquit: make(chan struct{}),\n\t\tpeerServer: peerServer,\n\t}\n\n\t// Create a new block chain instance with the appropriate configuration.\n\tvar err error\n\tbm.chain, err = blockchain.New(&blockchain.Config{\n\t\tDB: db,\n\t\tInterrupt: interrupt,\n\t\tChainParams: par,\n\t\tTimeSource: timeSource,\n\t\tEvents: events,\n\t\tSigCache: sigCache,\n\t\tIndexManager: indexManager,\n\t\tDAGType: cfg.DAGType,\n\t\tCacheInvalidTx: cfg.CacheInvalidTx,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbest := bm.chain.BestSnapshot()\n\tbm.chain.DisableCheckpoints(cfg.DisableCheckpoints)\n\tif !cfg.DisableCheckpoints {\n\t\t// Initialize the next checkpoint based on the current 
height.\n\t\tbm.nextCheckpoint = bm.findNextHeaderCheckpoint(uint64(best.GraphState.GetMainHeight()))\n\t\tif bm.nextCheckpoint != nil {\n\t\t\tbm.resetHeaderState(&best.Hash, uint64(best.GraphState.GetMainHeight()))\n\t\t}\n\t} else {\n\t\tlog.Info(\"Checkpoints are disabled\")\n\t}\n\n\tif cfg.DumpBlockchain != \"\" {\n\t\terr = bm.chain.DumpBlockChain(cfg.DumpBlockchain, par, uint64(best.GraphState.GetTotal())-1)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\treturn nil, fmt.Errorf(\"closing after dumping blockchain\")\n\t}\n\n\tbm.zmqNotify = zmq.NewZMQNotification(cfg)\n\n\tbm.chain.Subscribe(bm.handleNotifyMsg)\n\treturn &bm, nil\n}", "func NewStore(config Config) *Store {\n\terr := os.MkdirAll(config.RootDir, os.FileMode(0700))\n\tif err != nil {\n\t\tlogger.Fatal(err)\n\t}\n\torm := models.NewORM(config.RootDir)\n\tethrpc, err := rpc.Dial(config.EthereumURL)\n\tif err != nil {\n\t\tlogger.Fatal(err)\n\t}\n\tkeyStore := NewKeyStore(config.KeysDir())\n\n\tht, err := NewHeadTracker(orm)\n\tif err != nil {\n\t\tlogger.Fatal(err)\n\t}\n\n\tstore := &Store{\n\t\tORM: orm,\n\t\tConfig: config,\n\t\tKeyStore: keyStore,\n\t\tExiter: os.Exit,\n\t\tClock: Clock{},\n\t\tHeadTracker: ht,\n\t\tTxManager: &TxManager{\n\t\t\tConfig: config,\n\t\t\tEthClient: &EthClient{ethrpc},\n\t\t\tKeyStore: keyStore,\n\t\t\tORM: orm,\n\t\t},\n\t}\n\treturn store\n}", "func NewBlock(chain uint64, producer Address) *StBlock {\n\tvar hashPowerLimit uint64\n\tvar blockInterval uint64\n\tvar pStat BaseInfo\n\tout := new(StBlock)\n\tgetDataFormDB(chain, dbStat{}, []byte{StatBaseInfo}, &pStat)\n\tgetDataFormDB(chain, dbStat{}, []byte{StatHashPower}, &hashPowerLimit)\n\tgetDataFormDB(chain, dbStat{}, []byte{StatBlockInterval}, &blockInterval)\n\n\tif pStat.ID == 0 {\n\t\tlog.Println(\"fail to get the last block. 
chain:\", chain)\n\t\treturn nil\n\t}\n\n\thashPowerLimit = hashPowerLimit / 1000\n\tif hashPowerLimit < minHPLimit {\n\t\thashPowerLimit = minHPLimit\n\t}\n\n\tout.HashpowerLimit = hashPowerLimit\n\n\tif pStat.ID == 1 && chain > 1 {\n\t\tpStat.Time = pStat.Time + blockSyncMax + blockSyncMin + TimeSecond\n\t} else {\n\t\tpStat.Time += blockInterval\n\t}\n\n\tout.Previous = pStat.Key\n\tout.Producer = producer\n\tout.Time = pStat.Time\n\n\tout.Chain = chain\n\tout.Index = pStat.ID + 1\n\n\tif pStat.Chain > 1 {\n\t\tvar key Hash\n\t\tvar tmp BlockInfo\n\t\tgetDataFormLog(chain/2, logBlockInfo{}, runtime.Encode(pStat.ParentID+1), &key)\n\t\tgetDataFormLog(chain/2, logBlockInfo{}, key[:], &tmp)\n\t\tif out.Index != 2 && !key.Empty() && out.Time > tmp.Time && out.Time-tmp.Time > blockSyncMin {\n\t\t\tvar key2 Hash\n\t\t\tgetDataFormLog(chain/2, logBlockInfo{}, runtime.Encode(pStat.ParentID+2), &key2)\n\t\t\tgetDataFormLog(chain/2, logBlockInfo{}, key2[:], &tmp)\n\t\t\tif !key2.Empty() && out.Time > tmp.Time && out.Time-tmp.Time > blockSyncMin {\n\t\t\t\tout.Parent = key2\n\t\t\t} else {\n\t\t\t\tout.Parent = key\n\t\t\t}\n\t\t\t// assert(out.Time-tmp.Time <= blockSyncMax)\n\t\t} else {\n\t\t\tgetDataFormLog(chain/2, logBlockInfo{}, runtime.Encode(pStat.ParentID), &key)\n\t\t\tout.Parent = key\n\t\t}\n\t}\n\tif pStat.LeftChildID > 0 {\n\t\tvar key Hash\n\t\tvar tmp BlockInfo\n\t\tgetDataFormLog(2*chain, logBlockInfo{}, runtime.Encode(pStat.LeftChildID+1), &key)\n\t\tgetDataFormLog(2*chain, logBlockInfo{}, key[:], &tmp)\n\t\tif !key.Empty() && out.Time > tmp.Time && out.Time-tmp.Time > blockSyncMin {\n\t\t\tvar key2 Hash\n\t\t\tgetDataFormLog(2*chain, logBlockInfo{}, runtime.Encode(pStat.LeftChildID+2), &key2)\n\t\t\tgetDataFormLog(2*chain, logBlockInfo{}, key2[:], &tmp)\n\t\t\tif !key2.Empty() && out.Time > tmp.Time && out.Time-tmp.Time > blockSyncMin {\n\t\t\t\tout.LeftChild = key2\n\t\t\t} else {\n\t\t\t\tout.LeftChild = key\n\t\t\t}\n\t\t\t// assert(out.Time-tmp.Time <= blockSyncMax)\n\t\t} else if pStat.LeftChildID == 1 {\n\t\t\tgetDataFormLog(chain, logBlockInfo{}, runtime.Encode(pStat.LeftChildID), &key)\n\t\t\tout.LeftChild = key\n\t\t} else {\n\t\t\tgetDataFormLog(2*chain, logBlockInfo{}, runtime.Encode(pStat.LeftChildID), &key)\n\t\t\tout.LeftChild = key\n\t\t}\n\t}\n\tif pStat.RightChildID > 0 {\n\t\tvar key Hash\n\t\tvar tmp BlockInfo\n\t\tgetDataFormLog(2*chain+1, logBlockInfo{}, runtime.Encode(pStat.RightChildID+1), &key)\n\t\tgetDataFormLog(2*chain+1, logBlockInfo{}, key[:], &tmp)\n\t\tif !key.Empty() && out.Time > tmp.Time && out.Time-tmp.Time > blockSyncMin {\n\t\t\tvar key2 Hash\n\t\t\tgetDataFormLog(2*chain+1, logBlockInfo{}, runtime.Encode(pStat.RightChildID+2), &key2)\n\t\t\tgetDataFormLog(2*chain+1, logBlockInfo{}, key2[:], &tmp)\n\t\t\tif !key2.Empty() && out.Time > tmp.Time && out.Time-tmp.Time > blockSyncMin {\n\t\t\t\tout.RightChild = key2\n\t\t\t} else {\n\t\t\t\tout.RightChild = key\n\t\t\t}\n\t\t\t// assert(out.Time-tmp.Time <= blockSyncMax)\n\t\t} else if pStat.RightChildID == 1 {\n\t\t\tgetDataFormLog(chain, logBlockInfo{}, runtime.Encode(pStat.RightChildID), &key)\n\t\t\tout.RightChild = key\n\t\t} else {\n\t\t\tgetDataFormLog(2*chain+1, logBlockInfo{}, runtime.Encode(pStat.RightChildID), &key)\n\t\t\tout.RightChild = key\n\t\t}\n\t}\n\n\treturn out\n}", "func BlockHashIn(vs ...string) predicate.Block {\n\tv := make([]interface{}, len(vs))\n\tfor i := range v {\n\t\tv[i] = vs[i]\n\t}\n\treturn predicate.Block(func(s *sql.Selector) 
{\n\t\ts.Where(sql.In(s.C(FieldBlockHash), v...))\n\t})\n}", "func NewBlock(index idx.Block, time Timestamp, events hash.Events, prevHash hash.Event) *Block {\n\treturn &Block{\n\t\tIndex: index,\n\t\tTime: time,\n\t\tEvents: events,\n\t\tPrevHash: prevHash,\n\t\tSkippedTxs: make([]uint, 0),\n\t}\n}", "func NewBlock(\n\tblockStart xtime.UnixNano,\n\tmd namespace.Metadata,\n\tblockOpts BlockOptions,\n\tnamespaceRuntimeOptsMgr namespace.RuntimeOptionsManager,\n\topts Options,\n) (Block, error) {\n\tblockSize := md.Options().IndexOptions().BlockSize()\n\tiopts := opts.InstrumentOptions()\n\tscope := iopts.MetricsScope().SubScope(\"index\").SubScope(\"block\")\n\tiopts = iopts.SetMetricsScope(scope)\n\n\tcpus := int(math.Max(1, math.Ceil(0.25*float64(runtime.GOMAXPROCS(0)))))\n\tcachedSearchesWorkers := xsync.NewWorkerPool(cpus)\n\tcachedSearchesWorkers.Init()\n\n\tsegs := newMutableSegments(\n\t\tmd,\n\t\tblockStart,\n\t\topts,\n\t\tblockOpts,\n\t\tcachedSearchesWorkers,\n\t\tnamespaceRuntimeOptsMgr,\n\t\tiopts,\n\t)\n\n\tcoldSegs := newMutableSegments(\n\t\tmd,\n\t\tblockStart,\n\t\topts,\n\t\tblockOpts,\n\t\tcachedSearchesWorkers,\n\t\tnamespaceRuntimeOptsMgr,\n\t\tiopts,\n\t)\n\n\t// NB(bodu): The length of coldMutableSegments is always at least 1.\n\tcoldMutableSegments := []*mutableSegments{coldSegs}\n\tb := &block{\n\t\tstate: blockStateOpen,\n\t\tblockStart: blockStart,\n\t\tblockEnd: blockStart.Add(blockSize),\n\t\tblockSize: blockSize,\n\t\tblockOpts: blockOpts,\n\t\tcachedSearchesWorkers: cachedSearchesWorkers,\n\t\tmutableSegments: segs,\n\t\tcoldMutableSegments: coldMutableSegments,\n\t\tshardRangesSegmentsByVolumeType: make(shardRangesSegmentsByVolumeType),\n\t\topts: opts,\n\t\tiopts: iopts,\n\t\tnsMD: md,\n\t\tnamespaceRuntimeOptsMgr: namespaceRuntimeOptsMgr,\n\t\tmetrics: newBlockMetrics(scope),\n\t\tlogger: iopts.Logger(),\n\t\tfetchDocsLimit: opts.QueryLimits().FetchDocsLimit(),\n\t\taggDocsLimit: opts.QueryLimits().AggregateDocsLimit(),\n\t}\n\tb.newFieldsAndTermsIteratorFn = newFieldsAndTermsIterator\n\tb.newExecutorWithRLockFn = b.executorWithRLock\n\tb.addAggregateResultsFn = b.addAggregateResults\n\n\treturn b, nil\n}", "func Open(path string, opts *Options) (*Blockstore, error) {\n\tvar err error\n\tvar db *leveldb.DB\n\n\tif path == \"\" {\n\t\tdb, err = leveldb.Open(storage.NewMemStorage(), &opts.Options)\n\t} else {\n\t\tdb, err = leveldb.OpenFile(path, &opts.Options)\n\t\tif errors.IsCorrupted(err) && !opts.GetReadOnly() {\n\t\t\tlog.Warnf(\"leveldb blockstore appears corrupted; recovering\")\n\t\t\tdb, err = leveldb.RecoverFile(path, &opts.Options)\n\t\t}\n\t}\n\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to open leveldb blockstore: %w\", err)\n\t}\n\n\tbs := &Blockstore{\n\t\tDB: db,\n\t\tclosing: make(chan struct{}),\n\t}\n\n\tctx := context.Background()\n\tif opts.Name != \"\" {\n\t\tvar err error\n\t\tctx, err = tag.New(context.Background(), tag.Insert(TagName, opts.Name))\n\t\tif err != nil {\n\t\t\tlog.Warnf(\"failed to instantiate metrics tag; metrics will be untagged; err: %s\", err)\n\t\t}\n\t}\n\n\tbs.wg.Add(1)\n\tgo bs.recordMetrics(ctx, MetricsFrequency)\n\n\treturn bs, nil\n}", "func newBlockchain(opts ...emulator.Option) *emulator.Blockchain {\n\tb, err := emulator.NewBlockchain(\n\t\tappend(\n\t\t\t[]emulator.Option{\n\t\t\t\temulator.WithStorageLimitEnabled(false),\n\t\t\t},\n\t\t\topts...,\n\t\t)...,\n\t)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn b\n}", "func newBlockchain(opts ...emulator.Option) *emulator.Blockchain {\n\tb, 
err := emulator.NewBlockchain(\n\t\tappend(\n\t\t\t[]emulator.Option{\n\t\t\t\temulator.WithStorageLimitEnabled(false),\n\t\t\t},\n\t\t\topts...,\n\t\t)...,\n\t)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn b\n}", "func NewBlock(data string, prevBlockHash []byte) *Block {\n\tblock := &Block{\n\t\tTimestamp: time.Now().Unix(),\n\t\tData: []byte(data),\n\t\tPrevBlockHash: prevBlockHash,\n\t\tHash: []byte{},\n\t}\n\tpow := NewProofOfWork(block)\n\tnonce, hash := pow.Run()\n\n\tblock.Hash = hash[:]\n\tblock.Nonce = nonce\n\n\treturn block\n}", "func NewStore() *Store {\n\tvar st Store\n\tst.Records = make(map[string]HostSet)\n\tst.Netviews = make(map[string]string)\n\tst.Cidrs = make(map[string]string)\n\treturn &st\n}", "func NewBlock(data string, prevBlockHash []byte) *Block {\n\tblock := &Block{\n\t\tTimestamp: time.Now().UTC().Unix(),\n\t\tPrevBlockHash: prevBlockHash,\n\t\tHash: []byte{},\n\t\tData: []byte(data),\n\t}\n\n\tpow := NewProofOfWork(block)\n\tnonce, hash := pow.Run()\n\n\tblock.Hash = hash[:]\n\tblock.Nonce = nonce\n\n\treturn block\n}", "func NewStore(\n\ttracer trace.Tracer,\n\tlogger log.Logger,\n\tmetadata MetadataManager,\n\tbucket objstore.Bucket,\n\tdebuginfodClients DebuginfodClients,\n\tsignedUpload SignedUpload,\n\tmaxUploadDuration time.Duration,\n\tmaxUploadSize int64,\n) (*Store, error) {\n\treturn &Store{\n\t\ttracer: tracer,\n\t\tlogger: log.With(logger, \"component\", \"debuginfo\"),\n\t\tbucket: bucket,\n\t\tmetadata: metadata,\n\t\tdebuginfodClients: debuginfodClients,\n\t\tsignedUpload: signedUpload,\n\t\tmaxUploadDuration: maxUploadDuration,\n\t\tmaxUploadSize: maxUploadSize,\n\t\ttimeNow: time.Now,\n\t}, nil\n}", "func NewStore(dbName string) *Store {\n\tdb, err := bolt.Open(dbName, 0600, nil)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\treturn &Store{db: db}\n}", "func NewStore(keyPairs ...[]byte) Store {\n\treturn &store{bzhysessions.NewCookieStore(keyPairs...)}\n}", "func startNewConsensusStateAndWaitForBlock(t *testing.T, lastBlockHeight int64, blockDB dbm.DB, stateDB dbm.DB) {\n\tlogger := log.TestingLogger()\n\tstate, _ := sm.LoadStateFromDBOrGenesisFile(stateDB, consensusReplayConfig.GenesisFile())\n\tprivValidator := loadPrivValidator(consensusReplayConfig)\n\tcs := newConsensusStateWithConfigAndBlockStore(consensusReplayConfig, state, privValidator, kvstore.NewKVStoreApplication(), blockDB)\n\tcs.SetLogger(logger)\n\n\tbytes, _ := ioutil.ReadFile(cs.config.WalFile())\n\t// fmt.Printf(\"====== WAL: \\n\\r%s\\n\", bytes)\n\tt.Logf(\"====== WAL: \\n\\r%X\\n\", bytes)\n\n\terr := cs.Start()\n\trequire.NoError(t, err)\n\tdefer cs.Stop()\n\n\t// This is just a signal that we haven't halted; its not something contained\n\t// in the WAL itself. 
Assuming the consensus state is running, replay of any\n\t// WAL, including the empty one, should eventually be followed by a new\n\t// block, or else something is wrong.\n\tnewBlockCh := make(chan interface{}, 1)\n\terr = cs.eventBus.Subscribe(context.Background(), testSubscriber, types.EventQueryNewBlock, newBlockCh)\n\trequire.NoError(t, err)\n\tselect {\n\tcase <-newBlockCh:\n\tcase <-time.After(60 * time.Second):\n\t\tt.Fatalf(\"Timed out waiting for new block (see trace above)\")\n\t}\n}", "func NewStore(dir string, hints map[string]string) *Store {\n\tif hints == nil {\n\t\thints = map[string]string{}\n\t}\n\treturn &Store{\n\t\tData: &sync.Map{},\n\t\tDir: dir,\n\t\tHints: hints,\n\t\tListFiles: listFiles,\n\t\tParseFile: parseFile,\n\t\tFileExists: fileExists,\n\t}\n}", "func CreateNewBlock(txs []*model.Transaction, prevHash string, reward float64, height int64, pk []byte, l *model.Ledger, difficulty int, ctl chan commands.Command) (*model.Block, commands.Command, []*model.Transaction, error) {\n\torigL := GetLedgerDeepCopy(l)\n\n\terrTxs, err := HandleTransactions(txs, l)\n\tif err != nil {\n\t\treturn nil, commands.NewDefaultCommand(), errTxs, err\n\t}\n\n\t// All transactions are valid if reached here, calculate transaction fee on the original ledger.\n\tfee, err := CalcTxFee(txs, origL)\n\tif err != nil {\n\t\tlog.Fatalln(\"there should never be a case where handle transaction success but fail calcFee\")\n\t}\n\n\tblock := model.Block{\n\t\tPrevHash: prevHash,\n\t\tTxs: txs,\n\t\tCoinbase: CreateCoinbaseTx(reward+fee, pk, height),\n\t}\n\n\tc, err := Mine(&block, difficulty, ctl)\n\treturn &block, c, []*model.Transaction{}, err\n}", "func newStore(ts service.Service, config *Config) (*Store, error) {\n\tif config.Datastore == nil {\n\t\tdatastore, err := newDefaultDatastore(config.RepoPath, config.LowMem)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tconfig.Datastore = datastore\n\t}\n\tif config.EventCodec == nil {\n\t\tconfig.EventCodec = newDefaultEventCodec(config.JsonMode)\n\t}\n\tif !managedDatastore(config.Datastore) {\n\t\tif config.Debug {\n\t\t\tif err := util.SetLogLevels(map[string]logging.LogLevel{\"store\": logging.LevelDebug}); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t}\n\n\tctx, cancel := context.WithCancel(context.Background())\n\ts := &Store{\n\t\tctx: ctx,\n\t\tcancel: cancel,\n\t\tdatastore: config.Datastore,\n\t\tdispatcher: newDispatcher(config.Datastore),\n\t\teventcodec: config.EventCodec,\n\t\tmodelNames: make(map[string]*Model),\n\t\tjsonMode: config.JsonMode,\n\t\tlocalEventsBus: &localEventsBus{bus: broadcast.NewBroadcaster(0)},\n\t\tstateChangedNotifee: &stateChangedNotifee{},\n\t\tservice: ts,\n\t}\n\n\tif s.jsonMode {\n\t\tif err := s.reregisterSchemas(); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\ts.dispatcher.Register(s)\n\treturn s, nil\n}", "func stateAndStore(config *cfg.Config, pubKey crypto.PubKey, appVersion version.Protocol) (dbm.DB, sm.State, *mockBlockStore) {\n\tstateDB := dbm.NewMemDB()\n\tstate, _ := sm.MakeGenesisStateFromFile(config.GenesisFile())\n\tstate.Version.Consensus.App = appVersion\n\tstore := NewMockBlockStore(config, state.ConsensusParams)\n\treturn stateDB, state, store\n}", "func NewBlock(filename string, blknum uint64) *Block {\n\treturn &Block{\n\t\tfilename: filename,\n\t\tblknum: blknum,\n\t}\n}", "func NewStore(name string, secretConfig map[string]interface{}) (SecretStore, error) {\n\tstoresLock.RLock()\n\tdefer storesLock.RUnlock()\n\n\tif init, exists := secretStores[name]; 
exists {\n\t\treturn init(secretConfig)\n\t}\n\treturn nil, ErrNotSupported\n}", "func generateBlock(oldBlock Block, Key int) Block {\n\n\tvar newBlock Block\n\n\tt := time.Now()\n\n\tnewBlock.Index = oldBlock.Index + 1\n\tnewBlock.Timestamp = t.String()\n\tnewBlock.Key = Key\n\tnewBlock.PrevHash = oldBlock.Hash\n\tnewBlock.Hash = calculateHash(newBlock)\n\n\tf, err := os.OpenFile(\"blocks.txt\",\n\t\tos.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)\n\tif err != nil {\n\t\tlog.Println(err)\n\t}\n\tb, err := json.Marshal(newBlock)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\tdefer f.Close()\n\tif _, err := f.WriteString(string(b)); err != nil {\n\t\tlog.Println(err)\n\t}\n\n\treturn newBlock\n}", "func (genesis *Genesis) store(bcStore store.BlockchainStore, accountStateDB database.Database) error {\n\tstatedb := getStateDB(genesis.info)\n\n\tbatch := accountStateDB.NewBatch()\n\tif _, err := statedb.Commit(batch); err != nil {\n\t\treturn errors.NewStackedError(err, \"failed to commit batch into statedb\")\n\t}\n\n\tif err := batch.Commit(); err != nil {\n\t\treturn errors.NewStackedError(err, \"failed to commit batch into database\")\n\t}\n\n\t if err := bcStore.PutBlockHeader(genesis.header.Hash(), genesis.header, genesis.header.Difficulty, true); err != nil {\n \t\treturn errors.NewStackedError(err, \"failed to put genesis block header into store\")\n\t}\n\n\treturn nil\n\n\t//fmt.Println(\"============================================\")\n\t//fmt.Println(\"genesis.header.hash:\", genesis.header.Hash())\n\t//fmt.Println(\"genesis.header:\", genesis.header)\n\t//fmt.Println(\"genesis.header.Difficulty:\", genesis.header.Difficulty)\n\t//fmt.Println(\"=============================================\")\n\t//\n\t//err:=bcStore.PutBlockHeader(genesis.header.Hash(),genesis.header,genesis.header.Difficulty,true)\n //if err!=nil{\n //\tfmt.Println(\"PutBlockHead is err:\",err)\n\t//\treturn err\n\t//}\n\t//return nil\n}", "func genRandomBlockHash() *chainhash.Hash {\n\tvar seed [32]byte\n\trand.Read(seed[:])\n\thash := chainhash.Hash(seed)\n\treturn &hash\n}", "func createTestStore(t *testing.T) (*Store, *hlc.ManualClock, *mockEngine) {\n\tmanual := hlc.ManualClock(0)\n\tclock := hlc.NewHLClock(manual.UnixNano)\n\tengine := newMockEngine()\n\tstore := NewStore(clock, engine, nil)\n\treplica := Replica{RangeID: 1}\n\t_, err := store.CreateRange(Key(\"a\"), Key(\"z\"), []Replica{replica})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\treturn store, &manual, engine\n}", "func New(cfg *Config) secrets.Store {\n\tconfig := Config{}\n\tif cfg != nil {\n\t\tconfig = *cfg\n\t}\n\tif strings.Contains(config.Prefix, \":\") {\n\t\tpanic(\"forbidden character ':' in Prefix\")\n\t}\n\tif config.SecretLen == 0 {\n\t\tconfig.SecretLen = 32\n\t}\n\tif config.Entropy == nil {\n\t\tconfig.Entropy = rand.Reader\n\t}\n\treturn &storeImpl{config}\n}", "func NewBlock(b *block.Block, chain blockchainer.Blockchainer) Block {\n\tres := Block{\n\t\tBlock: *b,\n\t\tBlockMetadata: BlockMetadata{\n\t\t\tSize: io.GetVarSize(b),\n\t\t\tConfirmations: chain.BlockHeight() - b.Index + 1,\n\t\t},\n\t}\n\n\thash := chain.GetHeaderHash(int(b.Index) + 1)\n\tif !hash.Equals(util.Uint256{}) {\n\t\tres.NextBlockHash = &hash\n\t}\n\n\treturn res\n}", "func (s *BlockStore) Write(b *block.Block) error {\n\terr := os.MkdirAll(s.RootDir, 0666)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfilename := s.filename(b.Hash)\n\tf, err := os.Create(filename)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer f.Close()\n\n\tdata, err := json.Marshal(b)\n\tif 
err != nil {\n\t\treturn err\n\t}\n\n\t_, err = f.Write(data)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}" ]
[ "0.6099992", "0.56509185", "0.55223393", "0.5484488", "0.5479262", "0.5428878", "0.5401267", "0.5392514", "0.5379299", "0.53530794", "0.5318673", "0.52941626", "0.52702796", "0.52518064", "0.52428454", "0.5215994", "0.52090174", "0.5206185", "0.51709896", "0.5166824", "0.5156433", "0.51524335", "0.5149808", "0.5119955", "0.51083595", "0.51060617", "0.504892", "0.5038167", "0.5021815", "0.5014339", "0.5001475", "0.49875957", "0.49671417", "0.49557975", "0.495537", "0.4953221", "0.49522653", "0.49442428", "0.4940095", "0.49307963", "0.49230802", "0.49199295", "0.49122754", "0.49054936", "0.49033147", "0.48956323", "0.48907325", "0.48858124", "0.4885575", "0.48660225", "0.4862966", "0.4861874", "0.4847885", "0.4846664", "0.48414835", "0.48411328", "0.4837773", "0.48343495", "0.4825931", "0.48207498", "0.4820586", "0.48144248", "0.48106334", "0.48094925", "0.48086745", "0.48080412", "0.48080155", "0.48016244", "0.47983107", "0.4797277", "0.47939122", "0.47771338", "0.47765476", "0.47750565", "0.47746998", "0.47719365", "0.4767619", "0.4763492", "0.47627118", "0.47627118", "0.47611725", "0.47583166", "0.4755457", "0.47373348", "0.47362924", "0.47358954", "0.4735095", "0.4734413", "0.47322667", "0.4731394", "0.47288376", "0.47245678", "0.4719523", "0.47194278", "0.47187942", "0.47155374", "0.4709524", "0.4708626", "0.47066084", "0.4704293" ]
0.7850846
0
NewBlockHeaderFeederSpec creates a new BlockHeaderFeederSpec for the given parameters.
func NewBlockHeaderFeederSpec(spec *job.BlockHeaderFeederSpec) *BlockHeaderFeederSpec { return &BlockHeaderFeederSpec{ CoordinatorV1Address: spec.CoordinatorV1Address, CoordinatorV2Address: spec.CoordinatorV2Address, CoordinatorV2PlusAddress: spec.CoordinatorV2PlusAddress, WaitBlocks: spec.WaitBlocks, LookbackBlocks: spec.LookbackBlocks, BlockhashStoreAddress: spec.BlockhashStoreAddress, BatchBlockhashStoreAddress: spec.BatchBlockhashStoreAddress, PollPeriod: spec.PollPeriod, RunTimeout: spec.RunTimeout, EVMChainID: spec.EVMChainID, FromAddresses: spec.FromAddresses, GetBlockhashesBatchSize: spec.GetBlockhashesBatchSize, StoreBlockhashesBatchSize: spec.StoreBlockhashesBatchSize, } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (_Rootchain *RootchainFilterer) FilterNewHeaderBlock(opts *bind.FilterOpts, proposer []common.Address, headerBlockId []*big.Int, reward []*big.Int) (*RootchainNewHeaderBlockIterator, error) {\n\n\tvar proposerRule []interface{}\n\tfor _, proposerItem := range proposer {\n\t\tproposerRule = append(proposerRule, proposerItem)\n\t}\n\tvar headerBlockIdRule []interface{}\n\tfor _, headerBlockIdItem := range headerBlockId {\n\t\theaderBlockIdRule = append(headerBlockIdRule, headerBlockIdItem)\n\t}\n\tvar rewardRule []interface{}\n\tfor _, rewardItem := range reward {\n\t\trewardRule = append(rewardRule, rewardItem)\n\t}\n\n\tlogs, sub, err := _Rootchain.contract.FilterLogs(opts, \"NewHeaderBlock\", proposerRule, headerBlockIdRule, rewardRule)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &RootchainNewHeaderBlockIterator{contract: _Rootchain.contract, event: \"NewHeaderBlock\", logs: logs, sub: sub}, nil\n}", "func NewBlockHeader(version uint32, prevBlockHash, merkleRootHash *chain.Hash, bits uint32) {\n return &BlockHeader{\n Version: version,\n\t PrevBlock: *prevBlockHash,\n\t MerkleRoot: *merkleRootHash,\n\t Timestamp: time.Unix(time.Now().Unix(), 0),\n\t Bits: bits,\n }\n}", "func TestBlockHeader(t *testing.T) {\n\tnonce, err := random.Uint64()\n\tif err != nil {\n\t\tt.Errorf(\"random.Uint64: Error generating nonce: %v\", err)\n\t}\n\n\thashes := []*daghash.Hash{mainnetGenesisHash, simnetGenesisHash}\n\n\tmerkleHash := mainnetGenesisMerkleRoot\n\tacceptedIDMerkleRoot := exampleAcceptedIDMerkleRoot\n\tbits := uint32(0x1d00ffff)\n\tbh := NewBlockHeader(1, hashes, merkleHash, acceptedIDMerkleRoot, exampleUTXOCommitment, bits, nonce)\n\n\t// Ensure we get the same data back out.\n\tif !reflect.DeepEqual(bh.ParentHashes, hashes) {\n\t\tt.Errorf(\"NewBlockHeader: wrong prev hashes - got %v, want %v\",\n\t\t\tspew.Sprint(bh.ParentHashes), spew.Sprint(hashes))\n\t}\n\tif !bh.HashMerkleRoot.IsEqual(merkleHash) {\n\t\tt.Errorf(\"NewBlockHeader: wrong merkle root - got %v, want %v\",\n\t\t\tspew.Sprint(bh.HashMerkleRoot), spew.Sprint(merkleHash))\n\t}\n\tif bh.Bits != bits {\n\t\tt.Errorf(\"NewBlockHeader: wrong bits - got %v, want %v\",\n\t\t\tbh.Bits, bits)\n\t}\n\tif bh.Nonce != nonce {\n\t\tt.Errorf(\"NewBlockHeader: wrong nonce - got %v, want %v\",\n\t\t\tbh.Nonce, nonce)\n\t}\n}", "func NewBlockHeader(version int32, prevHash *common.Hash) *BlockHeader {\n\n\t// Limit the timestamp to one second precision since the protocol\n\t// doesn't support better.\n\treturn &BlockHeader{\n\t\tVersion: version,\n\t\tPrevBlock: *prevHash,\n\t\tTimestamp: time.Now().Unix(),\n\t}\n}", "func NewBlockHeader(prev BlockHeader, uxHash cipher.SHA256, currentTime, fee uint64, body BlockBody) BlockHeader {\n\tif currentTime <= prev.Time {\n\t\tlog.Panic(\"Time can only move forward\")\n\t}\n\tbodyHash := body.Hash()\n\tprevHash := prev.Hash()\n\treturn BlockHeader{\n\t\tBodyHash: bodyHash,\n\t\tVersion: prev.Version,\n\t\tPrevHash: prevHash,\n\t\tTime: currentTime,\n\t\tBkSeq: prev.BkSeq + 1,\n\t\tFee: fee,\n\t\tUxHash: uxHash,\n\t}\n}", "func NewBlockHeader(prev BlockHeader, uxHash cipher.SHA256, currentTime, fee uint64, body BlockBody) BlockHeader {\n\tif currentTime <= prev.Time {\n\t\tlogger.Panic(\"Time can only move forward\")\n\t}\n\tprevHash := prev.Hash()\n\treturn BlockHeader{\n\t\tBodyHash: body.Hash(),\n\t\tVersion: prev.Version,\n\t\tPrevHash: prevHash,\n\t\tTime: currentTime,\n\t\tBkSeq: prev.BkSeq + 1,\n\t\tFee: fee,\n\t\tUxHash: uxHash,\n\t}\n}", "func TestFakeBlockHeaderFetcher(t 
*testing.T) {\n\tlogger := log.GetLogger().WithOutput(log.NewFormattingOutput(os.Stdout, log.NewHumanReadableFormatter()))\n\tffc := NewFakeBlockAndTimestampGetter(logger)\n\n\trequire.EqualValues(t, FAKE_CLIENT_LAST_TIMESTAMP_EXPECTED, ffc.data[FAKE_CLIENT_NUMBER_OF_BLOCKS-1], \"expected ffc last block to be of specific ts\")\n}", "func generateNewBlock(oldBlock Block, dataPayload string) (Block, error) {\n\n\tvar newBlock Block\n\ttimeNow := time.Now()\n\n\tnewBlock.Index = oldBlock.Index + 1\n\tnewBlock.Timestamp = timeNow.String()\n\n\tnewEvent, err := dataPayloadtoServiceEvent(dataPayload)\n\n\tif err != nil {\n\t\tlog.Println(\"ERROR: Unable to convert data payload into ServiceEvent for new block generation.\")\n\t}\n\n\tnewBlock.Event = newEvent\n\tnewBlock.PrevHash = oldBlock.Hash\n\tnewBlock.Hash = calculateHash(newBlock)\n\n\treturn newBlock, nil\n}", "func (_Rootchain *RootchainFilterer) WatchNewHeaderBlock(opts *bind.WatchOpts, sink chan<- *RootchainNewHeaderBlock, proposer []common.Address, headerBlockId []*big.Int, reward []*big.Int) (event.Subscription, error) {\n\n\tvar proposerRule []interface{}\n\tfor _, proposerItem := range proposer {\n\t\tproposerRule = append(proposerRule, proposerItem)\n\t}\n\tvar headerBlockIdRule []interface{}\n\tfor _, headerBlockIdItem := range headerBlockId {\n\t\theaderBlockIdRule = append(headerBlockIdRule, headerBlockIdItem)\n\t}\n\tvar rewardRule []interface{}\n\tfor _, rewardItem := range reward {\n\t\trewardRule = append(rewardRule, rewardItem)\n\t}\n\n\tlogs, sub, err := _Rootchain.contract.WatchLogs(opts, \"NewHeaderBlock\", proposerRule, headerBlockIdRule, rewardRule)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn event.NewSubscription(func(quit <-chan struct{}) error {\n\t\tdefer sub.Unsubscribe()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase log := <-logs:\n\t\t\t\t// New log arrived, parse the event and forward to the user\n\t\t\t\tevent := new(RootchainNewHeaderBlock)\n\t\t\t\tif err := _Rootchain.contract.UnpackLog(event, \"NewHeaderBlock\", log); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tevent.Raw = log\n\n\t\t\t\tselect {\n\t\t\t\tcase sink <- event:\n\t\t\t\tcase err := <-sub.Err():\n\t\t\t\t\treturn err\n\t\t\t\tcase <-quit:\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\tcase err := <-sub.Err():\n\t\t\t\treturn err\n\t\t\tcase <-quit:\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}), nil\n}", "func NewLegacyBlockHeader(version int32, prevHash, merkleRootHash *chainhash.Hash,\n\tbits uint32, nonce uint32) *BlockHeader {\n\tnounce256 := Uint256FromUint32(nonce)\n\treturn NewBlockHeader(version, prevHash, merkleRootHash, 0, bits, &nounce256, []byte{})\n}", "func (l *Ledger) FormatFakeBlock(txList []*pb.Transaction,\n\tproposer []byte, ecdsaPk *ecdsa.PrivateKey, /*矿工的公钥私钥*/\n\ttimestamp int64, curTerm int64, curBlockNum int64,\n\tpreHash []byte, utxoTotal *big.Int, blockHeight int64) (*pb.InternalBlock, error) {\n\treturn l.formatBlock(txList, proposer, ecdsaPk, timestamp, curTerm, curBlockNum, preHash, 0, utxoTotal, false, nil, nil, blockHeight)\n}", "func NewBlockHeader(prevHash *ShaHash, merkleRootHash *ShaHash) *BlockHeader {\n\n\treturn &BlockHeader{\n\t\tPrevBlock: *prevHash,\n\t\tMerkleRoot: *merkleRootHash,\n\t}\n}", "func NewPbftBlockHeader(n, gasLimit, gasUsed int64, parentHash, txnsHash []byte) *pb.PbftBlockHeader {\n\theader := &pb.PbftBlockHeader{\n\t\tNumber: n,\n\t\tGasLimit: gasLimit,\n\t\tGasUsed: gasUsed,\n\t\tTimestamp: time.Now().Unix(),\n\t\tParentHash: parentHash,\n\t\tTxnsHash: txnsHash,\n\t}\n\n\treturn 
header\n}", "func (*GetBlockByHeightRequest) Descriptor() ([]byte, []int) {\n\treturn file_service_proto_rawDescGZIP(), []int{13}\n}", "func NewBlockHeader(version int32, prevHash, merkleRootHash *chainhash.Hash,\n\theight uint32, bits uint32, nonce *[32]byte, solution []byte) *BlockHeader {\n\n\t// Limit the timestamp to one second precision since the protocol\n\t// doesn't support better.\n\tsolutionCopy := make([]byte, len(solution))\n\tcopy(solutionCopy, solution)\n\treturn &BlockHeader{\n\t\tVersion: version,\n\t\tPrevBlock: *prevHash,\n\t\tMerkleRoot: *merkleRootHash,\n\t\tTimestamp: time.Unix(time.Now().Unix(), 0),\n\t\tHeight: height,\n\t\tReserved: [7]uint32{},\n\t\tBits: bits,\n\t\tNonce: *nonce,\n\t\tSolution: solutionCopy,\n\t}\n}", "func (f *Fetcher) StartNewBlockSubscriber(BlockArrivalChannel chan<- *atypes.DistilledBlock) {\n\tgo func() {\n\t\t// ch := pubsub.Channel()\n\t\tlog.Print(\"wait event\")\n\t\tpubsub := f.client.PSubscribe(\"newblock*\")\n\t\tdefer pubsub.Close()\n\t\tch := pubsub.ChannelSize(100)\n\t\tfor {\n\t\t\t// msg, _ := pubsub.ReceiveMessage()\n\t\t\tmsg := <-ch\n\t\t\t// get a block number\n\t\t\tnumber, _ := strconv.ParseInt(msg.Payload, 10, 64)\n\n\t\t\t// get a block\n\t\t\tfetchedBlock := f.FetchBlock(number)\n\n\t\t\tBlockArrivalChannel <- fetchedBlock\n\t\t\t// log.Printf(\"received pattern[%s], payload[%s]\", number, msg.Pattern, msg.Payload)\n\t\t\tlog.Printf(\"received newblock[%+v], pattern[%s], payload[%s]\", fetchedBlock, msg.Pattern, msg.Payload)\n\t\t}\n\t}()\n}", "func (*GetBlockByHashRequest) Descriptor() ([]byte, []int) {\n\treturn file_service_proto_rawDescGZIP(), []int{12}\n}", "func NewBlockHeader(version uint16, parentHashes []*externalapi.DomainHash, hashMerkleRoot *externalapi.DomainHash,\n\tacceptedIDMerkleRoot *externalapi.DomainHash, utxoCommitment *externalapi.DomainHash, bits uint32, nonce uint64) *MsgBlockHeader {\n\n\t// Limit the timestamp to one millisecond precision since the protocol\n\t// doesn't support better.\n\treturn &MsgBlockHeader{\n\t\tVersion: version,\n\t\tParentHashes: parentHashes,\n\t\tHashMerkleRoot: hashMerkleRoot,\n\t\tAcceptedIDMerkleRoot: acceptedIDMerkleRoot,\n\t\tUTXOCommitment: utxoCommitment,\n\t\tTimestamp: mstime.Now(),\n\t\tBits: bits,\n\t\tNonce: nonce,\n\t}\n}", "func NewBlock(header *Header, txs []*Transaction, receipts []*Receipt, signs []*PbftSign) *Block {\n\tb := &Block{header: CopyHeader(header)}\n\n\t// TODO: panic if len(txs) != len(receipts)\n\tif len(txs) == 0 {\n\t\tb.header.TxHash = EmptyRootHash\n\t} else {\n\t\tb.header.TxHash = DeriveSha(Transactions(txs))\n\t\tb.transactions = make(Transactions, len(txs))\n\t\tcopy(b.transactions, txs)\n\t}\n\n\tif len(receipts) == 0 {\n\t\tb.header.ReceiptHash = EmptyRootHash\n\t} else {\n\t\tb.header.ReceiptHash = DeriveSha(Receipts(receipts))\n\t\tb.header.Bloom = CreateBloom(receipts)\n\t}\n\n\tif len(receipts) == 0 {\n\t\tb.header.ReceiptHash = EmptyRootHash\n\t} else {\n\t\tb.header.ReceiptHash = DeriveSha(Receipts(receipts))\n\t\tb.header.Bloom = CreateBloom(receipts)\n\t}\n\n\tif len(signs) != 0 {\n\t\tb.signs = make(PbftSigns, len(signs))\n\t\tcopy(b.signs, signs)\n\t}\n\n\treturn b\n}", "func generateGenesisBlock() Block {\n\n\tvar genesisBlock Block\n\tvar genesisRecord ServiceEvent\n\tvar genesisRecordEventDescription EventDescription\n\tvar genesisRecordEventDescriptionType EventType\n\tvar genesisRecordVehicle Vehicle\n\tvar genesisRecordGarage Garage\n\n\t// Seed values for Garage, Vehicle, EventType and 
EventDescription\n\tgenesisRecordGarage.GarageId = 0\n\tgenesisRecordGarage.Location = \"genesis location\"\n\tgenesisRecordGarage.Name = \"genesis inc.\"\n\tgenesisRecordGarage.Owner = \"genesis and co.\"\n\tgenesisRecordGarage.Type = \"main dealer\"\n\n\tgenesisRecordVehicle.V5c = \"63ne515\"\n\tgenesisRecordVehicle.VehicleColour = append(genesisRecordVehicle.VehicleColour, \"starting colour\")\n\tgenesisRecordVehicle.VehicleMake = \"genesis make\"\n\tgenesisRecordVehicle.VehicleModel = \"genesis model\"\n\tgenesisRecordVehicle.VehicleRegistration = append(genesisRecordVehicle.VehicleRegistration, \"GEN 351 S\")\n\n\tgenesisRecordEventDescriptionType.EventId = 0\n\tgenesisRecordEventDescriptionType.EventDescription = \"genesis event\"\n\n\tgenesisRecordEventDescription.EventItem = append(genesisRecordEventDescription.EventItem, genesisRecordEventDescriptionType)\n\tgenesisRecordEventDescription.VehicleMilage = 10000000\n\n\t// Pull all the objects into ServiceEvent\n\tgenesisRecord.EventAuthorisor = \"Created by serviceChain as the Genesis Block\"\n\tgenesisRecord.EventDetails = genesisRecordEventDescription\n\tgenesisRecord.Identifier = 1\n\tgenesisRecord.PerformedBy = genesisRecordGarage\n\tgenesisRecord.PerformedOnVehicle = genesisRecordVehicle\n\n\t// Set the values for the Block\n\tgenesisBlock.Index = 1\n\tgenesisBlock.Hash = \"0\"\n\tgenesisBlock.PrevHash = \"0\"\n\tgenesisBlock.Timestamp = time.Now().String()\n\tgenesisBlock.Event = genesisRecord\n\n\tblockString, err := json.MarshalIndent(genesisBlock, \"\", \"\\t\")\n\tif err != nil {\n\t\tlog.Println(\"INFO: serviceChain.createGenesisBlock(): Problem creating the JSON output of the genesis block. Continuing...\")\n\t}\n\n\tlog.Println(\"INFO: serviceChain.generateGenesisBlock(): Created block with contents: \" + string(blockString))\n\n\treturn genesisBlock\n}", "func (*BlockHeader) Descriptor() ([]byte, []int) {\n\treturn file_github_com_yahuizhan_dappley_metrics_go_api_core_block_pb_block_proto_rawDescGZIP(), []int{1}\n}", "func PrepareHeartBeatDataWithBlock(sbc *SyncBlockChain, selfId int32, peerMapJsonString map[string]int32, books map[string]int32, addr string, mpt p1.MerklePatriciaTrie, nonce string) HeartBeatData {\n\n\tblock := sbc.GenBlock(mpt)\n\tblock.Header.Nonce = nonce\n\tblockJson, err := block.EncodeToJson()\n\tif err != nil {\n\t\tfmt.Println(\"Error in PrepareHeartBeatData\")\n\t\tpanic(err)\n\t}\n\tfmt.Println(\"We created a block!\")\n\treturn HeartBeatData{true, selfId, blockJson, peerMapJsonString, books,addr, 3}\n\n}", "func (w *Writer) newBlockWriter(typ byte) *blockWriter {\n\tblock := w.block\n\n\tvar blockStart uint32\n\tif w.next == 0 {\n\t\thb := w.headerBytes()\n\t\tblockStart = uint32(copy(block, hb))\n\t}\n\n\tbw := newBlockWriter(typ, block, blockStart)\n\tbw.restartInterval = w.opts.RestartInterval\n\treturn bw\n}", "func (b *Block) CreateGenesisBlock() {\n\n header := Header{0, int64(time.Now().Unix()), \"GenesisBlock\", \"\", 0, \"\"}\n b.Mpt = p1.GetMPTrie()\n b.Header = header\n}", "func (*GenesisResponse_ConsensusParams_Block) Descriptor() ([]byte, []int) {\n\treturn file_resources_proto_rawDescGZIP(), []int{28, 0, 0}\n}", "func TestBlockHeaderSerialize(t *testing.T) {\n\tnonce := uint64(123123) // 0x01e0f3\n\n\t// baseBlockHdr is used in the various tests as a baseline BlockHeader.\n\tbits := uint32(0x1d00ffff)\n\tbaseBlockHdr := &BlockHeader{\n\t\tVersion: 1,\n\t\tParentHashes: []*daghash.Hash{mainnetGenesisHash, simnetGenesisHash},\n\t\tHashMerkleRoot: 
mainnetGenesisMerkleRoot,\n\t\tAcceptedIDMerkleRoot: exampleAcceptedIDMerkleRoot,\n\t\tUTXOCommitment: exampleUTXOCommitment,\n\t\tTimestamp: mstime.UnixMilliseconds(0x17315ed0f99),\n\t\tBits: bits,\n\t\tNonce: nonce,\n\t}\n\n\t// baseBlockHdrEncoded is the domainmessage encoded bytes of baseBlockHdr.\n\tbaseBlockHdrEncoded := []byte{\n\t\t0x01, 0x00, 0x00, 0x00, // Version 1\n\t\t0x02, // NumParentBlocks\n\t\t0xdc, 0x5f, 0x5b, 0x5b, 0x1d, 0xc2, 0xa7, 0x25, // mainnetGenesisHash\n\t\t0x49, 0xd5, 0x1d, 0x4d, 0xee, 0xd7, 0xa4, 0x8b,\n\t\t0xaf, 0xd3, 0x14, 0x4b, 0x56, 0x78, 0x98, 0xb1,\n\t\t0x8c, 0xfd, 0x9f, 0x69, 0xdd, 0xcf, 0xbb, 0x63,\n\t\t0xf6, 0x7a, 0xd7, 0x69, 0x5d, 0x9b, 0x66, 0x2a, // simnetGenesisHash\n\t\t0x72, 0xff, 0x3d, 0x8e, 0xdb, 0xbb, 0x2d, 0xe0,\n\t\t0xbf, 0xa6, 0x7b, 0x13, 0x97, 0x4b, 0xb9, 0x91,\n\t\t0x0d, 0x11, 0x6d, 0x5c, 0xbd, 0x86, 0x3e, 0x68,\n\t\t0x4a, 0x5e, 0x1e, 0x4b, 0xaa, 0xb8, 0x9f, 0x3a, // HashMerkleRoot\n\t\t0x32, 0x51, 0x8a, 0x88, 0xc3, 0x1b, 0xc8, 0x7f,\n\t\t0x61, 0x8f, 0x76, 0x67, 0x3e, 0x2c, 0xc7, 0x7a,\n\t\t0xb2, 0x12, 0x7b, 0x7a, 0xfd, 0xed, 0xa3, 0x3b,\n\t\t0x09, 0x3B, 0xC7, 0xE3, 0x67, 0x11, 0x7B, 0x3C, // AcceptedIDMerkleRoot\n\t\t0x30, 0xC1, 0xF8, 0xFD, 0xD0, 0xD9, 0x72, 0x87,\n\t\t0x7F, 0x16, 0xC5, 0x96, 0x2E, 0x8B, 0xD9, 0x63,\n\t\t0x65, 0x9C, 0x79, 0x3C, 0xE3, 0x70, 0xD9, 0x5F,\n\t\t0x10, 0x3B, 0xC7, 0xE3, 0x67, 0x11, 0x7B, 0x3C, // UTXOCommitment\n\t\t0x30, 0xC1, 0xF8, 0xFD, 0xD0, 0xD9, 0x72, 0x87,\n\t\t0x7F, 0x16, 0xC5, 0x96, 0x2E, 0x8B, 0xD9, 0x63,\n\t\t0x65, 0x9C, 0x79, 0x3C, 0xE3, 0x70, 0xD9, 0x5F,\n\t\t0x99, 0x0f, 0xed, 0x15, 0x73, 0x01, 0x00, 0x00, // Timestamp\n\t\t0xff, 0xff, 0x00, 0x1d, // Bits\n\t\t0xf3, 0xe0, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, // Fake Nonce. TODO: (Ori) Replace to a real nonce\n\t}\n\n\ttests := []struct {\n\t\tin *BlockHeader // Data to encode\n\t\tout *BlockHeader // Expected decoded data\n\t\tbuf []byte // Serialized data\n\t}{\n\t\t{\n\t\t\tbaseBlockHdr,\n\t\t\tbaseBlockHdr,\n\t\t\tbaseBlockHdrEncoded,\n\t\t},\n\t}\n\n\tt.Logf(\"Running %d tests\", len(tests))\n\tfor i, test := range tests {\n\t\t// Serialize the block header.\n\t\tvar buf bytes.Buffer\n\t\terr := test.in.Serialize(&buf)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Serialize #%d error %v\", i, err)\n\t\t\tcontinue\n\t\t}\n\t\tif !bytes.Equal(buf.Bytes(), test.buf) {\n\t\t\tt.Errorf(\"Serialize #%d\\n got: %s want: %s\", i,\n\t\t\t\tspew.Sdump(buf.Bytes()), spew.Sdump(test.buf))\n\t\t\tcontinue\n\t\t}\n\n\t\t// Deserialize the block header.\n\t\tvar bh BlockHeader\n\t\trbuf := bytes.NewReader(test.buf)\n\t\terr = bh.Deserialize(rbuf)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Deserialize #%d error %v\", i, err)\n\t\t\tcontinue\n\t\t}\n\t\tif !reflect.DeepEqual(&bh, test.out) {\n\t\t\tt.Errorf(\"Deserialize #%d\\n got: %s want: %s\", i,\n\t\t\t\tspew.Sdump(&bh), spew.Sdump(test.out))\n\t\t\tcontinue\n\t\t}\n\t}\n}", "func makeGenesisBlock() Block {\n\ttext := \"Welcome to this Go-Blockchain!\"\n\tblock := Block{}\n\n\t//Find suitable hash\n\tblock.HashPoW, block.textNoncePoW = \"0\", \"0\"\n\n\t//Define Header elements\n\tblock.Index = 0\n\tblock.Timestamp = time.Now()\n\tblock.PrevHashHeader = \"0\"\n\n\t//make hash of Header elements\n\tblock.blockHash = makeBlockHash(block)\n\n\t//Define Data\n\tblock.payload = text\n\treturn block\n}", "func CreateNewBlock(txs []*model.Transaction, prevHash string, reward float64, height int64, pk []byte, l *model.Ledger, difficulty int, ctl chan commands.Command) (*model.Block, commands.Command, []*model.Transaction, 
error) {\n\torigL := GetLedgerDeepCopy(l)\n\n\terrTxs, err := HandleTransactions(txs, l)\n\tif err != nil {\n\t\treturn nil, commands.NewDefaultCommand(), errTxs, err\n\t}\n\n\t// All transactions are valid if reached here, calculate transaction fee on the original ledger.\n\tfee, err := CalcTxFee(txs, origL)\n\tif err != nil {\n\t\tlog.Fatalln(\"there should never be a case where handle transaction success but fail calcFee\")\n\t}\n\n\tblock := model.Block{\n\t\tPrevHash: prevHash,\n\t\tTxs: txs,\n\t\tCoinbase: CreateCoinbaseTx(reward+fee, pk, height),\n\t}\n\n\tc, err := Mine(&block, difficulty, ctl)\n\treturn &block, c, []*model.Transaction{}, err\n}", "func (_Rootchain *RootchainCaller) HeaderBlocks(opts *bind.CallOpts, arg0 *big.Int) (struct {\n\tRoot [32]byte\n\tStart *big.Int\n\tEnd *big.Int\n\tCreatedAt *big.Int\n\tProposer common.Address\n}, error) {\n\tret := new(struct {\n\t\tRoot [32]byte\n\t\tStart *big.Int\n\t\tEnd *big.Int\n\t\tCreatedAt *big.Int\n\t\tProposer common.Address\n\t})\n\tout := ret\n\terr := _Rootchain.contract.Call(opts, out, \"headerBlocks\", arg0)\n\treturn *ret, err\n}", "func New(genesisHeader util.BlockHeader, db database.ChainStore) (*BlockChain, error) {\n\t// Init genesis header\n\t_, err := db.Headers().GetBest()\n\tif err != nil {\n\t\tstoreHeader := &util.Header{BlockHeader: genesisHeader, TotalWork: new(big.Int)}\n\t\tif err := db.Headers().Put(storeHeader, true); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treturn &BlockChain{db: db}, nil\n}", "func (b *Block) NewBlock(height int32, parentHash string, value p1.MerklePatriciaTrie) {\n\n var header Header\n mptAsBytes := getBytes(value)\n\n header.Height = height\n header.Timestamp = int64(time.Now().Unix())\n header.ParentHash = parentHash\n header.Size = int32(len(mptAsBytes))\n header.Nonce = \"\"\n hashString := string(header.Height) + string(header.Timestamp) + header.ParentHash + value.Root + string(header.Size)\n sum := sha3.Sum256([]byte(hashString))\n header.Hash = hex.EncodeToString(sum[:])\n\n b.Header = header\n b.Mpt = value\n}", "func NewBlockHeaderFromStr(headerStr string) (*BlockHeader, error) {\r\n\tif len(headerStr) != 160 {\r\n\t\treturn nil, errors.New(\"block header should be 80 bytes long\")\r\n\t}\r\n\r\n\theaderBytes, err := hex.DecodeString(headerStr)\r\n\tif err != nil {\r\n\t\treturn nil, err\r\n\t}\r\n\r\n\treturn NewBlockHeaderFromBytes(headerBytes)\r\n}", "func NewBlockhashStoreSpec(spec *job.BlockhashStoreSpec) *BlockhashStoreSpec {\n\treturn &BlockhashStoreSpec{\n\t\tCoordinatorV1Address: spec.CoordinatorV1Address,\n\t\tCoordinatorV2Address: spec.CoordinatorV2Address,\n\t\tCoordinatorV2PlusAddress: spec.CoordinatorV2PlusAddress,\n\t\tWaitBlocks: spec.WaitBlocks,\n\t\tLookbackBlocks: spec.LookbackBlocks,\n\t\tBlockhashStoreAddress: spec.BlockhashStoreAddress,\n\t\tTrustedBlockhashStoreAddress: spec.TrustedBlockhashStoreAddress,\n\t\tTrustedBlockhashStoreBatchSize: spec.TrustedBlockhashStoreBatchSize,\n\t\tPollPeriod: spec.PollPeriod,\n\t\tRunTimeout: spec.RunTimeout,\n\t\tEVMChainID: spec.EVMChainID,\n\t\tFromAddresses: spec.FromAddresses,\n\t}\n}", "func (*GetBlocksRequest) Descriptor() ([]byte, []int) {\n\treturn file_service_proto_rawDescGZIP(), []int{14}\n}", "func FormatBlock(\n\theader tmtypes.Header, size int, gasLimit int64,\n\tgasUsed *big.Int, transactions interface{}, bloom ethtypes.Bloom,\n) map[string]interface{} {\n\tif len(header.DataHash) == 0 {\n\t\theader.DataHash = tmbytes.HexBytes(common.Hash{}.Bytes())\n\t}\n\n\treturn 
map[string]interface{}{\n\t\t\"number\": hexutil.Uint64(header.Height),\n\t\t\"hash\": hexutil.Bytes(header.Hash()),\n\t\t\"parentHash\": hexutil.Bytes(header.LastBlockID.Hash),\n\t\t\"nonce\": hexutil.Uint64(0), // PoW specific\n\t\t\"sha3Uncles\": common.Hash{}, // No uncles in Tendermint\n\t\t\"logsBloom\": bloom,\n\t\t\"transactionsRoot\": hexutil.Bytes(header.DataHash),\n\t\t\"stateRoot\": hexutil.Bytes(header.AppHash),\n\t\t\"miner\": common.Address{},\n\t\t\"mixHash\": common.Hash{},\n\t\t\"difficulty\": 0,\n\t\t\"totalDifficulty\": 0,\n\t\t\"extraData\": hexutil.Uint64(0),\n\t\t\"size\": hexutil.Uint64(size),\n\t\t\"gasLimit\": hexutil.Uint64(gasLimit), // Static gas limit\n\t\t\"gasUsed\": (*hexutil.Big)(gasUsed),\n\t\t\"timestamp\": hexutil.Uint64(header.Time.Unix()),\n\t\t\"transactions\": transactions.([]common.Hash),\n\t\t\"uncles\": []string{},\n\t\t\"receiptsRoot\": common.Hash{},\n\t}\n}", "func (*GetBlockByHeightRequest) Descriptor() ([]byte, []int) {\n\treturn file_github_com_yahuizhan_dappley_metrics_go_api_rpc_pb_rpc_proto_rawDescGZIP(), []int{13}\n}", "func Block(b models.Block) *genModels.BlocksRow {\n\tts := b.Timestamp.Unix()\n\n\tgenBlock := genModels.BlocksRow{\n\t\tLevel: b.Level.Ptr(),\n\t\tProto: b.Proto.Ptr(),\n\t\tBlockTime: b.BlockTime,\n\t\tPredecessor: b.Predecessor.Ptr(),\n\t\tTimestamp: &ts,\n\t\tValidationPass: b.ValidationPass.Ptr(),\n\t\tFitness: b.Fitness.Ptr(),\n\t\tContext: b.Context,\n\t\tSignature: b.Signature,\n\t\tProtocol: b.Protocol.Ptr(),\n\t\tPriority: b.Priority.Ptr(),\n\t\tChainID: b.ChainID,\n\t\tHash: b.Hash.Ptr(),\n\t\tReward: &b.Reward,\n\t\tDeposit: b.Deposit,\n\t\tOperationsHash: b.OperationsHash,\n\t\tPeriodKind: b.PeriodKind,\n\t\tCurrentExpectedQuorum: b.CurrentExpectedQuorum,\n\t\tActiveProposal: b.ActiveProposal,\n\t\tBaker: b.Baker,\n\t\tBakerName: b.BakerName,\n\t\tNonceHash: b.NonceHash,\n\t\tConsumedGas: b.ConsumedGas,\n\t\tMetaLevel: b.MetaLevel,\n\t\tMetaLevelPosition: b.MetaLevelPosition,\n\t\tMetaCycle: b.MetaCycle,\n\t\tMetaCyclePosition: b.MetaCyclePosition,\n\t\tMetaVotingPeriod: b.MetaVotingPeriod,\n\t\tMetaVotingPeriodPosition: b.MetaVotingPeriodPosition,\n\t\tExpectedCommitment: b.ExpectedCommitment,\n\t}\n\n\tif b.BlockAggregation != nil {\n\t\tgenBlock.Volume = b.BlockAggregation.Volume\n\t\tgenBlock.Fees = b.BlockAggregation.Fees\n\t\tgenBlock.Endorsements = b.BlockAggregation.Endorsements\n\t\tgenBlock.Proposals = b.BlockAggregation.Proposals\n\t\tgenBlock.SeedNonceRevelations = b.BlockAggregation.SeedNonceRevelations\n\t\tgenBlock.Delegations = b.BlockAggregation.Delegations\n\t\tgenBlock.Transactions = b.BlockAggregation.Transactions\n\t\tgenBlock.ActivateAccounts = b.BlockAggregation.ActivateAccounts\n\t\tgenBlock.Ballots = b.BlockAggregation.Ballots\n\t\tgenBlock.Originations = b.BlockAggregation.Originations\n\t\tgenBlock.Reveals = b.BlockAggregation.Reveals\n\t\tgenBlock.DoubleBakingEvidence = b.BlockAggregation.DoubleBakingEvidences\n\t\tgenBlock.DoubleEndorsementEvidence = b.BlockAggregation.DoubleEndorsementEvidences\n\t\tgenBlock.NumberOfOperations = b.BlockAggregation.NumberOfOperations\n\t}\n\n\treturn &genBlock\n}", "func (_Rootchain *RootchainCallerSession) HeaderBlocks(arg0 *big.Int) (struct {\n\tRoot [32]byte\n\tStart *big.Int\n\tEnd *big.Int\n\tCreatedAt *big.Int\n\tProposer common.Address\n}, error) {\n\treturn _Rootchain.Contract.HeaderBlocks(&_Rootchain.CallOpts, arg0)\n}", "func (*GetBlockByHashRequest) Descriptor() ([]byte, []int) {\n\treturn 
file_github_com_yahuizhan_dappley_metrics_go_api_rpc_pb_rpc_proto_rawDescGZIP(), []int{12}\n}", "func (_Rootchain *RootchainSession) HeaderBlocks(arg0 *big.Int) (struct {\n\tRoot [32]byte\n\tStart *big.Int\n\tEnd *big.Int\n\tCreatedAt *big.Int\n\tProposer common.Address\n}, error) {\n\treturn _Rootchain.Contract.HeaderBlocks(&_Rootchain.CallOpts, arg0)\n}", "func NewBlock(b *block.Block, chain blockchainer.Blockchainer) Block {\n\tres := Block{\n\t\tBlock: *b,\n\t\tBlockMetadata: BlockMetadata{\n\t\t\tSize: io.GetVarSize(b),\n\t\t\tConfirmations: chain.BlockHeight() - b.Index + 1,\n\t\t},\n\t}\n\n\thash := chain.GetHeaderHash(int(b.Index) + 1)\n\tif !hash.Equals(util.Uint256{}) {\n\t\tres.NextBlockHash = &hash\n\t}\n\n\treturn res\n}", "func newBlock(prevHash [32]byte, prevHashWithoutTx [32]byte, commitmentProof [crypto.COMM_PROOF_LENGTH]byte, height uint32) *protocol.Block {\n\tblock := new(protocol.Block)\n\tblock.PrevHash = prevHash\n\tblock.PrevHashWithoutTx = prevHashWithoutTx\n\tblock.CommitmentProof = commitmentProof\n\tblock.Height = height\n\tblock.StateCopy = make(map[[32]byte]*protocol.Account)\n\tblock.Aggregated = false\n\n\treturn block\n}", "func BeginBlocker(ctx sdk.Context, k keeper.Keeper) {\n\tlogger := k.Logger(ctx)\n\t// Get block BFT time and block height\n\tblockTime := ctx.BlockHeader().Time\n\tminter := k.GetMinter(ctx)\n\tif ctx.BlockHeight() <= 1 { // don't inflate token in the first block\n\t\tminter.LastUpdate = blockTime\n\t\tk.SetMinter(ctx, minter)\n\t\treturn\n\t}\n\n\t// Calculate block mint amount\n\tparams := k.GetParamSet(ctx)\n\tlogger.Info(\"Mint parameters\", \"inflation_rate\", params.Inflation.String(), \"mint_denom\", params.MintDenom)\n\n\tmintedCoin := minter.BlockProvision(params)\n\tlogger.Info(\"Mint result\", \"block_provisions\", mintedCoin.String(), \"time\", blockTime.String())\n\n\tmintedCoins := sdk.NewCoins(mintedCoin)\n\t// mint coins to submodule account\n\tif err := k.MintCoins(ctx, mintedCoins); err != nil {\n\t\tpanic(err)\n\t}\n\n\t// send the minted coins to the fee collector account\n\tif err := k.AddCollectedFees(ctx, mintedCoins); err != nil {\n\t\tpanic(err)\n\t}\n\n\t// Update last block BFT time\n\tlastInflationTime := minter.LastUpdate\n\tminter.LastUpdate = blockTime\n\tk.SetMinter(ctx, minter)\n\n\tctx.EventManager().EmitEvent(\n\t\tsdk.NewEvent(\n\t\t\ttypes.EventTypeMint,\n\t\t\tsdk.NewAttribute(types.AttributeKeyLastInflationTime, lastInflationTime.String()),\n\t\t\tsdk.NewAttribute(types.AttributeKeyInflationTime, blockTime.String()),\n\t\t\tsdk.NewAttribute(types.AttributeKeyMintCoin, mintedCoin.Amount.String()),\n\t\t),\n\t)\n}", "func (*BlockRequest) Descriptor() ([]byte, []int) {\n\treturn file_eth_v1alpha1_validator_proto_rawDescGZIP(), []int{14}\n}", "func NewDeltasReqByBlock(code string, table string, scope string, payer string, first int64, last int64) *DeltasReq {\n\treturn ndr(code, table, scope, payer, \"\", \"\", first, last)\n}", "func (s *SimpleBlockFactory) BlockFactory() consensus.BlockFactory {\n\treturn s\n}", "func (*BlockHeader) Descriptor() ([]byte, []int) {\n\treturn file_message_proto_rawDescGZIP(), []int{16}\n}", "func (g *testGenerator) ancestorBlock(block *wire.MsgBlock, height uint32, f func(*wire.MsgBlock)) *wire.MsgBlock {\n\t// Nothing to do if the requested height is outside of the valid\n\t// range.\n\tif block == nil || height > block.Header.Height {\n\t\treturn nil\n\t}\n\n\t// Iterate backwards until the requested height is reached.\n\tfor block != nil && block.Header.Height > 
height {\n\t\tblock = g.blocks[block.Header.PrevBlock]\n\t\tif f != nil && block != nil {\n\t\t\tf(block)\n\t\t}\n\t}\n\n\treturn block\n}", "func InitBeer(service geting.Service) *cobra.Command {\n\tbeerCmd := &cobra.Command{\n\t\tUse: \"beers\",\n\t\tShort: \"Print all beers\",\n\t\tRun: runBeers(service),\n\t}\n\n\tbeerCmd.Flags().StringP(idFlag, \"i\", \"\", \"Id of the beer\")\n\tbeerCmd.Flags().StringP(nameFileFlag, \"c\", \"\", \"Name CSV File\")\n\n\treturn beerCmd\n}", "func (*BlockParams) Descriptor() ([]byte, []int) {\n\treturn file_tm_replay_proto_rawDescGZIP(), []int{11}\n}", "func TestBlock(t *testing.T) {\n\tl := &Leading{\n\t\tMagic: 0x7425,\n\t\tLenBlock: 0,\n\t}\n\n\tfmt.Println(l)\n\n\th := &chunk.Header{}\n\tfmt.Println(hex.Dump(h.Marshal()))\n\n\tcaller := &chunk.Quater{\n\t\tMain: \"1LVfRcj31E9mGujxUD3nTJjsUPtcczqJnX\",\n\t\tSub: \"send\",\n\t}\n\tcallee := &chunk.Quater{\n\t\tMain: \"1LVfRcj31E9mGujxUD3nTJjsUPtcczqJnX\",\n\t\tSub: \"recv\",\n\t}\n\n\tr := chunk.NewRouting(caller, callee)\n\tfmt.Println(hex.Dump(r.Marshal()))\n\n\tc := &chunk.Content{\n\t\tSha: chunk.GetHash('0'),\n\t\tMime: 0xff,\n\t\tCipher: []byte{},\n\t\tBody: []byte(\"Hello bitmsg\"),\n\t}\n\t//fmt.Println(c)\n\tc.Marshal()\n\tb := NewBlock(*h, *r)\n\tb.AddContent(*c)\n\tfmt.Println(hex.Dump(b.Header.ShaMerkle[:]))\n\tfmt.Println(hex.Dump(b.Marshal()))\n}", "func NewChain(\n\tcv ConfigValidator,\n\tselfID uint64,\n\tconfig types.Configuration,\n\twalDir string,\n\tblockPuller BlockPuller,\n\tcomm cluster.Communicator,\n\tsignerSerializer signerSerializer,\n\tpolicyManager policies.Manager,\n\tsupport consensus.ConsenterSupport,\n\tmetrics *Metrics,\n\tmetricsBFT *api.Metrics,\n\tmetricsWalBFT *wal.Metrics,\n\tbccsp bccsp.BCCSP,\n) (*BFTChain, error) {\n\trequestInspector := &RequestInspector{\n\t\tValidateIdentityStructure: func(_ *msp.SerializedIdentity) error {\n\t\t\treturn nil\n\t\t},\n\t}\n\n\tlogger := flogging.MustGetLogger(\"orderer.consensus.smartbft.chain\").With(zap.String(\"channel\", support.ChannelID()))\n\n\tc := &BFTChain{\n\t\tRuntimeConfig: &atomic.Value{},\n\t\tChannel: support.ChannelID(),\n\t\tConfig: config,\n\t\tWALDir: walDir,\n\t\tComm: comm,\n\t\tsupport: support,\n\t\tSignerSerializer: signerSerializer,\n\t\tPolicyManager: policyManager,\n\t\tBlockPuller: blockPuller,\n\t\tLogger: logger,\n\t\tconsensusRelation: types2.ConsensusRelationConsenter,\n\t\tstatus: types2.StatusActive,\n\t\tMetrics: &Metrics{\n\t\t\tClusterSize: metrics.ClusterSize.With(\"channel\", support.ChannelID()),\n\t\t\tCommittedBlockNumber: metrics.CommittedBlockNumber.With(\"channel\", support.ChannelID()),\n\t\t\tIsLeader: metrics.IsLeader.With(\"channel\", support.ChannelID()),\n\t\t\tLeaderID: metrics.LeaderID.With(\"channel\", support.ChannelID()),\n\t\t},\n\t\tMetricsBFT: metricsBFT.With(\"channel\", support.ChannelID()),\n\t\tMetricsWalBFT: metricsWalBFT.With(\"channel\", support.ChannelID()),\n\t\tbccsp: bccsp,\n\t}\n\n\tlastBlock := LastBlockFromLedgerOrPanic(support, c.Logger)\n\tlastConfigBlock := LastConfigBlockFromLedgerOrPanic(support, c.Logger)\n\n\trtc := RuntimeConfig{\n\t\tlogger: logger,\n\t\tid: selfID,\n\t}\n\trtc, err := rtc.BlockCommitted(lastConfigBlock, bccsp)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed constructing RuntimeConfig\")\n\t}\n\trtc, err = rtc.BlockCommitted(lastBlock, bccsp)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed constructing RuntimeConfig\")\n\t}\n\n\tc.RuntimeConfig.Store(rtc)\n\n\tc.verifier = buildVerifier(cv, 
c.RuntimeConfig, support, requestInspector, policyManager)\n\tc.consensus = bftSmartConsensusBuild(c, requestInspector)\n\n\t// Setup communication with list of remote nodes for the new channel\n\tc.Comm.Configure(c.support.ChannelID(), rtc.RemoteNodes)\n\n\tif err = c.consensus.ValidateConfiguration(rtc.Nodes); err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to verify SmartBFT-Go configuration\")\n\t}\n\n\tlogger.Infof(\"SmartBFT-v3 is now servicing chain %s\", support.ChannelID())\n\n\treturn c, nil\n}", "func NewBlockHeaderFromBytes(headerBytes []byte) (*BlockHeader, error) {\r\n\tif len(headerBytes) != 80 {\r\n\t\treturn nil, errors.New(\"block header should be 80 bytes long\")\r\n\t}\r\n\r\n\treturn &BlockHeader{\r\n\t\tVersion: binary.LittleEndian.Uint32(headerBytes[:4]),\r\n\t\tHashPrevBlock: bt.ReverseBytes(headerBytes[4:36]),\r\n\t\tHashMerkleRoot: bt.ReverseBytes(headerBytes[36:68]),\r\n\t\tTime: binary.LittleEndian.Uint32(headerBytes[68:72]),\r\n\t\tBits: bt.ReverseBytes(headerBytes[72:76]),\r\n\t\tNonce: binary.LittleEndian.Uint32(headerBytes[76:]),\r\n\t}, nil\r\n}", "func (s *BasevhdlListener) EnterBlock_specification(ctx *Block_specificationContext) {}", "func (*CreatecustomerParameters) Descriptor() ([]byte, []int) {\n\treturn file_bookstore_proto_rawDescGZIP(), []int{4}\n}", "func newMockBlockHeaderStore() *mockBlockHeaderStore {\n\treturn &mockBlockHeaderStore{\n\t\theaders: make(map[chainhash.Hash]wire.BlockHeader),\n\t\theights: make(map[uint32]wire.BlockHeader),\n\t}\n}", "func (*PropagateBlockRequest) Descriptor() ([]byte, []int) {\n\treturn file_service_proto_rawDescGZIP(), []int{10}\n}", "func newFilterBlocksRequest(batch []wtxmgr.BlockMeta,\n\tscopedMgrs map[waddrmgr.KeyScope]*waddrmgr.ScopedKeyManager,\n\trecoveryState *RecoveryState) *chain.FilterBlocksRequest {\n\n\tfilterReq := &chain.FilterBlocksRequest{\n\t\tBlocks: batch,\n\t\tExternalAddrs: make(map[waddrmgr.ScopedIndex]btcutil.Address),\n\t\tInternalAddrs: make(map[waddrmgr.ScopedIndex]btcutil.Address),\n\t\tImportedAddrs: make([]btcutil.Address, 0),\n\t\tWatchedOutPoints: recoveryState.WatchedOutPoints(),\n\t}\n\n\t// Populate the external and internal addresses by merging the addresses\n\t// sets belong to all currently tracked scopes.\n\tfor scope := range scopedMgrs {\n\t\tscopeState := recoveryState.StateForScope(scope)\n\t\tfor index, addr := range scopeState.ExternalBranch.Addrs() {\n\t\t\tscopedIndex := waddrmgr.ScopedIndex{\n\t\t\t\tScope: scope,\n\t\t\t\tIndex: index,\n\t\t\t}\n\t\t\tfilterReq.ExternalAddrs[scopedIndex] = addr\n\t\t}\n\t\tfor index, addr := range scopeState.InternalBranch.Addrs() {\n\t\t\tscopedIndex := waddrmgr.ScopedIndex{\n\t\t\t\tScope: scope,\n\t\t\t\tIndex: index,\n\t\t\t}\n\t\t\tfilterReq.InternalAddrs[scopedIndex] = addr\n\t\t}\n\t}\n\n\treturn filterReq\n}", "func TestBlockHeaderEncoding(t *testing.T) {\n\tnonce := uint64(123123) // 0x000000000001e0f3\n\tpver := ProtocolVersion\n\n\t// baseBlockHdr is used in the various tests as a baseline BlockHeader.\n\tbits := uint32(0x1d00ffff)\n\tbaseBlockHdr := &BlockHeader{\n\t\tVersion: 1,\n\t\tParentHashes: []*daghash.Hash{mainnetGenesisHash, simnetGenesisHash},\n\t\tHashMerkleRoot: mainnetGenesisMerkleRoot,\n\t\tAcceptedIDMerkleRoot: exampleAcceptedIDMerkleRoot,\n\t\tUTXOCommitment: exampleUTXOCommitment,\n\t\tTimestamp: mstime.UnixMilliseconds(0x17315ed0f99),\n\t\tBits: bits,\n\t\tNonce: nonce,\n\t}\n\n\t// baseBlockHdrEncoded is the domainmessage encoded bytes of baseBlockHdr.\n\tbaseBlockHdrEncoded := 
[]byte{\n\t\t0x01, 0x00, 0x00, 0x00, // Version 1\n\t\t0x02, // NumParentBlocks\n\t\t0xdc, 0x5f, 0x5b, 0x5b, 0x1d, 0xc2, 0xa7, 0x25, // mainnetGenesisHash\n\t\t0x49, 0xd5, 0x1d, 0x4d, 0xee, 0xd7, 0xa4, 0x8b,\n\t\t0xaf, 0xd3, 0x14, 0x4b, 0x56, 0x78, 0x98, 0xb1,\n\t\t0x8c, 0xfd, 0x9f, 0x69, 0xdd, 0xcf, 0xbb, 0x63,\n\t\t0xf6, 0x7a, 0xd7, 0x69, 0x5d, 0x9b, 0x66, 0x2a, // simnetGenesisHash\n\t\t0x72, 0xff, 0x3d, 0x8e, 0xdb, 0xbb, 0x2d, 0xe0,\n\t\t0xbf, 0xa6, 0x7b, 0x13, 0x97, 0x4b, 0xb9, 0x91,\n\t\t0x0d, 0x11, 0x6d, 0x5c, 0xbd, 0x86, 0x3e, 0x68,\n\t\t0x4a, 0x5e, 0x1e, 0x4b, 0xaa, 0xb8, 0x9f, 0x3a, // HashMerkleRoot\n\t\t0x32, 0x51, 0x8a, 0x88, 0xc3, 0x1b, 0xc8, 0x7f,\n\t\t0x61, 0x8f, 0x76, 0x67, 0x3e, 0x2c, 0xc7, 0x7a,\n\t\t0xb2, 0x12, 0x7b, 0x7a, 0xfd, 0xed, 0xa3, 0x3b,\n\t\t0x09, 0x3B, 0xC7, 0xE3, 0x67, 0x11, 0x7B, 0x3C, // AcceptedIDMerkleRoot\n\t\t0x30, 0xC1, 0xF8, 0xFD, 0xD0, 0xD9, 0x72, 0x87,\n\t\t0x7F, 0x16, 0xC5, 0x96, 0x2E, 0x8B, 0xD9, 0x63,\n\t\t0x65, 0x9C, 0x79, 0x3C, 0xE3, 0x70, 0xD9, 0x5F,\n\t\t0x10, 0x3B, 0xC7, 0xE3, 0x67, 0x11, 0x7B, 0x3C, // UTXOCommitment\n\t\t0x30, 0xC1, 0xF8, 0xFD, 0xD0, 0xD9, 0x72, 0x87,\n\t\t0x7F, 0x16, 0xC5, 0x96, 0x2E, 0x8B, 0xD9, 0x63,\n\t\t0x65, 0x9C, 0x79, 0x3C, 0xE3, 0x70, 0xD9, 0x5F,\n\t\t0x99, 0x0f, 0xed, 0x15, 0x73, 0x01, 0x00, 0x00, // Timestamp\n\t\t0xff, 0xff, 0x00, 0x1d, // Bits\n\t\t0xf3, 0xe0, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, // Fake Nonce. TODO: (Ori) Replace to a real nonce\n\t}\n\n\ttests := []struct {\n\t\tin *BlockHeader // Data to encode\n\t\tout *BlockHeader // Expected decoded data\n\t\tbuf []byte // Encoded data\n\t\tpver uint32 // Protocol version for domainmessage encoding\n\t}{\n\t\t// Latest protocol version.\n\t\t{\n\t\t\tbaseBlockHdr,\n\t\t\tbaseBlockHdr,\n\t\t\tbaseBlockHdrEncoded,\n\t\t\tProtocolVersion,\n\t\t},\n\t}\n\n\tt.Logf(\"Running %d tests\", len(tests))\n\tfor i, test := range tests {\n\t\t// Encode to domainmessage format.\n\t\tvar buf bytes.Buffer\n\t\terr := writeBlockHeader(&buf, test.pver, test.in)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"writeBlockHeader #%d error %v\", i, err)\n\t\t\tcontinue\n\t\t}\n\t\tif !bytes.Equal(buf.Bytes(), test.buf) {\n\t\t\tt.Errorf(\"writeBlockHeader #%d\\n got: %s want: %s\", i,\n\t\t\t\tspew.Sdump(buf.Bytes()), spew.Sdump(test.buf))\n\t\t\tcontinue\n\t\t}\n\n\t\tbuf.Reset()\n\t\terr = test.in.KaspaEncode(&buf, pver)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"KaspaEncode #%d error %v\", i, err)\n\t\t\tcontinue\n\t\t}\n\t\tif !bytes.Equal(buf.Bytes(), test.buf) {\n\t\t\tt.Errorf(\"KaspaEncode #%d\\n got: %s want: %s\", i,\n\t\t\t\tspew.Sdump(buf.Bytes()), spew.Sdump(test.buf))\n\t\t\tcontinue\n\t\t}\n\n\t\t// Decode the block header from domainmessage format.\n\t\tvar bh BlockHeader\n\t\trbuf := bytes.NewReader(test.buf)\n\t\terr = readBlockHeader(rbuf, test.pver, &bh)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"readBlockHeader #%d error %v\", i, err)\n\t\t\tcontinue\n\t\t}\n\t\tif !reflect.DeepEqual(&bh, test.out) {\n\t\t\tt.Errorf(\"readBlockHeader #%d\\n got: %s want: %s\", i,\n\t\t\t\tspew.Sdump(&bh), spew.Sdump(test.out))\n\t\t\tcontinue\n\t\t}\n\n\t\trbuf = bytes.NewReader(test.buf)\n\t\terr = bh.KaspaDecode(rbuf, pver)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"KaspaDecode #%d error %v\", i, err)\n\t\t\tcontinue\n\t\t}\n\t\tif !reflect.DeepEqual(&bh, test.out) {\n\t\t\tt.Errorf(\"KaspaDecode #%d\\n got: %s want: %s\", i,\n\t\t\t\tspew.Sdump(&bh), spew.Sdump(test.out))\n\t\t\tcontinue\n\t\t}\n\t}\n}", "func New(cfg *config.Config, hub *component.ComponentHub) (*SimpleBlockFactory, error) 
{\n\tconsensus.InitBlockInterval(cfg.Consensus.BlockInterval)\n\n\ts := &SimpleBlockFactory{\n\t\tComponentHub: hub,\n\t\tjobQueue: make(chan interface{}, slotQueueMax),\n\t\tblockInterval: consensus.BlockInterval,\n\t\tmaxBlockBodySize: chain.MaxBlockBodySize(),\n\t\tquit: make(chan interface{}),\n\t}\n\n\ts.txOp = chain.NewCompTxOp(\n\t\tchain.TxOpFn(func(txIn *types.Tx) (*types.BlockState, error) {\n\t\t\tselect {\n\t\t\tcase <-s.quit:\n\t\t\t\treturn nil, chain.ErrQuit\n\t\t\tdefault:\n\t\t\t\treturn nil, nil\n\t\t\t}\n\t\t}),\n\t)\n\n\treturn s, nil\n}", "func (*BlockRequestPayload) Descriptor() ([]byte, []int) {\n\treturn file_message_proto_rawDescGZIP(), []int{10}\n}", "func NewBlockMeta(height uint64, producer string, mintTime time.Time) *BlockMeta {\n\treturn &BlockMeta{\n\t\tHeight: height,\n\t\tProducer: producer,\n\t\tMintTime: mintTime.UTC(),\n\t}\n}", "func NewBlock() (*Block, error) {\n\tn, err := findLast()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\th, err := ftoh(n)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfmt.Println(\"Hash: \" + h)\n\n\treturn &Block{Number: n + 1, PreviousHash: h}, nil\n}", "func (*BlocksRequest) Descriptor() ([]byte, []int) {\n\treturn file_resources_proto_rawDescGZIP(), []int{3}\n}", "func (app *BurrowMint) BeginBlock(hash []byte, header *abci.Header) {\n\n}", "func NewBlock(t *testing.T, bc blockchainer.Blockchainer, offset uint32, primary uint32, txs ...*transaction.Transaction) *block.Block {\n\twitness := transaction.Witness{VerificationScript: MultisigVerificationScript()}\n\theight := bc.BlockHeight()\n\th := bc.GetHeaderHash(int(height))\n\thdr, err := bc.GetHeader(h)\n\trequire.NoError(t, err)\n\tb := &block.Block{\n\t\tHeader: block.Header{\n\t\t\tPrevHash: hdr.Hash(),\n\t\t\tTimestamp: (uint64(time.Now().UTC().Unix()) + uint64(hdr.Index)) * 1000,\n\t\t\tIndex: hdr.Index + offset,\n\t\t\tPrimaryIndex: byte(primary),\n\t\t\tNextConsensus: witness.ScriptHash(),\n\t\t\tScript: witness,\n\t\t},\n\t\tTransactions: txs,\n\t}\n\tb.RebuildMerkleRoot()\n\n\tb.Script.InvocationScript = Sign(b)\n\treturn b\n}", "func (*BlockVerifyRequest) Descriptor() ([]byte, []int) {\n\treturn file_vm_vm_proto_rawDescGZIP(), []int{15}\n}", "func NewBlock(sigKey ed25519.PrivateKey, previousBlock BlockID, txs []*Transaction) (*Block, error) {\n\trand_bytes := make([]byte, 8)\n\t_, err := rand.Read(rand_bytes)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to get Random data\")\n\t}\n\ttemp := binary.LittleEndian.Uint64(rand_bytes)\n\tb := &Block{\n\t\tHeader: &BlockHeader{\n\t\t\tVersion: 0,\n\t\t\tPreviousBlock: previousBlock,\n\t\t\tTimestamp: 0, // XXX: Populate this correctly.\n\t\t\tRandom: temp,\n\t\t},\n\t\tTransactions: &Transactions{Transactions: txs},\n\t}\n\n\tb.Header.MerkleRoot, err = b.MerkleRoot()\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to compute merkle root\")\n\t}\n\n\tbid := b.BlockID()\n\tb.Header.Signature = ed25519.Sign(sigKey, bid[:])\n\n\treturn b, nil\n}", "func (*MessageHubBlockUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_messagehub_proto_rawDescGZIP(), []int{10}\n}", "func (*BlockRequest) Descriptor() ([]byte, []int) {\n\treturn file_resources_proto_rawDescGZIP(), []int{30}\n}", "func (*GetBlockIDAtHeightRequest) Descriptor() ([]byte, []int) {\n\treturn file_vm_vm_proto_rawDescGZIP(), []int{35}\n}", "func TableReportHeaderBlock(ctx context.Context, tbl *gotable.Table, rn, funcname string, ri *ReporterInfo) error {\n\tif ri.Xbiz == nil {\n\t\tri.Xbiz = 
new(rlib.XBusiness)\n\t\tif err := rlib.GetXBusiness(ctx, ri.Bid, ri.Xbiz); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn TableReportHeader(ctx, tbl, rn, funcname, ri)\n}", "func BeginBlocker(ctx sdk.Context, k Keeper) {\n\n\t// fetch stored minter & params\n\tminter := k.GetMinter(ctx)\n\t//params := k.GetParams(ctx)\n\n\t// recalculate inflation rate\n\tvar provisionAmt sdk.Dec\n\tminter.AnnualProvisions, minter.Inflation, provisionAmt = calcParams(ctx, k)\n\n\tk.SetMinter(ctx, minter)\n\n\t// mint coins, add to collected fees, update supply\n\t//fmt.Printf(\"AnnualProvisions: %s, Inflation: %s, provisionAmt: %s\\n\", minter.AnnualProvisions.String(), minter.Inflation.String(), provisionAmt.TruncateInt().String())\n\tmintedCoin := sdk.NewCoin(sdk.DefaultDenom, provisionAmt.TruncateInt())\n\tk.fck.AddCollectedFees(ctx, sdk.Coins{mintedCoin})\n\tk.sk.InflateSupply(ctx, mintedCoin.Amount)\n\n}", "func (bc *BlockChain) CreateNewBlock(bPrev *Block) *Block {\n\tbPrev.hash = bPrev.createHash()\n\tb2 := &Block{Index: bPrev.Index + 1, Timestamp: time.Now(), prevHash: bPrev.hash}\n\tbc.AddBlock(bPrev)\n\treturn b2\n}", "func NewTableBuilder(stream streamclient.StreamClient) *Builder {\n\tb := &Builder{\n\t\ttableIndex: &pspb.TableIndex{},\n\t\tkeyHashes: make([]uint64, 0, 1024), // Avoid some malloc calls.\n\t\tstream: stream,\n\t\twriteCh: make(chan writeBlock, 16),\n\t\tstopper: utils.NewStopper(),\n\t\tcurrentBlock: &pb.Block{Data: make([]byte, 64*KB)},\n\t}\n\n\tb.stopper.RunWorker(func() {\n\t\tvar blocks []*pb.Block\n\t\tvar size uint32\n\t\tvar baseKeys [][]byte\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase wBlock, ok := <-b.writeCh:\n\t\t\t\tif !ok {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\tslurpLoop:\n\t\t\t\tfor {\n\t\t\t\t\tblocks = append(blocks, wBlock.b)\n\t\t\t\t\tsize += uint32(wBlock.b.Size())\n\t\t\t\t\tbaseKeys = append(baseKeys, wBlock.baseKey)\n\t\t\t\t\tif size > 10*MB {\n\t\t\t\t\t\tbreak slurpLoop\n\t\t\t\t\t}\n\n\t\t\t\t\t//if channel is closed or no new blocks coming, break\n\t\t\t\t\tselect {\n\t\t\t\t\tcase wBlock, ok = <-b.writeCh:\n\t\t\t\t\t\tif !ok {\n\t\t\t\t\t\t\tbreak slurpLoop\n\t\t\t\t\t\t}\n\t\t\t\t\tdefault:\n\t\t\t\t\t\tbreak slurpLoop\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tif len(blocks) == 0 {\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\textentID, offsets, _, err := b.stream.Append(context.Background(), blocks)\n\t\t\t\tutils.Check(err)\n\n\t\t\t\tfor i, offset := range offsets {\n\t\t\t\t\t//after writing the blocks, write each block's sz, baseKey and offset into the metablock\n\t\t\t\t\tb.addBlockToIndex(baseKeys[i], extentID, offset)\n\t\t\t\t}\n\t\t\t\tblocks = nil\n\t\t\t\tsize = 0\n\t\t\t\tbaseKeys = nil\n\t\t\t}\n\t\t}\n\t})\n\n\treturn b\n}", "func NewBlock(prev Block, currentTime uint64, uxHash cipher.SHA256, txns Transactions, calc FeeCalculator) (*Block, error) {\n\tif len(txns) == 0 {\n\t\treturn nil, fmt.Errorf(\"Refusing to create block with no transactions\")\n\t}\n\n\tfee, err := txns.Fees(calc)\n\tif err != nil {\n\t\t// This should have been caught earlier\n\t\treturn nil, fmt.Errorf(\"Invalid transaction fees: %v\", err)\n\t}\n\n\tbody := BlockBody{txns}\n\thead := NewBlockHeader(prev.Head, uxHash, currentTime, fee, body)\n\treturn &Block{\n\t\tHead: head,\n\t\tBody: body,\n\t}, nil\n}", "func (*GetTxsByBlockHashRequest) Descriptor() ([]byte, []int) {\n\treturn file_service_proto_rawDescGZIP(), []int{8}\n}", "func GetBlockHeaderByHeight(hostURL string, hostPort int, height int) *bytes.Buffer {\n\tparams := make(map[string]interface{})\n\tparams[\"height\"] = height\n\treturn 
makePostRequest(hostURL, hostPort, \"getblockheaderbyheight\", params)\n}", "func (s *BasevhdlListener) EnterBlock_header(ctx *Block_headerContext) {}", "func NewBlockManager(ntmgr notify.Notify, indexManager blockchain.IndexManager, db database.DB,\n\ttimeSource blockchain.MedianTimeSource, sigCache *txscript.SigCache,\n\tcfg *config.Config, par *params.Params,\n\tinterrupt <-chan struct{}, events *event.Feed, peerServer *p2p.Service) (*BlockManager, error) {\n\tbm := BlockManager{\n\t\tconfig: cfg,\n\t\tparams: par,\n\t\tnotify: ntmgr,\n\t\tprogressLogger: progresslog.NewBlockProgressLogger(\"Processed\", log),\n\t\tmsgChan: make(chan interface{}, cfg.MaxPeers*3),\n\t\theaderList: list.New(),\n\t\tquit: make(chan struct{}),\n\t\tpeerServer: peerServer,\n\t}\n\n\t// Create a new block chain instance with the appropriate configuration.\n\tvar err error\n\tbm.chain, err = blockchain.New(&blockchain.Config{\n\t\tDB: db,\n\t\tInterrupt: interrupt,\n\t\tChainParams: par,\n\t\tTimeSource: timeSource,\n\t\tEvents: events,\n\t\tSigCache: sigCache,\n\t\tIndexManager: indexManager,\n\t\tDAGType: cfg.DAGType,\n\t\tCacheInvalidTx: cfg.CacheInvalidTx,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbest := bm.chain.BestSnapshot()\n\tbm.chain.DisableCheckpoints(cfg.DisableCheckpoints)\n\tif !cfg.DisableCheckpoints {\n\t\t// Initialize the next checkpoint based on the current height.\n\t\tbm.nextCheckpoint = bm.findNextHeaderCheckpoint(uint64(best.GraphState.GetMainHeight()))\n\t\tif bm.nextCheckpoint != nil {\n\t\t\tbm.resetHeaderState(&best.Hash, uint64(best.GraphState.GetMainHeight()))\n\t\t}\n\t} else {\n\t\tlog.Info(\"Checkpoints are disabled\")\n\t}\n\n\tif cfg.DumpBlockchain != \"\" {\n\t\terr = bm.chain.DumpBlockChain(cfg.DumpBlockchain, par, uint64(best.GraphState.GetTotal())-1)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\treturn nil, fmt.Errorf(\"closing after dumping blockchain\")\n\t}\n\n\tbm.zmqNotify = zmq.NewZMQNotification(cfg)\n\n\tbm.chain.Subscribe(bm.handleNotifyMsg)\n\treturn &bm, nil\n}", "func CreateBlockForTests(gen genesis.Genesis, additional uint, buckets []buckets.Bucket) Block {\n\tcreatedOn := time.Now().UTC()\n\tins, err := NewBuilder().Create().\n\t\tWithGenesis(gen).\n\t\tWithAdditional(additional).\n\t\tWithBuckets(buckets).\n\t\tCreatedOn(createdOn).\n\t\tNow()\n\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn ins\n}", "func NewHeader(h *block.Header, chain blockchainer.Blockchainer) Header {\n\tres := Header{\n\t\tHeader: *h,\n\t\tBlockMetadata: BlockMetadata{\n\t\t\tSize: io.GetVarSize(h),\n\t\t\tConfirmations: chain.BlockHeight() - h.Index + 1,\n\t\t},\n\t}\n\n\thash := chain.GetHeaderHash(int(h.Index) + 1)\n\tif !hash.Equals(util.Uint256{}) {\n\t\tres.NextBlockHash = &hash\n\t}\n\treturn res\n}", "func (*LastBlockHeightRequest) Descriptor() ([]byte, []int) {\n\treturn file_api_trading_proto_rawDescGZIP(), []int{150}\n}", "func (*GetBlockRequest) Descriptor() ([]byte, []int) {\n\treturn file_vm_vm_proto_rawDescGZIP(), []int{12}\n}", "func (*BodyNewBk) Descriptor() ([]byte, []int) {\n\treturn file_github_com_getamis_alice_crypto_tss_addshare_message_proto_rawDescGZIP(), []int{2}\n}", "func createTestHeader(t *testing.T, txType common.HeaderType, channelId string, creator []byte, useGoodTxid bool) (*common.Header, error) {\n\tnonce := []byte(\"nonce-abc-12345\")\n\n\t// useGoodTxid is used to for testing purpose. 
When it is false, we use a bad value for txid\n\ttxid := \"bad\"\n\tif useGoodTxid {\n\t\ttxid = protoutil.ComputeTxID(nonce, creator)\n\t}\n\n\tchdr := &common.ChannelHeader{\n\t\tType: int32(txType),\n\t\tChannelId: channelId,\n\t\tTxId: txid,\n\t\tEpoch: uint64(0),\n\t}\n\n\tshdr := &common.SignatureHeader{\n\t\tCreator: creator,\n\t\tNonce: nonce,\n\t}\n\n\treturn &common.Header{\n\t\tChannelHeader: protoMarshal(t, chdr),\n\t\tSignatureHeader: protoMarshal(t, shdr),\n\t}, nil\n}", "func (*GetBlocksRequest) Descriptor() ([]byte, []int) {\n\treturn file_github_com_yahuizhan_dappley_metrics_go_api_rpc_pb_rpc_proto_rawDescGZIP(), []int{11}\n}", "func NewBlock(index int, data interface{}, date time.Time) *Block {\n\treturn &Block{\n\t\tIndex: index,\n\t\tDate: date,\n\t\tData: data,\n\t}\n}", "func (*BuildBlockRequest) Descriptor() ([]byte, []int) {\n\treturn file_vm_vm_proto_rawDescGZIP(), []int{8}\n}", "func (l *Ledger) FormatMinerBlock(txList []*pb.Transaction,\n\tproposer []byte, ecdsaPk *ecdsa.PrivateKey, /*miner's public and private keys*/\n\ttimestamp int64, curTerm int64, curBlockNum int64,\n\tpreHash []byte, targetBits int32, utxoTotal *big.Int,\n\tqc *pb.QuorumCert, failedTxs map[string]string, blockHeight int64) (*pb.InternalBlock, error) {\n\treturn l.formatBlock(txList, proposer, ecdsaPk, timestamp, curTerm, curBlockNum, preHash, targetBits, utxoTotal, true, qc, failedTxs, blockHeight)\n}", "func (cm *chainManager) MintNewBlock(timestamp time.Time) (*block.Block, error) {\n\treturn cm.bc.MintNewBlock(timestamp)\n}", "func (_EthCrossChain *EthCrossChainFilterer) FilterInitGenesisBlockEvent(opts *bind.FilterOpts) (*EthCrossChainInitGenesisBlockEventIterator, error) {\n\n\tlogs, sub, err := _EthCrossChain.contract.FilterLogs(opts, \"InitGenesisBlockEvent\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &EthCrossChainInitGenesisBlockEventIterator{contract: _EthCrossChain.contract, event: \"InitGenesisBlockEvent\", logs: logs, sub: sub}, nil\n}", "func NewFakeBlockData(size uint64, index int64, onWire bool) graphsync.BlockData {\n\treturn &fakeBlkData{\n\t\tlink: cidlink.Link{Cid: testutil.GenerateCids(1)[0]},\n\t\tsize: size,\n\t\tindex: index,\n\t\tonWire: onWire,\n\t}\n}", "func New(\n\tdatabaseContext model.DBManager,\n\n\tdifficultyManager model.DifficultyManager,\n\tpastMedianTimeManager model.PastMedianTimeManager,\n\tcoinbaseManager model.CoinbaseManager,\n\tconsensusStateManager model.ConsensusStateManager,\n\tghostdagManager model.GHOSTDAGManager,\n\ttransactionValidator model.TransactionValidator,\n\n\tacceptanceDataStore model.AcceptanceDataStore,\n\tblockRelationStore model.BlockRelationStore,\n\tmultisetStore model.MultisetStore,\n\tghostdagDataStore model.GHOSTDAGDataStore,\n) model.BlockBuilder {\n\n\treturn &blockBuilder{\n\t\tdatabaseContext: databaseContext,\n\t\tdifficultyManager: difficultyManager,\n\t\tpastMedianTimeManager: pastMedianTimeManager,\n\t\tcoinbaseManager: coinbaseManager,\n\t\tconsensusStateManager: consensusStateManager,\n\t\tghostdagManager: ghostdagManager,\n\t\ttransactionValidator: transactionValidator,\n\n\t\tacceptanceDataStore: acceptanceDataStore,\n\t\tblockRelationStore: blockRelationStore,\n\t\tmultisetStore: multisetStore,\n\t\tghostdagDataStore: ghostdagDataStore,\n\t}\n}", "func TestGenesisBlockParser(t *testing.T) {\n\tblockFile, err := os.Open(\"../testdata/mainnet_genesis\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer blockFile.Close()\n\n\tscan := bufio.NewScanner(blockFile)\n\tfor i := 0; scan.Scan(); i++ {\n\t\tblockDataHex := 
scan.Text()\n\t\tblockData, err := hex.DecodeString(blockDataHex)\n\t\tif err != nil {\n\t\t\tt.Error(err)\n\t\t\tcontinue\n\t\t}\n\n\t\tblock := NewBlock()\n\t\tblockData, err = block.ParseFromSlice(blockData)\n\t\tif err != nil {\n\t\t\tt.Error(err)\n\t\t\tcontinue\n\t\t}\n\n\t\t// Some basic sanity checks\n\t\tif block.hdr.Version != 4 {\n\t\t\tt.Error(\"Read wrong version in genesis block.\")\n\t\t\tbreak\n\t\t}\n\n\t\tif block.GetHeight() != i {\n\t\t\tt.Errorf(\"Got wrong height for block %d: %d\", i, block.GetHeight())\n\t\t}\n\t}\n}", "func mockFirstIntermediateBlock(prevBlockHeader *block.Header) (*block.Block, error) {\n\tblk := block.NewBlock()\n\tblk.Header.Seed = make([]byte, 33)\n\tblk.Header.Height = 1\n\t// Something above the genesis timestamp\n\tblk.Header.Timestamp = 1570000000\n\tblk.SetPrevBlock(prevBlockHeader)\n\n\ttx := mockDeterministicCoinbase()\n\tblk.AddTx(tx)\n\troot, err := blk.CalculateRoot()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tblk.Header.TxRoot = root\n\n\thash, err := blk.CalculateHash()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tblk.Header.Hash = hash\n\n\treturn blk, nil\n}", "func New(b *beat.Beat, cfg *common.Config) (beat.Beater, error) {\n\tc := defaultConfig\n\tif err := cfg.Unpack(&c); err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to unpack config\")\n\t}\n\n\tbt := &Flowbeat{\n\t\tdone: make(chan struct{}),\n\t\tconfig: c,\n\t}\n\treturn bt, nil\n}", "func (brf *BlockReaderFactory) newBlockReader(peer DomainPeer) (BlockReader, error) {\n\n\t// A read request to a datanode:\n\t// +-----------------------------------------------------------+\n\t// | Data Transfer Protocol Version, int16 |\n\t// +-----------------------------------------------------------+\n\t// | Op code, 1 byte (READ_BLOCK = 0x51) |\n\t// +-----------------------------------------------------------+\n\t// | varint length + OpReadBlockProto |\n\t// +-----------------------------------------------------------+\n\n\tproto := &OpReadBlockProto{\n\t\tHeader: &ClientOperationHeaderProto{\n\t\t\tBaseHeader: &BaseHeaderProto{\n\t\t\t\tBlock: &brf.block,\n\t\t\t\tToken: &brf.blockToken,\n\t\t\t},\n\t\t\tClientName: proto.String(brf.clientName),\n\t\t},\n\t\tOffset: proto.Uint64(uint64(brf.startOffset)),\n\t\tLen: proto.Uint64(uint64(brf.length)),\n\t\tSendChecksums: proto.Bool(brf.verifyChecksum),\n\t}\n\tif err := WriteBlockOpRequest(peer.out, ReadBlockOp, proto); err != nil {\n\t\t//todo\n\t\treturn nil, err\n\t} else {\n\t\tif status, err := ReadBlockOpResponse(peer.in); err != nil {\n\t\t\t//todo\n\t\t\treturn nil, err\n\t\t} else {\n\t\t\tif *status.Status != Status_SUCCESS {\n\t\t\t\tif *status.Status == Status_ERROR_ACCESS_TOKEN {\n\t\t\t\t\treturn nil, fmt.Errorf(\"Got access token error,status message %s \\n\", status.GetMessage())\n\t\t\t\t} else {\n\t\t\t\t\treturn nil, fmt.Errorf(\"Got error,status message %s \\n\", status.GetMessage())\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tchecksumInfo := status.GetReadOpChecksumInfo()\n\t\t\t\tchecksum := checksumInfo.GetChecksum()\n\n\t\t\t\tfirstChunkOffset := int64(checksumInfo.GetChunkOffset())\n\n\t\t\t\tif firstChunkOffset < 0 || firstChunkOffset > brf.startOffset || firstChunkOffset <= (brf.startOffset-int64(checksum.GetBytesPerChecksum())) {\n\t\t\t\t\t//todo\n\t\t\t\t\treturn nil, fmt.Errorf(\"BlockReader: error in first chunk offset ( %d ) startOffset is %d for file %s\\n\", firstChunkOffset, brf.startOffset, brf.fileName)\n\t\t\t\t}\n\n\t\t\t\tif dataChecksum, err := NewDataChecksum(checksum); err != nil 
{\n\t\t\t\t\t//todo\n\t\t\t\t\treturn nil, fmt.Errorf(\"BlockReader: error in NewDataChecksum err is %s \\n\", err)\n\t\t\t\t} else {\n\t\t\t\t\tblockReader := NewRemoteBlockReader(brf.fileName, int64(brf.block.GetBlockId()), dataChecksum, brf.verifyChecksum,\n\t\t\t\t\t\tbrf.startOffset, firstChunkOffset, brf.length, peer, *brf.datanode.GetId())\n\t\t\t\t\treturn blockReader, nil\n\t\t\t\t}\n\t\t\t}\n\n\t\t}\n\n\t}\n\n}", "func NewGetBlockLatestParams() *GetBlockLatestParams {\n\tvar (\n\t\ttxEncodingDefault = string(\"json\")\n\t)\n\treturn &GetBlockLatestParams{\n\t\tTxEncoding: &txEncodingDefault,\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}", "func (b *Block) fillHeader() {\n\tif b.LastCommitHash == nil {\n\t\tb.LastCommitHash = b.LastCommit.Hash()\n\t}\n\tif b.DataHash == nil {\n\t\tb.DataHash = b.Data.Hash()\n\t}\n\tif b.EvidenceHash == nil {\n\t\tb.EvidenceHash = b.Evidence.Hash()\n\t}\n}" ]
[ "0.56433254", "0.53405935", "0.5309693", "0.52485263", "0.5207438", "0.52066755", "0.5181882", "0.51282775", "0.5117454", "0.50633496", "0.5005211", "0.4995157", "0.49901152", "0.4985155", "0.4977487", "0.49115044", "0.4903588", "0.4873314", "0.48645684", "0.48605022", "0.48539826", "0.4795968", "0.47925395", "0.47925225", "0.47770184", "0.4769889", "0.4769372", "0.47310376", "0.47132444", "0.47076237", "0.4696137", "0.46916774", "0.46743956", "0.4645341", "0.4642502", "0.46423134", "0.46413916", "0.46401292", "0.4626441", "0.46211764", "0.46117267", "0.46055305", "0.46048948", "0.46040243", "0.45867023", "0.45804057", "0.4579024", "0.45736274", "0.457194", "0.45533398", "0.454797", "0.45439392", "0.45099348", "0.45055506", "0.45044467", "0.45031226", "0.45003843", "0.44995493", "0.44821033", "0.44676855", "0.4466118", "0.44654116", "0.44596213", "0.4455541", "0.44526964", "0.4443945", "0.44425577", "0.4440525", "0.44300666", "0.4430035", "0.44231233", "0.44161066", "0.44096723", "0.44022402", "0.4395993", "0.43933275", "0.43908843", "0.43901446", "0.43899018", "0.43799284", "0.4373054", "0.43586004", "0.43544123", "0.4351171", "0.4349755", "0.4348558", "0.43471774", "0.43436363", "0.43430263", "0.43384993", "0.43304193", "0.4326439", "0.43243757", "0.43211997", "0.43179566", "0.4315837", "0.4311869", "0.43046826", "0.42969787", "0.42966834" ]
0.7923484
0
NewBootstrapSpec initializes a new BootstrapSpec from a job.BootstrapSpec
func NewBootstrapSpec(spec *job.BootstrapSpec) *BootstrapSpec { return &BootstrapSpec{ ContractID: spec.ContractID, Relay: spec.Relay, RelayConfig: spec.RelayConfig, BlockchainTimeout: spec.BlockchainTimeout, ContractConfigTrackerPollInterval: spec.ContractConfigTrackerPollInterval, ContractConfigConfirmations: spec.ContractConfigConfirmations, CreatedAt: spec.CreatedAt, UpdatedAt: spec.UpdatedAt, } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func newBootstrapTemplate(e2eCtx *E2EContext) *cfn_bootstrap.Template {\n\tBy(\"Creating a bootstrap AWSIAMConfiguration\")\n\tt := cfn_bootstrap.NewTemplate()\n\tt.Spec.BootstrapUser.Enable = true\n\tt.Spec.SecureSecretsBackends = []v1alpha3.SecretBackend{\n\t\tv1alpha3.SecretBackendSecretsManager,\n\t\tv1alpha3.SecretBackendSSMParameterStore,\n\t}\n\tregion, err := credentials.ResolveRegion(\"\")\n\tExpect(err).NotTo(HaveOccurred())\n\tt.Spec.Region = region\n\tstr, err := yaml.Marshal(t.Spec)\n\tExpect(err).NotTo(HaveOccurred())\n\tExpect(ioutil.WriteFile(path.Join(e2eCtx.Settings.ArtifactFolder, \"awsiamconfiguration.yaml\"), str, 0644)).To(Succeed())\n\tcfnData, err := t.RenderCloudFormation().YAML()\n\tExpect(err).NotTo(HaveOccurred())\n\tExpect(ioutil.WriteFile(path.Join(e2eCtx.Settings.ArtifactFolder, \"cloudformation.yaml\"), cfnData, 0644)).To(Succeed())\n\treturn &t\n}", "func NewBootstrap(router *mux.Router) *Bootstrap {\n\treturn &Bootstrap{\n\t\trouter: router,\n\t}\n}", "func NewBootstrap(router *echo.Echo, serviceName string) *Bootstrap {\n\treturn &Bootstrap{\n\t\trouter: router,\n\t\tserviceName: serviceName,\n\t}\n}", "func NewSpec(details *SpecDetails) *Spec {\n\treturn &Spec{\n\t\tDetails: details,\n\t\tServices: NewServiceList(),\n\t\tStatus: SpecWaiting,\n\t}\n}", "func NewSpec(api *gmail.Service, db *db.DB) (*Spec, error) {\n\tlog.SetLevel(log.DebugLevel)\n\tlog.Info(\"starting new spec\")\n\n\tbytes, err := ioutil.ReadFile(\"./spec.yaml\")\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to read file: %v\", err)\n\t}\n\n\tspec := &Spec{\n\t\tapi: api,\n\t\tdb: db,\n\t}\n\n\terr = yaml.Unmarshal(bytes, spec)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to unmarshal: %v\", err)\n\t}\n\n\treturn spec, nil\n}", "func (bsc BootstrapConfiguration) New(\n\trsOpts result.Options,\n\topts storage.Options,\n\ttopoMapProvider topology.MapProvider,\n\torigin topology.Host,\n\tadminClient client.AdminClient,\n) (bootstrap.ProcessProvider, error) {\n\tidxOpts := opts.IndexOptions()\n\tcompactor, err := compaction.NewCompactor(idxOpts.MetadataArrayPool(),\n\t\tindex.MetadataArrayPoolCapacity,\n\t\tidxOpts.SegmentBuilderOptions(),\n\t\tidxOpts.FSTSegmentOptions(),\n\t\tcompaction.CompactorOptions{\n\t\t\tFSTWriterOptions: &fst.WriterOptions{\n\t\t\t\t// DisableRegistry is set to true to trade a larger FST size\n\t\t\t\t// for a faster FST compaction since we want to reduce the end\n\t\t\t\t// to end latency for time to first index a metric.\n\t\t\t\tDisableRegistry: true,\n\t\t\t},\n\t\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar (\n\t\tbs bootstrap.BootstrapperProvider\n\t\tfsOpts = opts.CommitLogOptions().FilesystemOptions()\n\t\torderedBootstrappers = bsc.orderedBootstrappers()\n\t)\n\t// Start from the end of the list because the bootstrappers are ordered by precedence in descending order.\n\t// I.e. 
each bootstrapper wraps the preceding bootstrapper, and so the outer-most bootstrapper is run first.\n\tfor i := len(orderedBootstrappers) - 1; i >= 0; i-- {\n\t\tswitch orderedBootstrappers[i] {\n\t\tcase bootstrapper.NoOpAllBootstrapperName:\n\t\t\tbs = bootstrapper.NewNoOpAllBootstrapperProvider()\n\t\tcase bootstrapper.NoOpNoneBootstrapperName:\n\t\t\tbs = bootstrapper.NewNoOpNoneBootstrapperProvider()\n\t\tcase bfs.FileSystemBootstrapperName:\n\t\t\tfsCfg := bsc.filesystemConfig()\n\t\t\tfsbOpts := bfs.NewOptions().\n\t\t\t\tSetInstrumentOptions(opts.InstrumentOptions()).\n\t\t\t\tSetResultOptions(rsOpts).\n\t\t\t\tSetFilesystemOptions(fsOpts).\n\t\t\t\tSetIndexOptions(opts.IndexOptions()).\n\t\t\t\tSetPersistManager(opts.PersistManager()).\n\t\t\t\tSetIndexClaimsManager(opts.IndexClaimsManager()).\n\t\t\t\tSetCompactor(compactor).\n\t\t\t\tSetRuntimeOptionsManager(opts.RuntimeOptionsManager()).\n\t\t\t\tSetIdentifierPool(opts.IdentifierPool()).\n\t\t\t\tSetMigrationOptions(fsCfg.migration().NewOptions()).\n\t\t\t\tSetStorageOptions(opts).\n\t\t\t\tSetIndexSegmentsVerify(bsc.VerifyOrDefault().VerifyIndexSegmentsOrDefault())\n\t\t\tif v := bsc.IndexSegmentConcurrency; v != nil {\n\t\t\t\tfsbOpts = fsbOpts.SetIndexSegmentConcurrency(*v)\n\t\t\t}\n\t\t\tif err := fsbOpts.Validate(); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tbs, err = bfs.NewFileSystemBootstrapperProvider(fsbOpts, bs)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\tcase commitlog.CommitLogBootstrapperName:\n\t\t\tcCfg := bsc.commitlogConfig()\n\t\t\tcOpts := commitlog.NewOptions().\n\t\t\t\tSetResultOptions(rsOpts).\n\t\t\t\tSetCommitLogOptions(opts.CommitLogOptions()).\n\t\t\t\tSetRuntimeOptionsManager(opts.RuntimeOptionsManager()).\n\t\t\t\tSetReturnUnfulfilledForCorruptCommitLogFiles(cCfg.ReturnUnfulfilledForCorruptCommitLogFiles)\n\t\t\tif err := cOpts.Validate(); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tinspection, err := fs.InspectFilesystem(fsOpts)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tbs, err = commitlog.NewCommitLogBootstrapperProvider(cOpts, inspection, bs)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\tcase peers.PeersBootstrapperName:\n\t\t\tpCfg := bsc.peersConfig()\n\t\t\tpOpts := peers.NewOptions().\n\t\t\t\tSetResultOptions(rsOpts).\n\t\t\t\tSetFilesystemOptions(fsOpts).\n\t\t\t\tSetIndexOptions(opts.IndexOptions()).\n\t\t\t\tSetAdminClient(adminClient).\n\t\t\t\tSetPersistManager(opts.PersistManager()).\n\t\t\t\tSetIndexClaimsManager(opts.IndexClaimsManager()).\n\t\t\t\tSetCompactor(compactor).\n\t\t\t\tSetRuntimeOptionsManager(opts.RuntimeOptionsManager()).\n\t\t\t\tSetContextPool(opts.ContextPool())\n\t\t\tif pCfg.StreamShardConcurrency != nil {\n\t\t\t\tpOpts = pOpts.SetDefaultShardConcurrency(*pCfg.StreamShardConcurrency)\n\t\t\t}\n\t\t\tif pCfg.StreamPersistShardConcurrency != nil {\n\t\t\t\tpOpts = pOpts.SetShardPersistenceConcurrency(*pCfg.StreamPersistShardConcurrency)\n\t\t\t}\n\t\t\tif pCfg.StreamPersistShardFlushConcurrency != nil {\n\t\t\t\tpOpts = pOpts.SetShardPersistenceFlushConcurrency(*pCfg.StreamPersistShardFlushConcurrency)\n\t\t\t}\n\t\t\tif v := bsc.IndexSegmentConcurrency; v != nil {\n\t\t\t\tpOpts = pOpts.SetIndexSegmentConcurrency(*v)\n\t\t\t}\n\t\t\tif err := pOpts.Validate(); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tbs, err = peers.NewPeersBootstrapperProvider(pOpts, bs)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\tcase 
uninitialized.UninitializedTopologyBootstrapperName:\n\t\t\tuOpts := uninitialized.NewOptions().\n\t\t\t\tSetResultOptions(rsOpts).\n\t\t\t\tSetInstrumentOptions(opts.InstrumentOptions())\n\t\t\tif err := uOpts.Validate(); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tbs = uninitialized.NewUninitializedTopologyBootstrapperProvider(uOpts, bs)\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"unknown bootstrapper: %s\", orderedBootstrappers[i])\n\t\t}\n\t}\n\n\tproviderOpts := bootstrap.NewProcessOptions().\n\t\tSetTopologyMapProvider(topoMapProvider).\n\t\tSetOrigin(origin)\n\tif bsc.CacheSeriesMetadata != nil {\n\t\tproviderOpts = providerOpts.SetCacheSeriesMetadata(*bsc.CacheSeriesMetadata)\n\t}\n\treturn bootstrap.NewProcessProvider(bs, providerOpts, rsOpts, fsOpts)\n}", "func NewBootstrapCommand(rootSettings *environment.AirshipCTLSettings) *cobra.Command {\n\tbootstrapRootCmd := &cobra.Command{\n\t\tUse: \"bootstrap\",\n\t\tShort: \"Bootstrap ephemeral Kubernetes cluster\",\n\t}\n\n\tISOGenCmd := NewISOGenCommand(bootstrapRootCmd, rootSettings)\n\tbootstrapRootCmd.AddCommand(ISOGenCmd)\n\n\tremoteDirectCmd := NewRemoteDirectCommand(rootSettings)\n\tbootstrapRootCmd.AddCommand(remoteDirectCmd)\n\n\treturn bootstrapRootCmd\n}", "func New() Generator {\n\tspec := rspec.Spec{\n\t\tVersion: rspec.Version,\n\t\tPlatform: rspec.Platform{\n\t\t\tOS: runtime.GOOS,\n\t\t\tArch: runtime.GOARCH,\n\t\t},\n\t\tRoot: rspec.Root{\n\t\t\tPath: \"\",\n\t\t\tReadonly: false,\n\t\t},\n\t\tProcess: rspec.Process{\n\t\t\tTerminal: false,\n\t\t\tUser: rspec.User{},\n\t\t\tArgs: []string{\n\t\t\t\t\"sh\",\n\t\t\t},\n\t\t\tEnv: []string{\n\t\t\t\t\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\n\t\t\t\t\"TERM=xterm\",\n\t\t\t},\n\t\t\tCwd: \"/\",\n\t\t\tCapabilities: []string{\n\t\t\t\t\"CAP_CHOWN\",\n\t\t\t\t\"CAP_DAC_OVERRIDE\",\n\t\t\t\t\"CAP_FSETID\",\n\t\t\t\t\"CAP_FOWNER\",\n\t\t\t\t\"CAP_MKNOD\",\n\t\t\t\t\"CAP_NET_RAW\",\n\t\t\t\t\"CAP_SETGID\",\n\t\t\t\t\"CAP_SETUID\",\n\t\t\t\t\"CAP_SETFCAP\",\n\t\t\t\t\"CAP_SETPCAP\",\n\t\t\t\t\"CAP_NET_BIND_SERVICE\",\n\t\t\t\t\"CAP_SYS_CHROOT\",\n\t\t\t\t\"CAP_KILL\",\n\t\t\t\t\"CAP_AUDIT_WRITE\",\n\t\t\t},\n\t\t\tRlimits: []rspec.Rlimit{\n\t\t\t\t{\n\t\t\t\t\tType: \"RLIMIT_NOFILE\",\n\t\t\t\t\tHard: uint64(1024),\n\t\t\t\t\tSoft: uint64(1024),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tHostname: \"mrsdalloway\",\n\t\tMounts: []rspec.Mount{\n\t\t\t{\n\t\t\t\tDestination: \"/proc\",\n\t\t\t\tType: \"proc\",\n\t\t\t\tSource: \"proc\",\n\t\t\t\tOptions: nil,\n\t\t\t},\n\t\t\t{\n\t\t\t\tDestination: \"/dev\",\n\t\t\t\tType: \"tmpfs\",\n\t\t\t\tSource: \"tmpfs\",\n\t\t\t\tOptions: []string{\"nosuid\", \"strictatime\", \"mode=755\", \"size=65536k\"},\n\t\t\t},\n\t\t\t{\n\t\t\t\tDestination: \"/dev/pts\",\n\t\t\t\tType: \"devpts\",\n\t\t\t\tSource: \"devpts\",\n\t\t\t\tOptions: []string{\"nosuid\", \"noexec\", \"newinstance\", \"ptmxmode=0666\", \"mode=0620\", \"gid=5\"},\n\t\t\t},\n\t\t\t{\n\t\t\t\tDestination: \"/dev/shm\",\n\t\t\t\tType: \"tmpfs\",\n\t\t\t\tSource: \"shm\",\n\t\t\t\tOptions: []string{\"nosuid\", \"noexec\", \"nodev\", \"mode=1777\", \"size=65536k\"},\n\t\t\t},\n\t\t\t{\n\t\t\t\tDestination: \"/dev/mqueue\",\n\t\t\t\tType: \"mqueue\",\n\t\t\t\tSource: \"mqueue\",\n\t\t\t\tOptions: []string{\"nosuid\", \"noexec\", \"nodev\"},\n\t\t\t},\n\t\t\t{\n\t\t\t\tDestination: \"/sys\",\n\t\t\t\tType: \"sysfs\",\n\t\t\t\tSource: \"sysfs\",\n\t\t\t\tOptions: []string{\"nosuid\", \"noexec\", \"nodev\", \"ro\"},\n\t\t\t},\n\t\t},\n\t\tLinux: 
&rspec.Linux{\n\t\t\tResources: &rspec.Resources{\n\t\t\t\tDevices: []rspec.DeviceCgroup{\n\t\t\t\t\t{\n\t\t\t\t\t\tAllow: false,\n\t\t\t\t\t\tAccess: strPtr(\"rwm\"),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tNamespaces: []rspec.Namespace{\n\t\t\t\t{\n\t\t\t\t\tType: \"pid\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: \"network\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: \"ipc\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: \"uts\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: \"mount\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tDevices: []rspec.Device{},\n\t\t},\n\t}\n\treturn Generator{\n\t\tspec: &spec,\n\t}\n}", "func NewBootstrapper(client client.Client, namespace string) component.DeployWaiter {\n\treturn &bootstrapper{\n\t\tclient: client,\n\t\tnamespace: namespace,\n\t}\n}", "func NewSpec(yamlConfig string) (*Spec, error) {\n\ts := &Spec{\n\t\tyamlConfig: yamlConfig,\n\t}\n\n\tmeta := &MetaSpec{}\n\terr := yaml.Unmarshal([]byte(yamlConfig), meta)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unmarshal failed: %v\", err)\n\t}\n\tvr := v.Validate(meta, []byte(yamlConfig))\n\tif !vr.Valid() {\n\t\treturn nil, fmt.Errorf(\"validate metadata failed: \\n%s\", vr)\n\t}\n\n\trootObject, exists := objectRegistry[meta.Kind]\n\tif !exists {\n\t\treturn nil, fmt.Errorf(\"kind %s not found\", meta.Kind)\n\t}\n\n\ts.meta, s.objectSpec = meta, rootObject.DefaultSpec()\n\n\terr = yaml.Unmarshal([]byte(yamlConfig), s.objectSpec)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unmarshal failed: %v\", err)\n\t}\n\tvr = v.Validate(s.objectSpec, []byte(yamlConfig))\n\tif !vr.Valid() {\n\t\treturn nil, fmt.Errorf(\"validate spec failed: \\n%s\", vr)\n\t}\n\n\treturn s, nil\n}", "func NewBootstrapController(\n\tkubeClient kubernetes.Interface,\n\tklusterletInformer operatorinformer.KlusterletInformer,\n\tsecretInformer coreinformer.SecretInformer,\n\trecorder events.Recorder) factory.Controller {\n\tcontroller := &bootstrapController{\n\t\tkubeClient: kubeClient,\n\t\tklusterletLister: klusterletInformer.Lister(),\n\t\tsecretLister: secretInformer.Lister(),\n\t}\n\treturn factory.New().WithSync(controller.sync).\n\t\tWithInformersQueueKeyFunc(bootstrapSecretQueueKeyFunc(controller.klusterletLister), secretInformer.Informer()).\n\t\tToController(\"BootstrapController\", recorder)\n}", "func NewBootstrapCommand(rootSettings *environment.AirshipCTLSettings) *cobra.Command {\n\tbootstrapRootCmd := &cobra.Command{\n\t\tUse: \"bootstrap\",\n\t\tShort: \"bootstraps airshipctl\",\n\t\tRun: func(cmd *cobra.Command, args []string) {\n\t\t\tout := cmd.OutOrStdout()\n\t\t\tfmt.Fprintf(out, \"Under construction\\n\")\n\t\t},\n\t}\n\n\treturn bootstrapRootCmd\n}", "func (c *kubernetesDeploymentManifest) Bootstrap(ctx context.Context, profile ServiceRequest, env map[string]string, waitCB func() error) error {\n\tspan, _ := apm.StartSpanOptions(ctx, \"Bootstrapping kubernetes deployment\", \"kubernetes.manifest.bootstrap\", apm.SpanOptions{\n\t\tParent: apm.SpanFromContext(ctx).TraceContext(),\n\t})\n\tdefer span.End()\n\n\terr := cluster.Initialize(ctx, \"../../../cli/config/kubernetes/kind.yaml\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// TODO: we would need to understand how to pass the environment argument to anything running in the namespace\n\tkubectl = cluster.Kubectl().WithNamespace(ctx, getNamespaceFromProfile(profile))\n\t_, err = kubectl.Run(ctx, \"apply\", \"-k\", \"../../../cli/config/kubernetes/base\")\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = waitCB()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", 
"func MachinePoolSpec(ctx context.Context, inputGetter func() MachinePoolSpecInput) {\n\tinput := inputGetter()\n\tExpect(input.E2EConfig).ToNot(BeNil(), \"Invalid argument. input.E2EConfig can't be nil\")\n\tExpect(input.ConfigClusterFn).ToNot(BeNil(), \"Invalid argument. input.ConfigClusterFn can't be nil\")\n\tExpect(input.BootstrapClusterProxy).ToNot(BeNil(), \"Invalid argument. input.BootstrapClusterProxy can't be nil\")\n\tExpect(input.AWSSession).ToNot(BeNil(), \"Invalid argument. input.AWSSession can't be nil\")\n\tExpect(input.Namespace).NotTo(BeNil(), \"Invalid argument. input.Namespace can't be nil\")\n\tExpect(input.ClusterName).ShouldNot(BeEmpty(), \"Invalid argument. input.ClusterName can't be empty\")\n\tExpect(input.Flavor).ShouldNot(BeEmpty(), \"Invalid argument. input.Flavor can't be empty\")\n\n\tginkgo.By(fmt.Sprintf(\"getting cluster with name %s\", input.ClusterName))\n\tcluster := framework.GetClusterByName(ctx, framework.GetClusterByNameInput{\n\t\tGetter: input.BootstrapClusterProxy.GetClient(),\n\t\tNamespace: input.Namespace.Name,\n\t\tName: input.ClusterName,\n\t})\n\tExpect(cluster).NotTo(BeNil(), \"couldn't find CAPI cluster\")\n\n\tginkgo.By(fmt.Sprintf(\"creating and applying the %s template\", input.Flavor))\n\tconfigCluster := input.ConfigClusterFn(input.ClusterName, input.Namespace.Name)\n\tconfigCluster.Flavor = input.Flavor\n\tconfigCluster.WorkerMachineCount = pointer.Int64(1)\n\tworkloadClusterTemplate := shared.GetTemplate(ctx, configCluster)\n\tif input.UsesLaunchTemplate {\n\t\tuserDataTemplate := `#!/bin/bash\n/etc/eks/bootstrap.sh %s \\\n --container-runtime containerd\n`\n\t\teksClusterName := getEKSClusterName(input.Namespace.Name, input.ClusterName)\n\t\tuserData := fmt.Sprintf(userDataTemplate, eksClusterName)\n\t\tuserDataEncoded := base64.StdEncoding.EncodeToString([]byte(userData))\n\t\tworkloadClusterTemplate = []byte(strings.ReplaceAll(string(workloadClusterTemplate), \"USER_DATA\", userDataEncoded))\n\t}\n\tginkgo.By(string(workloadClusterTemplate))\n\tginkgo.By(fmt.Sprintf(\"Applying the %s cluster template yaml to the cluster\", configCluster.Flavor))\n\terr := input.BootstrapClusterProxy.Apply(ctx, workloadClusterTemplate)\n\tExpect(err).ShouldNot(HaveOccurred())\n\n\tginkgo.By(\"Waiting for the machine pool to be running\")\n\tmp := framework.DiscoveryAndWaitForMachinePools(ctx, framework.DiscoveryAndWaitForMachinePoolsInput{\n\t\tLister: input.BootstrapClusterProxy.GetClient(),\n\t\tGetter: input.BootstrapClusterProxy.GetClient(),\n\t\tCluster: cluster,\n\t}, input.E2EConfig.GetIntervals(\"\", \"wait-worker-nodes\")...)\n\tExpect(len(mp)).To(Equal(1))\n\n\tginkgo.By(\"Check the status of the node group\")\n\teksClusterName := getEKSClusterName(input.Namespace.Name, input.ClusterName)\n\tif input.ManagedMachinePool {\n\t\tvar nodeGroupName string\n\t\tif input.UsesLaunchTemplate {\n\t\t\tnodeGroupName = getEKSNodegroupWithLaunchTemplateName(input.Namespace.Name, input.ClusterName)\n\t\t} else {\n\t\t\tnodeGroupName = getEKSNodegroupName(input.Namespace.Name, input.ClusterName)\n\t\t}\n\t\tverifyManagedNodeGroup(eksClusterName, nodeGroupName, true, input.AWSSession)\n\t} else {\n\t\tasgName := getASGName(input.ClusterName)\n\t\tverifyASG(eksClusterName, asgName, true, input.AWSSession)\n\t}\n\n\tif input.IncludeScaling { // TODO (richardcase): should this be a separate spec?\n\t\tginkgo.By(\"Scaling the machine pool up\")\n\t\tframework.ScaleMachinePoolAndWait(ctx, framework.ScaleMachinePoolAndWaitInput{\n\t\t\tClusterProxy: 
input.BootstrapClusterProxy,\n\t\t\tCluster: cluster,\n\t\t\tReplicas: 2,\n\t\t\tMachinePools: mp,\n\t\t\tWaitForMachinePoolToScale: input.E2EConfig.GetIntervals(\"\", \"wait-worker-nodes\"),\n\t\t})\n\n\t\tginkgo.By(\"Scaling the machine pool down\")\n\t\tframework.ScaleMachinePoolAndWait(ctx, framework.ScaleMachinePoolAndWaitInput{\n\t\t\tClusterProxy: input.BootstrapClusterProxy,\n\t\t\tCluster: cluster,\n\t\t\tReplicas: 1,\n\t\t\tMachinePools: mp,\n\t\t\tWaitForMachinePoolToScale: input.E2EConfig.GetIntervals(\"\", \"wait-worker-nodes\"),\n\t\t})\n\t}\n\n\tif input.Cleanup {\n\t\tdeleteMachinePool(ctx, deleteMachinePoolInput{\n\t\t\tDeleter: input.BootstrapClusterProxy.GetClient(),\n\t\t\tMachinePool: mp[0],\n\t\t})\n\n\t\twaitForMachinePoolDeleted(ctx, waitForMachinePoolDeletedInput{\n\t\t\tGetter: input.BootstrapClusterProxy.GetClient(),\n\t\t\tMachinePool: mp[0],\n\t\t}, input.E2EConfig.GetIntervals(\"\", \"wait-delete-machine-pool\")...)\n\t}\n}", "func NewBootstrapper(config *genesisconfig.Profile) (*Bootstrapper, error) {\n\tif err := HasSkippedForeignOrgs(config); err != nil {\n\t\treturn nil, errors.WithMessage(err, \"all org definitions must be local during bootstrapping\")\n\t}\n\n\tchannelGroup, err := NewChannelGroup(config)\n\tif err != nil {\n\t\treturn nil, errors.WithMessage(err, \"could not create channel group\")\n\t}\n\n\treturn &Bootstrapper{\n\t\tchannelGroup: channelGroup,\n\t}, nil\n}", "func New(config *genesisconfig.Profile) *Bootstrapper {\n\tbs, err := NewBootstrapper(config)\n\tif err != nil {\n\t\tlogger.Panicf(\"Error creating bootstrapper: %s\", err)\n\t}\n\treturn bs\n}", "func New(cfg Config) eks_tester.Tester {\n\tcfg.Logger.Info(\"creating tester\", zap.String(\"tester\", pkgName))\n\treturn &tester{cfg: cfg, busyboxImg: \"busybox\"}\n}", "func Boot(ctx context.Context, settings *Settings, cfg *config.Config) (Bootstrap, error) {\n\t// NOTE(longsleep): Ensure to use same salt length as the hash size.\n\t// See https://www.ietf.org/mail-archive/web/jose/current/msg02901.html for\n\t// reference and https://github.com/golang-jwt/jwt/v4/issues/285 for\n\t// the issue in upstream jwt-go.\n\tfor _, alg := range []string{jwt.SigningMethodPS256.Name, jwt.SigningMethodPS384.Name, jwt.SigningMethodPS512.Name} {\n\t\tsm := jwt.GetSigningMethod(alg)\n\t\tif signingMethodRSAPSS, ok := sm.(*jwt.SigningMethodRSAPSS); ok {\n\t\t\tsigningMethodRSAPSS.Options.SaltLength = rsa.PSSSaltLengthEqualsHash\n\t\t}\n\t}\n\n\tbs := &bootstrap{\n\t\tconfig: &Config{\n\t\t\tConfig: cfg,\n\t\t\tSettings: settings,\n\t\t},\n\t}\n\n\terr := bs.initialize(settings)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = bs.setup(ctx, settings)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn bs, nil\n}", "func New(id string) *Spec {\n\treturn &Spec{ID: id, Target: make(map[string]string)}\n}", "func NewSpecGenerator(arg string, rootfs bool) *SpecGenerator {\n\tcsc := ContainerStorageConfig{}\n\tif rootfs {\n\t\tcsc.Rootfs = arg\n\t\t// check if rootfs should use overlay\n\t\tlastColonIndex := strings.LastIndex(csc.Rootfs, \":\")\n\t\tif lastColonIndex != -1 {\n\t\t\tlastPart := csc.Rootfs[lastColonIndex+1:]\n\t\t\tif lastPart == \"O\" {\n\t\t\t\tcsc.RootfsOverlay = true\n\t\t\t\tcsc.Rootfs = csc.Rootfs[:lastColonIndex]\n\t\t\t} else if lastPart == \"idmap\" || strings.HasPrefix(lastPart, \"idmap=\") {\n\t\t\t\tcsc.RootfsMapping = &lastPart\n\t\t\t\tcsc.Rootfs = csc.Rootfs[:lastColonIndex]\n\t\t\t}\n\t\t}\n\t} else {\n\t\tcsc.Image = arg\n\t}\n\treturn 
&SpecGenerator{\n\t\tContainerStorageConfig: csc,\n\t}\n}", "func NewCreateSpec(s *api.JobUpdateSettings) *stateless.CreateSpec {\n\tu := NewUpdateSpec(s, false)\n\treturn &stateless.CreateSpec{\n\t\tBatchSize: u.BatchSize,\n\t\tMaxInstanceRetries: u.MaxInstanceRetries,\n\t\tMaxTolerableInstanceFailures: u.MaxTolerableInstanceFailures,\n\t\tStartPaused: u.StartPaused,\n\t}\n}", "func NewBootstrap(config config.Config) {\n\tchatServer = chat.NewServer()\n\tchatServer.Listen()\n\tlogs.Info(\"Chat Server started\")\n\n\tif config.TelnetAddress != \"\" {\n\t\tlogs.FatalIfErrf(startTelnet(config), \"Could not start telnet server.\")\n\t} else {\n\t\tlogs.Warnf(\"TelnetAddress is empty, not running Telnet Driver\")\n\t}\n\n\tif config.WebAddress != \"\" {\n\t\tstartWeb(config)\n\t} else {\n\t\tlogs.Warnf(\"WebAddress is empty, not running Web Drivers\")\n\t}\n}", "func NewBootstrapper(cfg configuration.Configuration) (*Bootstrapper, error) {\n\tbootstrapper := &Bootstrapper{}\n\tbootstrapper.rootKeysFile = cfg.Bootstrap.RootKeys\n\tbootstrapper.rootBalance = cfg.Bootstrap.RootBalance\n\tbootstrapper.rootDomainRef = &core.RecordRef{}\n\treturn bootstrapper, nil\n}", "func (m MgmtCluster) CreateBootstrap() error {\n\tvar err error\n\tm.EventStream.Publish(&progress.StatusEvent{\n\t\tType: \"progress\",\n\t\tMsg: \"kind create cluster (bootstrap cluster)\",\n\t})\n\n\targs := []string{\n\t\t\"create\",\n\t\t\"cluster\",\n\t}\n\terr = cmd.GenericExecute(nil, string(kind), args, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tm.EventStream.Publish(&progress.StatusEvent{\n\t\tType: \"progress\",\n\t\tMsg: \"getting and writing bootstrap cluster kubeconfig to disk\",\n\t})\n\targs = []string{\n\t\t\"get\",\n\t\t\"kubeconfig\",\n\t}\n\tc := cmd.NewCommandLine(nil, string(kind), args, nil)\n\tstdout, stderr, err := c.Program().Execute()\n\tif err != nil || string(stderr) != \"\" {\n\t\treturn fmt.Errorf(\"err: %v, stderr: %v\", err, string(stderr))\n\t}\n\n\terr = writeToDisk(m.ClusterName, bootstrapKubeconfig, []byte(stdout), 0644)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// TODO wait for cluster components to be running\n\tm.EventStream.Publish(&progress.StatusEvent{\n\t\tType: \"progress\",\n\t\tMsg: \"sleeping 20 seconds, need to fix this\",\n\t})\n\ttime.Sleep(20 * time.Second)\n\n\treturn err\n}", "func toK8SPodSpec(podSpec *pbpod.PodSpec) *corev1.Pod {\n\t// Create pod template spec and apply configurations to spec.\n\tlabels := make(map[string]string)\n\tfor _, label := range podSpec.GetLabels() {\n\t\tlabels[label.GetKey()] = label.GetValue()\n\t}\n\n\ttermGracePeriod := int64(podSpec.GetKillGracePeriodSeconds())\n\n\tpodTemp := corev1.PodTemplateSpec{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tLabels: labels,\n\t\t},\n\t\tSpec: corev1.PodSpec{\n\t\t\tContainers: toK8SContainerSpecs(podSpec.GetContainers()),\n\t\t\tInitContainers: toK8SContainerSpecs(podSpec.GetInitContainers()),\n\t\t\tRestartPolicy: \"Never\",\n\t\t\tTerminationGracePeriodSeconds: &termGracePeriod,\n\t\t},\n\t}\n\n\t// Bind node and create pod.\n\treturn &corev1.Pod{\n\t\tObjectMeta: podTemp.ObjectMeta,\n\t\tSpec: podTemp.Spec,\n\t}\n}", "func (b *Bootstrapper) Bootstrap() *Bootstrapper {\n\treturn b\n}", "func NewFromSpec(spec *rspec.Spec) Generator {\n\treturn Generator{\n\t\tspec: spec,\n\t}\n}", "func (m *Manager) bootstrap() error {\n\tvar err error\n\tif m.store, err = getStore(); err != nil {\n\t\treturn err\n\t}\n\n\tlog.Debugf(\"Instantiating buildah.Builder with '%s' base image\", m.fromImage)\n\topts := 
m.builderOptions()\n\tm.b, err = buildah.NewBuilder(m.ctx, m.store, opts)\n\treturn err\n}", "func NewBootstrapper(cfg configuration.Configuration) (*Bootstrapper, error) {\n\tbootstrapper := &Bootstrapper{}\n\tbootstrapper.rootDomainRef = &core.RecordRef{}\n\treturn bootstrapper, nil\n}", "func (b Bootstrapper) Bootstrap(ctx map[string]interface{}) error {\n\tdocSrv, ok := ctx[documents.BootstrappedDocumentService].(documents.Service)\n\tif !ok {\n\t\treturn errors.New(\"failed to get %s\", documents.BootstrappedDocumentService)\n\t}\n\n\tjobsMan, ok := ctx[jobs.BootstrappedService].(jobs.Manager)\n\tif !ok {\n\t\treturn errors.New(\"failed to get %s\", jobs.BootstrappedService)\n\t}\n\n\tnftSrv, ok := ctx[bootstrap.BootstrappedNFTService].(nft.Service)\n\tif !ok {\n\t\treturn errors.New(\"failed to get %s\", bootstrap.BootstrappedNFTService)\n\t}\n\n\taccountSrv, ok := ctx[config.BootstrappedConfigStorage].(config.Service)\n\tif !ok {\n\t\treturn errors.New(\"failed to get %s\", config.BootstrappedConfigStorage)\n\t}\n\n\tctx[BootstrappedCoreAPIService] = Service{\n\t\tdocSrv: docSrv,\n\t\tjobsSrv: jobsMan,\n\t\tnftSrv: nftSrv,\n\t\taccountsSrv: accountSrv,\n\t}\n\treturn nil\n}", "func New(cfgs ...Configurator) *Bootstrapper {\n\tb := &Bootstrapper{\n\t\tAppSpawnDate: time.Now(),\n\t\tApplication: iris.New(),\n\t}\n\n\tfor _, cfg := range cfgs {\n\t\tcfg(b)\n\t}\n\n\t// Adding the conf file(merged?) to the iris app instance\n\tb.Application.Configure(iris.WithConfiguration(b.getConfig()))\n\tb.AppName = b.Application.ConfigurationReadOnly().GetOther()[\"appName\"].(string)\n\n\tb.bootstrap()\n\n\treturn b\n}", "func NewWebhookSpec(spec *job.WebhookSpec) *WebhookSpec {\n\treturn &WebhookSpec{\n\t\tCreatedAt: spec.CreatedAt,\n\t\tUpdatedAt: spec.UpdatedAt,\n\t}\n}", "func TestSelfBootstrap(t *testing.T) {\n\tdefer leaktest.AfterTest(t)()\n\ts, err := serverutils.StartServerRaw(base.TestServerArgs{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer s.Stopper().Stop()\n}", "func createClusterSpecificSpec(app apptypes.AppType, b *troubleshootv1beta2.SupportBundle, clientset kubernetes.Interface) (*troubleshootv1beta2.SupportBundle, error) {\n\tsupportBundle, err := staticspecs.GetClusterSpecificSpec()\n\tif err != nil {\n\t\tlogger.Errorf(\"Failed to load cluster specific support bundle spec: %v\", err)\n\t\treturn nil, err\n\t}\n\n\tsupportBundle = addDiscoveredSpecs(supportBundle, app, clientset)\n\treturn supportBundle, nil\n}", "func BuildJobSpec(pod *podtemplatespec.Builder) *jobspec.Builder {\n\tjobSpecObj := jobspec.NewBuilder().\n\t\tWithPodTemplateSpecBuilder(pod)\n\t_, err := jobSpecObj.Build()\n\tif err != nil {\n\t\tlog.Errorln(err)\n\t}\n\treturn jobSpecObj\n}", "func (o *Options) createExternalBootstrapConfig() clientcmdapiv1.Config {\n\treturn clientcmdapiv1.Config{\n\t\t// Define a cluster stanza based on the bootstrap kubeconfig.\n\t\tClusters: []clientcmdapiv1.NamedCluster{\n\t\t\t{\n\t\t\t\tName: \"hub\",\n\t\t\t\tCluster: clientcmdapiv1.Cluster{\n\t\t\t\t\tServer: o.hubAPIServer,\n\t\t\t\t\tInsecureSkipTLSVerify: true,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// Define auth based on the obtained client cert.\n\t\tAuthInfos: []clientcmdapiv1.NamedAuthInfo{\n\t\t\t{\n\t\t\t\tName: \"bootstrap\",\n\t\t\t\tAuthInfo: clientcmdapiv1.AuthInfo{\n\t\t\t\t\tToken: string(o.token),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// Define a context that connects the auth info and cluster, and set it as the default\n\t\tContexts: []clientcmdapiv1.NamedContext{\n\t\t\t{\n\t\t\t\tName: 
\"bootstrap\",\n\t\t\t\tContext: clientcmdapiv1.Context{\n\t\t\t\t\tCluster: \"hub\",\n\t\t\t\t\tAuthInfo: \"bootstrap\",\n\t\t\t\t\tNamespace: \"default\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tCurrentContext: \"bootstrap\",\n\t}\n}", "func (r *Microk8sConfigReconciler) setupWorkerBootstrapData(ctx context.Context, microk8sconfig *bootstrapv1alpha1.Microk8sConfig) (ctrl.Result, error) {\n\tswitch {\n\t// Migrate plaintext data to secret.\n\tcase microk8sconfig.Status.BootstrapData == nil:\n\t\tmicrok8sconfig.Status.BootstrapData = []byte(r.renderTemplate(microk8sconfig, WorkerInitTemplate))\n\t\tmicrok8sconfig.Status.Ready = true\n\t\treturn ctrl.Result{}, nil\n\t}\n\n\treturn ctrl.Result{}, nil\n}", "func TestSPECjbb(t *testing.T) {\n\tlog.SetLevel(log.ErrorLevel)\n\tspecjbbLoadGeneratorConfig := specjbb.DefaultLoadGeneratorConfig()\n\tspecjbbLoadGeneratorConfig.JVMHeapMemoryGBs = 1\n\tif _, err := exec.LookPath(specjbbLoadGeneratorConfig.PathToBinary); err != nil {\n\t\tt.Logf(\"Skipping test due to an error %s\", err)\n\t\tt.Skip(\"SPECjbb binary is not distributed with Swan. It requires license and should be purchased \" +\n\t\t\t\"separately (see README for details).\")\n\t}\n\n\tConvey(\"While using default config\", t, func() {\n\t\tConvey(\"And launching SPECjbb load\", func() {\n\t\t\tvar transactionInjectors []executor.Executor\n\t\t\ttransactionInjector := executor.NewLocal()\n\t\t\ttransactionInjectors = append(transactionInjectors, transactionInjector)\n\n\t\t\tloadGeneratorLauncher := specjbb.NewLoadGenerator(executor.NewLocal(),\n\t\t\t\ttransactionInjectors, specjbbLoadGeneratorConfig)\n\t\t\tloadGeneratorTaskHandle, err := loadGeneratorLauncher.Load(load, loadDuration)\n\n\t\t\tConvey(\"Proper handle should be returned\", func() {\n\t\t\t\tSo(err, ShouldBeNil)\n\t\t\t\tSo(loadGeneratorTaskHandle, ShouldNotBeNil)\n\n\t\t\t\tReset(func() {\n\t\t\t\t\tloadGeneratorTaskHandle.Stop()\n\t\t\t\t\tloadGeneratorTaskHandle.EraseOutput()\n\t\t\t\t})\n\n\t\t\t\tConvey(\"And after adding the SPECjbb backend\", func() {\n\t\t\t\t\tbackendConfig := specjbb.DefaultSPECjbbBackendConfig()\n\t\t\t\t\tbackendConfig.JVMHeapMemoryGBs = 1\n\t\t\t\t\tbackendLauncher := specjbb.NewBackend(executor.NewLocal(), backendConfig)\n\t\t\t\t\tbackendTaskHandle, err := backendLauncher.Launch()\n\n\t\t\t\t\tReset(func() {\n\t\t\t\t\t\tbackendTaskHandle.Stop()\n\t\t\t\t\t\tloadGeneratorTaskHandle.EraseOutput()\n\t\t\t\t\t})\n\n\t\t\t\t\tConvey(\"Proper handle should be returned\", func() {\n\t\t\t\t\t\tSo(err, ShouldBeNil)\n\t\t\t\t\t\tSo(backendTaskHandle, ShouldNotBeNil)\n\n\t\t\t\t\t\tConvey(\"And should work for at least as long as given load duration\", func() {\n\t\t\t\t\t\t\tloadIsTerminated, err := loadGeneratorTaskHandle.Wait(loadDuration)\n\t\t\t\t\t\t\tSo(err, ShouldBeNil)\n\t\t\t\t\t\t\tSo(loadIsTerminated, ShouldBeFalse)\n\t\t\t\t\t\t\tbackendIsTerminated, err := backendTaskHandle.Wait(loadDuration)\n\t\t\t\t\t\t\tSo(err, ShouldBeNil)\n\t\t\t\t\t\t\tSo(backendIsTerminated, ShouldBeFalse)\n\n\t\t\t\t\t\t\t// Now wait for backend and transaction injectors to finish.\n\t\t\t\t\t\t\tloadGeneratorTaskHandle.Wait(0)\n\t\t\t\t\t\t\tbackendTaskHandle.Wait(0)\n\n\t\t\t\t\t\t\toutput, err := loadGeneratorTaskHandle.StdoutFile()\n\t\t\t\t\t\t\tSo(err, ShouldBeNil)\n\t\t\t\t\t\t\tfile, err := os.Open(output.Name())\n\t\t\t\t\t\t\tdefer file.Close()\n\t\t\t\t\t\t\tscanner := bufio.NewScanner(file)\n\n\t\t\t\t\t\t\t// When SPECjbb composite mode is successfully started, the output is:\n\t\t\t\t\t\t\t//1s: Agent 
GRP1.Backend.specjbbbackend1 has attached to Controller\n\t\t\t\t\t\t\t// 1s: Agent GRP1.TxInjector.JVM1 has attached to Controller\n\t\t\t\t\t\t\t// 1s:\n\t\t\t\t\t\t\t// 1s: All agents have connected.\n\t\t\t\t\t\t\t// 1s:\n\t\t\t\t\t\t\t// 1s: Attached agents info:\n\t\t\t\t\t\t\t// Group \"GRP1\"\n\t\t\t\t\t\t\t// TxInjectors:\n\t\t\t\t\t\t\t// JVM1, includes { Driver } @ [127.0.0.1:40910, 127.0.0.1:41757, 127.0.0.1:41462]\n\t\t\t\t\t\t\t// Backends:\n\t\t\t\t\t\t\t// specjbbbackend1, includes { SM(2),SP(2) } @ [127.0.0.1:38571, 127.0.0.1:45981, 127.0.0.1:35478]\n\t\t\t\t\t\t\t//\n\t\t\t\t\t\t\t//1s: Initializing... (init) OK\n\t\t\t\t\t\t\t// We should look for the proper lines to be sure that our configuration works.\n\t\t\t\t\t\t\tsubstringInitialization := \"Initializing... (init) OK\"\n\t\t\t\t\t\t\tsubstringBackend := \"Agent GRP1.Backend.specjbbbackend1 has attached to Controller\"\n\t\t\t\t\t\t\tsubstringTxI := \"Agent GRP1.TxInjector.JVM1 has attached to Controller\"\n\t\t\t\t\t\t\tvar initializationSuccessful, backendAttachedToController, transactionInjectorAttachedToController bool\n\t\t\t\t\t\t\tfor scanner.Scan() {\n\t\t\t\t\t\t\t\tline := scanner.Text()\n\t\t\t\t\t\t\t\tif result := strings.Contains(line, substringInitialization); result {\n\t\t\t\t\t\t\t\t\tinitializationSuccessful = result\n\t\t\t\t\t\t\t\t} else if result := strings.Contains(line, substringBackend); result {\n\t\t\t\t\t\t\t\t\tbackendAttachedToController = result\n\t\t\t\t\t\t\t\t} else if result := strings.Contains(line, substringTxI); result {\n\t\t\t\t\t\t\t\t\ttransactionInjectorAttachedToController = result\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\terr = scanner.Err()\n\t\t\t\t\t\t\tSo(err, ShouldBeNil)\n\t\t\t\t\t\t\tSo(initializationSuccessful, ShouldBeTrue)\n\t\t\t\t\t\t\tSo(backendAttachedToController, ShouldBeTrue)\n\t\t\t\t\t\t\tSo(transactionInjectorAttachedToController, ShouldBeTrue)\n\n\t\t\t\t\t\t\tConvey(\"And I should be able to stop with no problem and be terminated\", func() {\n\t\t\t\t\t\t\t\terr = loadGeneratorTaskHandle.Stop()\n\t\t\t\t\t\t\t\tSo(err, ShouldBeNil)\n\t\t\t\t\t\t\t\terr = backendTaskHandle.Stop()\n\t\t\t\t\t\t\t\tSo(err, ShouldBeNil)\n\n\t\t\t\t\t\t\t\tstate := loadGeneratorTaskHandle.Status()\n\t\t\t\t\t\t\t\tSo(state, ShouldEqual, executor.TERMINATED)\n\t\t\t\t\t\t\t\tstate = backendTaskHandle.Status()\n\t\t\t\t\t\t\t\tSo(state, ShouldEqual, executor.TERMINATED)\n\t\t\t\t\t\t\t})\n\t\t\t\t\t\t})\n\n\t\t\t\t\t})\n\n\t\t\t\t})\n\t\t\t})\n\t\t})\n\n\t})\n\tConvey(\"While using default config\", t, func() {\n\t\tConvey(\"And launching SPECjbb load\", func() {\n\t\t\tvar transactionInjectors []executor.Executor\n\t\t\ttransactionInjector := executor.NewLocal()\n\t\t\ttransactionInjectors = append(transactionInjectors, transactionInjector)\n\n\t\t\tloadGeneratorLauncher := specjbb.NewLoadGenerator(executor.NewLocal(),\n\t\t\t\ttransactionInjectors, specjbbLoadGeneratorConfig)\n\t\t\tloadGeneratorTaskHandle, err := loadGeneratorLauncher.Load(load, loadDuration)\n\t\t\tConvey(\"Proper handle should be returned\", func() {\n\t\t\t\tSo(err, ShouldBeNil)\n\t\t\t\tSo(loadGeneratorTaskHandle, ShouldNotBeNil)\n\n\t\t\t\tReset(func() {\n\t\t\t\t\tloadGeneratorTaskHandle.Stop()\n\t\t\t\t\tloadGeneratorTaskHandle.EraseOutput()\n\t\t\t\t})\n\n\t\t\t\tConvey(\"And after adding the SPECjbb backend\", func() {\n\t\t\t\t\tbackendConfig := specjbb.DefaultSPECjbbBackendConfig()\n\t\t\t\t\tbackendConfig.JVMHeapMemoryGBs = 1\n\t\t\t\t\tbackendLauncher := 
specjbb.NewBackend(executor.NewLocal(), backendConfig)\n\t\t\t\t\tbackendTaskHandle, err := backendLauncher.Launch()\n\n\t\t\t\t\tReset(func() {\n\t\t\t\t\t\tbackendTaskHandle.Stop()\n\t\t\t\t\t\tbackendTaskHandle.EraseOutput()\n\t\t\t\t\t})\n\n\t\t\t\t\tConvey(\"Proper handle should be returned\", func() {\n\t\t\t\t\t\tSo(err, ShouldBeNil)\n\t\t\t\t\t\tSo(backendTaskHandle, ShouldNotBeNil)\n\n\t\t\t\t\t\tConvey(\"And when I stop backend prematurely, \"+\n\t\t\t\t\t\t\t\"both backend and load generator should be terminated\", func() {\n\t\t\t\t\t\t\t// Wait for backend to be registered.\n\t\t\t\t\t\t\ttime.Sleep(20 * time.Second)\n\t\t\t\t\t\t\terr = backendTaskHandle.Stop()\n\t\t\t\t\t\t\tSo(err, ShouldBeNil)\n\t\t\t\t\t\t\tSo(backendTaskHandle.Status(), ShouldEqual, executor.TERMINATED)\n\t\t\t\t\t\t\tloadGeneratorTaskHandle.Wait(0)\n\t\t\t\t\t\t\tSo(loadGeneratorTaskHandle.Status(), ShouldEqual, executor.TERMINATED)\n\t\t\t\t\t\t})\n\n\t\t\t\t\t})\n\n\t\t\t\t})\n\t\t\t})\n\t\t})\n\n\t})\n\t//TODO(skonefal): Consider deleting this case.\n\tConvey(\"While using default config\", t, func() {\n\t\tConvey(\"And launching SPECjbb load\", func() {\n\t\t\tvar transactionInjectors []executor.Executor\n\t\t\ttransactionInjector := executor.NewLocal()\n\t\t\ttransactionInjectors = append(transactionInjectors, transactionInjector)\n\n\t\t\tloadGeneratorLauncher := specjbb.NewLoadGenerator(executor.NewLocal(),\n\t\t\t\ttransactionInjectors, specjbbLoadGeneratorConfig)\n\t\t\tloadGeneratorTaskHandle, err := loadGeneratorLauncher.Load(load, loadDuration)\n\n\t\t\tConvey(\"Proper handle should be returned\", func() {\n\t\t\t\tSo(err, ShouldBeNil)\n\t\t\t\tSo(loadGeneratorTaskHandle, ShouldNotBeNil)\n\n\t\t\t\tReset(func() {\n\t\t\t\t\tloadGeneratorTaskHandle.Stop()\n\t\t\t\t\tloadGeneratorTaskHandle.EraseOutput()\n\t\t\t\t})\n\n\t\t\t\toutput, err := loadGeneratorTaskHandle.StdoutFile()\n\t\t\t\tSo(err, ShouldBeNil)\n\t\t\t\tfile, err := os.Open(output.Name())\n\t\t\t\tdefer file.Close()\n\t\t\t\tConvey(\"But when the SPECjbb backend is not added, controller should not have information about it in its logs\", func() {\n\t\t\t\t\tloadGeneratorTaskHandle.Wait(loadDuration)\n\t\t\t\t\tscanner := bufio.NewScanner(file)\n\t\t\t\t\tsubstringWithoutBackend := \"Agent GRP1.Backend.specjbbbackend1 has attached to Controller\"\n\t\t\t\t\tvar matchWithoutBackend bool\n\t\t\t\t\tfor scanner.Scan() {\n\t\t\t\t\t\terr := scanner.Err()\n\t\t\t\t\t\tSo(err, ShouldBeNil)\n\t\t\t\t\t\tline := scanner.Text()\n\t\t\t\t\t\tif result := strings.Contains(line, substringWithoutBackend); result {\n\t\t\t\t\t\t\tmatchWithoutBackend = result\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tSo(matchWithoutBackend, ShouldBeFalse)\n\t\t\t\t\tConvey(\"And I should be able to stop with no problem and be terminated\", func() {\n\t\t\t\t\t\terr = loadGeneratorTaskHandle.Stop()\n\t\t\t\t\t\tSo(err, ShouldBeNil)\n\n\t\t\t\t\t\tstate := loadGeneratorTaskHandle.Status()\n\t\t\t\t\t\tSo(state, ShouldEqual, executor.TERMINATED)\n\t\t\t\t\t})\n\t\t\t\t})\n\n\t\t\t})\n\n\t\t})\n\t})\n\tConvey(\"While using default config\", t, func() {\n\t\tspecjbbLoadGeneratorConfig := specjbb.DefaultLoadGeneratorConfig()\n\n\t\tConvey(\"And launching SPECjbb load without transaction injectors\", func() {\n\t\t\tvar transactionInjectors []executor.Executor\n\t\t\tloadGeneratorLauncher := specjbb.NewLoadGenerator(executor.NewLocal(),\n\t\t\t\ttransactionInjectors, specjbbLoadGeneratorConfig)\n\t\t\tloadGeneratorTaskHandle, err := 
loadGeneratorLauncher.Load(load, loadDuration)\n\t\t\tConvey(\"Should return error.\", func() {\n\t\t\t\tSo(loadGeneratorTaskHandle, ShouldBeNil)\n\t\t\t\tSo(err, ShouldNotBeNil)\n\t\t\t})\n\t\t})\n\t})\n}", "func createSpec(name string) (*TestSpec, error) {\n\t// File must be a yaml file.\n\tif filepath.Ext(name) != \".yml\" {\n\t\treturn nil, fmt.Errorf(\"Cannot parse non-yaml file: %s\", name)\n\t}\n\n\t// Read testspec yaml file contents.\n\tcontents, err := ioutil.ReadFile(name)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Unable to read yaml test spec: %s. Error: %v\", name, err)\n\t}\n\n\t// Unmarshals testspec yaml file contents into struct.\n\ttest := &TestSpec{}\n\tif err = yaml.Unmarshal(contents, &test); err != nil {\n\t\treturn nil, fmt.Errorf(\"Unable to unmarshal yaml test spec: %s. Error: %v\", name, err)\n\t}\n\n\t// Instantiates cache for templating.\n\ttest.Cache = make(map[string]string)\n\n\t// Assigns default values for commands.\n\tfor i := range test.Commands {\n\t\t// Skip command by removing from command list.\n\t\tif test.Commands[i].Skip == true {\n\t\t\ttest.Commands = append(test.Commands[:i], test.Commands[i+1:]...)\n\t\t}\n\n\t\t// Default commandspec timeout.\n\t\tif test.Commands[i].Timeout == \"\" {\n\t\t\ttest.Commands[i].Timeout = test.CmdTimeout\n\t\t}\n\t}\n\treturn test, nil\n}", "func NewJenkinsBootstrapper() admission.Interface {\n\treturn &jenkinsBootstrapper{\n\t\tHandler: admission.NewHandler(admission.Create),\n\t}\n}", "func (cli *FakeDatabaseClient) BootstrapStandby(ctx context.Context, in *dbdpb.BootstrapStandbyRequest, opts ...grpc.CallOption) (*dbdpb.BootstrapStandbyResponse, error) {\n\tpanic(\"implement me\")\n}", "func bootstrap() string {\n\treturn \"@START\\n0;JMP\\n\" +\n\t\teqFunction +\n\t\tltFunction +\n\t\tgtFunction +\n\t\t\"(START)\\n\"\n}", "func NewOceanLaunchSpec(ctx *pulumi.Context,\n\tname string, args *OceanLaunchSpecArgs, opts ...pulumi.ResourceOption) (*OceanLaunchSpec, error) {\n\tif args == nil {\n\t\treturn nil, errors.New(\"missing one or more required arguments\")\n\t}\n\n\tif args.OceanId == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'OceanId'\")\n\t}\n\topts = internal.PkgResourceDefaultOpts(opts)\n\tvar resource OceanLaunchSpec\n\terr := ctx.RegisterResource(\"spotinst:gke/oceanLaunchSpec:OceanLaunchSpec\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func RegisterBootstrapConfigSpec(spec ConfigSpec) {\n\tkey := spec.ConfigKey()\n\tfor _, s := range bootstrapSpecs {\n\t\tif s.ConfigKey() == key {\n\t\t\treturn\n\t\t}\n\t}\n\tbootstrapSpecs = append(bootstrapSpecs, spec)\n}", "func New(rom []Byte, options Options) Jibi {\n\tcart := NewCartridge(rom)\n\tmmu := NewMmu(cart)\n\tcpu := NewCpu(mmu, bios)\n\tlcd := NewLcd(options.Squash)\n\tgpu := NewGpu(mmu, lcd, cpu.Clock())\n\tkp := NewKeypad(mmu, options.Keypad)\n\n\tif options.Skipbios {\n\t\tcpu.RunCommand(CmdUnloadBios, nil)\n\t}\n\tif !options.Render {\n\t\tlcd.DisableRender()\n\t}\n\n\treturn Jibi{options, mmu, cpu, lcd, gpu, cart, kp}\n}", "func (b *Bootstrap) Bootstrap(hostname string) error {\n\tlog.Infof(\"Mounting proc\")\n\tif err := syscall.Mount(\"none\", \"/proc\", \"proc\", 0, \"\"); err != nil {\n\t\treturn err\n\t}\n\n\tif err := syscall.Mount(\"none\", \"/dev\", \"devtmpfs\", 0, \"\"); err != nil {\n\t\treturn err\n\t}\n\n\tif err := syscall.Mount(\"none\", \"/dev/pts\", \"devpts\", 0, \"\"); err != nil {\n\t\treturn err\n\t}\n\n\tif err := 
updateHostname(hostname); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (o *snapshotter) constructOverlayBDSpec(ctx context.Context, key string, writable bool) error {\n\tid, info, _, err := storage.GetInfo(ctx, key)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"failed to get info for snapshot %s\", key)\n\t}\n\n\tstype, err := o.identifySnapshotStorageType(id, info)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"failed to identify storage of snapshot %s\", key)\n\t}\n\n\tconfigJSON := OverlayBDBSConfig{\n\t\tLowers: []OverlayBDBSConfigLower{},\n\t\tResultFile: o.tgtOverlayBDInitDebuglogPath(id),\n\t}\n\n\t// load the parent's config and reuse the lowerdir\n\tif info.Parent != \"\" {\n\t\tparentConfJSON, err := o.loadBackingStoreConfig(ctx, info.Parent)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tconfigJSON.Lowers = parentConfJSON.Lowers\n\t}\n\n\tswitch stype {\n\tcase storageTypeRemoteBlock:\n\t\tif writable {\n\t\t\treturn errors.Errorf(\"remote block device is read-only, writable is not supported\")\n\t\t}\n\n\t\tblobSize, err := strconv.Atoi(info.Labels[labelKeyOverlayBDBlobSize])\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"failed to parse value of label %s of snapshot %s\", labelKeyOverlayBDBlobSize, key)\n\t\t}\n\n\t\tblobDigest := info.Labels[labelKeyOverlayBDBlobDigest]\n\t\tblobPrefixURL, err := o.constructImageBlobURL(info.Labels[labelKeyImageRef])\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"failed to construct image blob prefix url for snapshot %s\", key)\n\t\t}\n\n\t\tconfigJSON.RepoBlobURL = blobPrefixURL\n\t\tconfigJSON.Lowers = append(configJSON.Lowers, OverlayBDBSConfigLower{\n\t\t\tDigest: blobDigest,\n\t\t\tSize: int64(blobSize),\n\t\t\tDir: o.upperPath(id),\n\t\t})\n\n\tcase storageTypeLocalBlock:\n\t\tif writable {\n\t\t\treturn errors.Errorf(\"local block device is read-only, writable is not supported\")\n\t\t}\n\n\t\tconfigJSON.Lowers = append(configJSON.Lowers, OverlayBDBSConfigLower{\n\t\t\tDir: o.upperPath(id),\n\t\t})\n\n\tdefault:\n\t\tif !writable || info.Parent == \"\" {\n\t\t\treturn errors.Errorf(\"unexpected storage %v of snapshot %v when constructing overlaybd spec (writable=%v, parent=%s)\", stype, key, writable, info.Parent)\n\t\t}\n\n\t\tif err := o.prepareWritableOverlaybd(ctx, id); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tconfigJSON.Upper = OverlayBDBSConfigUpper{\n\t\t\tIndex: o.tgtOverlayBDWritableIndexPath(id),\n\t\t\tData: o.tgtOverlayBDWritableDataPath(id),\n\t\t}\n\t}\n\treturn o.atomicWriteBackingStoreAndTargetConfig(ctx, id, key, configJSON)\n}", "func New(appName, appOwner string, cfgs ...Configurator) *Bootstrapper {\n\tb := &Bootstrapper{\n\t\tAppName: appName,\n\t\tAppOwner: appOwner,\n\t\tAppSpawnDate: time.Now(),\n\t\tEngine: gin.New(),\n\t}\n\n\tfor _, cfg := range cfgs {\n\t\tcfg(b)\n\t}\n\n\treturn b\n}", "func NewK8sProbe(k8sClusterScraper *cluster.ClusterScraper, config *configs.ProbeConfig) (*K8sProbe, error) {\n\t// First try to get cluster ID.\n\tif ClusterID == \"\" {\n\t\tid, err := k8sClusterScraper.GetKubernetesServiceID()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Error trying to get cluster ID:%s\", err)\n\t\t}\n\t\tClusterID = id\n\t}\n\tstitchingManager := stitching.NewStitchingManager(config.StitchingPropertyType)\n\treturn &K8sProbe{\n\t\tstitchingManager: stitchingManager,\n\t\tk8sClusterScraper: k8sClusterScraper,\n\t\tconfig: config,\n\t}, nil\n}", "func newContainer(rspec *spec.Spec, lockDir string) (*Container, error) {\n\tif rspec == nil {\n\t\treturn nil, 
errors.Wrapf(ErrInvalidArg, \"must provide a valid runtime spec to create container\")\n\t}\n\n\tctr := new(Container)\n\tctr.config = new(ContainerConfig)\n\tctr.state = new(containerRuntimeInfo)\n\n\tctr.config.ID = stringid.GenerateNonCryptoID()\n\tctr.config.Name = namesgenerator.GetRandomName(0)\n\n\tctr.config.Spec = new(spec.Spec)\n\tdeepcopier.Copy(rspec).To(ctr.config.Spec)\n\tctr.config.CreatedTime = time.Now()\n\n\t// Path our lock file will reside at\n\tlockPath := filepath.Join(lockDir, ctr.config.ID)\n\t// Grab a lockfile at the given path\n\tlock, err := storage.GetLockfile(lockPath)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"error creating lockfile for new container\")\n\t}\n\tctr.lock = lock\n\n\treturn ctr, nil\n}", "func New(v interface{}) (provider.Provider, error) {\n\ts := Spec{}\n\treturn &s, ioutil.Intermarshal(v, &s)\n}", "func Bootstrap(b BootstrapConfig) Option {\n\treturn func(o *Options) error {\n\t\to.BootstrapConfig = b\n\t\treturn nil\n\t}\n}", "func toK8SContainerSpec(c *pbpod.ContainerSpec) corev1.Container {\n\t// TODO:\n\t// add ports, health check, readiness check, affinity\n\tvar kEnvs []corev1.EnvVar\n\tfor _, e := range c.GetEnvironment() {\n\t\tkEnvs = append(kEnvs, corev1.EnvVar{\n\t\t\tName: e.GetName(),\n\t\t\tValue: e.GetValue(),\n\t\t})\n\t}\n\n\tvar ports []corev1.ContainerPort\n\tfor _, p := range c.GetPorts() {\n\t\tports = append(ports, corev1.ContainerPort{\n\t\t\tName: p.GetName(),\n\t\t\tContainerPort: int32(p.GetValue()),\n\t\t})\n\t}\n\n\tcname := c.GetName()\n\tif cname == \"\" {\n\t\tcname = uuid.New()\n\t}\n\n\tcimage := c.GetImage()\n\tif cimage == \"\" {\n\t\tcimage = _defaultImageName\n\t}\n\n\tmemMb := c.GetResource().GetMemLimitMb()\n\tif memMb < _defaultMinMemMb {\n\t\tmemMb = _defaultMinMemMb\n\t}\n\n\tk8sSpec := corev1.Container{\n\t\tName: cname,\n\t\tImage: cimage,\n\t\tEnv: kEnvs,\n\t\tPorts: ports,\n\t\tResources: corev1.ResourceRequirements{\n\t\t\tLimits: corev1.ResourceList{\n\t\t\t\tcorev1.ResourceCPU: *resource.NewMilliQuantity(\n\t\t\t\t\tint64(c.GetResource().GetCpuLimit()*1000),\n\t\t\t\t\tresource.DecimalSI,\n\t\t\t\t),\n\t\t\t\tcorev1.ResourceMemory: *resource.NewMilliQuantity(\n\t\t\t\t\tint64(memMb*1000000000),\n\t\t\t\t\tresource.DecimalSI,\n\t\t\t\t),\n\t\t\t},\n\t\t\tRequests: corev1.ResourceList{\n\t\t\t\tcorev1.ResourceCPU: *resource.NewMilliQuantity(\n\t\t\t\t\tint64(c.GetResource().GetCpuLimit()*1000),\n\t\t\t\t\tresource.DecimalSI,\n\t\t\t\t),\n\t\t\t\tcorev1.ResourceMemory: *resource.NewMilliQuantity(\n\t\t\t\t\tint64(memMb*1000000000),\n\t\t\t\t\tresource.DecimalSI,\n\t\t\t\t),\n\t\t\t},\n\t\t},\n\t}\n\n\tif c.GetEntrypoint().GetValue() != \"\" {\n\t\tk8sSpec.Command = []string{c.GetEntrypoint().GetValue()}\n\t\tk8sSpec.Args = c.GetEntrypoint().GetArguments()\n\t}\n\n\treturn k8sSpec\n}", "func NewBootstrapMethod(code []byte, classLoader Any) (*Method) {\n method := &Method{}\n method.class = &Class{name:\"~jvmgo\", classLoader:classLoader.(*ClassLoader)}\n method.name = \"<bootstrap>\"\n method.accessFlags = ACC_STATIC\n method.maxStack = 8\n method.maxLocals = 8\n method.code = code\n return method\n}", "func createVendorSpec(b *troubleshootv1beta2.SupportBundle) (*troubleshootv1beta2.SupportBundle, error) {\n\tsupportBundle, err := staticspecs.GetVendorSpec()\n\tif err != nil {\n\t\tlogger.Errorf(\"Failed to load vendor support bundle spec: %v\", err)\n\t\treturn nil, err\n\t}\n\n\tif b.Spec.Collectors != nil {\n\t\tsupportBundle.Spec.Collectors = 
b.DeepCopy().Spec.Collectors\n\t}\n\tif b.Spec.Analyzers != nil {\n\t\tsupportBundle.Spec.Analyzers = b.DeepCopy().Spec.Analyzers\n\t}\n\treturn supportBundle, nil\n}", "func NewPipelineSpec(spec *pipeline.Spec) PipelineSpec {\n\treturn PipelineSpec{\n\t\tID: spec.ID,\n\t\tJobID: spec.JobID,\n\t\tDotDAGSource: spec.DotDagSource,\n\t}\n}", "func newTestKubelet(t *testing.T, controllerAttachDetachEnabled bool) *TestKubelet {\n\timageList := []kubecontainer.Image{\n\t\t{\n\t\t\tID: \"abc\",\n\t\t\tRepoTags: []string{\"k8s.gcr.io:v1\", \"k8s.gcr.io:v2\"},\n\t\t\tSize: 123,\n\t\t},\n\t\t{\n\t\t\tID: \"efg\",\n\t\t\tRepoTags: []string{\"k8s.gcr.io:v3\", \"k8s.gcr.io:v4\"},\n\t\t\tSize: 456,\n\t\t},\n\t}\n\n\treturn newTestKubeletWithImageList(t, imageList, controllerAttachDetachEnabled, true /*initFakeVolumePlugin*/)\n}", "func generateBootstrapContents(t *testing.T, serverURI string, ignoreResourceDeletion bool, nodeID string) []byte {\n\tt.Helper()\n\tbootstrapContents, err := bootstrap.Contents(bootstrap.Options{\n\t\tNodeID: nodeID,\n\t\tServerURI: serverURI,\n\t\tServerListenerResourceNameTemplate: e2e.ServerListenerResourceNameTemplate,\n\t\tIgnoreResourceDeletion: ignoreResourceDeletion,\n\t})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\treturn bootstrapContents\n}", "func newPrometheusSpec(name, addr string) cap.SupervisorSpec {\n\treturn cap.NewSupervisorSpec(\n\t\tname,\n\t\t// this function builds an HTTP Server, this functionality requires more\n\t\t// than a goroutine given the only way to stop a http server is to call the\n\t\t// http.Shutdown function on a seperate goroutine\n\t\tfunc() ([]cap.Node, cap.CleanupResourcesFn, error) {\n\t\t\tserver := buildPrometheusHTTPServer(addr)\n\n\t\t\t// CAUTION: The order here matters, we need waitUntilDone to start last so\n\t\t\t// that it can terminate first, if this is not the case the\n\t\t\t// listenAndServeHTTPWorker child will never terminate.\n\t\t\t//\n\t\t\t// DISCLAIMER: The caution above _is not_ a capataz requirement, but a\n\t\t\t// requirement of net/https' API\n\t\t\tnodes := []cap.Node{\n\t\t\t\tlistenAndServeHTTPWorker(server),\n\t\t\t\twaitUntilDoneHTTPWorker(server),\n\t\t\t}\n\n\t\t\tcleanupServer := func() error {\n\t\t\t\treturn server.Close()\n\t\t\t}\n\n\t\t\treturn nodes, cleanupServer, nil\n\t\t},\n\t)\n}", "func Bootstrap() (*cobra.Command, func()) {\n\tvar cfg string\n\n\t// Get root command\n\troot := cmd.NewRootCmd()\n\n\t// Determine config path from commandline\n\troot.PersistentFlags().StringVar(&cfg, \"config\", \"./config.yaml\", \"config file\")\n\t_ = root.PersistentFlags().Parse(os.Args[1:])\n\n\t// Setup core with config path\n\tc := core.Default(core.WithYamlFile(cfg))\n\n\t// Setup global dependencies and register modules\n\tfor _, option := range config.Register() {\n\t\toption(c)\n\t}\n\n\t// Apply root command and register commands from modules\n\tc.ApplyRootCommand(root)\n\n\treturn root, func() {\n\t\tc.Shutdown()\n\t}\n}", "func (o *RunOptions) RunBootJob() error {\n\trequirements, gitURL, err := o.findRequirementsAndGitURL()\n\tif err != nil {\n\t\treturn err\n\t}\n\tif gitURL == \"\" {\n\t\treturn util.MissingOption(\"git-url\")\n\t}\n\n\tclusterName := requirements.Cluster.ClusterName\n\tlog.Logger().Infof(\"running helmboot Job for cluster %s with git URL %s\", util.ColorInfo(clusterName), util.ColorInfo(gitURL))\n\n\t// TODO while the chart is released lets do a local clone....\n\ttempDir, err := ioutil.TempDir(\"\", \"jx-boot-\")\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to 
create temp dir\")\n\t}\n\n\tinstallerGitURL := \"https://github.com/jenkins-x-labs/jenkins-x-installer.git\"\n\tlog.Logger().Infof(\"cloning %s to %s\", installerGitURL, tempDir)\n\terr = o.Git().Clone(installerGitURL, tempDir)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"failed to git clone %s to dir %s\", installerGitURL, tempDir)\n\t}\n\n\tflag, err := o.hasHelmRelease(\"jx-boot\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tif flag {\n\t\tlog.Logger().Info(\"uninstalling old jx-boot chart ...\")\n\t\tc := util.Command{\n\t\t\tDir: tempDir,\n\t\t\tName: \"helm\",\n\t\t\tArgs: []string{\"uninstall\", \"jx-boot\"},\n\t\t}\n\t\t_, err = c.RunWithoutRetry()\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"failed to remove old jx-boot chart\")\n\t\t}\n\t}\n\n\tc := reqhelpers.GetBootJobCommand(requirements, gitURL)\n\tc.Dir = tempDir\n\n\tcommandLine := fmt.Sprintf(\"%s %s\", c.Name, strings.Join(c.Args, \" \"))\n\n\tlog.Logger().Infof(\"running the command:\\n\\n%s\\n\\n\", util.ColorInfo(commandLine))\n\n\t_, err = c.RunWithoutRetry()\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"failed to run command %s\", commandLine)\n\t}\n\n\treturn o.tailJobLogs()\n}", "func NewProbe(config *config.Config, opts Opts) (*Probe, error) {\n\topts.normalize()\n\n\tnerpc, err := erpc.NewERPC()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tctx, cancel := context.WithCancel(context.Background())\n\n\tp := &Probe{\n\t\tOpts: opts,\n\t\tConfig: config,\n\t\tctx: ctx,\n\t\tcancelFnc: cancel,\n\t\tStatsdClient: opts.StatsdClient,\n\t\tdiscarderRateLimiter: rate.NewLimiter(rate.Every(time.Second/5), 100),\n\t\tPlatformProbe: PlatformProbe{\n\t\t\tapprovers: make(map[eval.EventType]kfilters.ActiveApprovers),\n\t\t\tmanagerOptions: ebpf.NewDefaultOptions(),\n\t\t\tErpc: nerpc,\n\t\t\terpcRequest: &erpc.ERPCRequest{},\n\t\t\tisRuntimeDiscarded: !opts.DontDiscardRuntime,\n\t\t\tuseFentry: config.Probe.EventStreamUseFentry,\n\t\t},\n\t}\n\n\tp.event = NewEvent(p.fieldHandlers)\n\n\tif err := p.detectKernelVersion(); err != nil {\n\t\t// we need the kernel version to start, fail if we can't get it\n\t\treturn nil, err\n\t}\n\n\tif err := p.sanityChecks(); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err := p.VerifyOSVersion(); err != nil {\n\t\tseclog.Warnf(\"the current kernel isn't officially supported, some features might not work properly: %v\", err)\n\t}\n\n\tif err := p.VerifyEnvironment(); err != nil {\n\t\tseclog.Warnf(\"the current environment may be misconfigured: %v\", err)\n\t}\n\n\tuseRingBuffers := p.UseRingBuffers()\n\tuseMmapableMaps := p.kernelVersion.HaveMmapableMaps()\n\n\tp.Manager = ebpf.NewRuntimeSecurityManager(useRingBuffers, p.useFentry)\n\n\tp.ensureConfigDefaults()\n\n\tp.monitor = NewMonitor(p)\n\n\tnumCPU, err := utils.NumCPU()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to parse CPU count: %w\", err)\n\t}\n\n\tp.managerOptions.MapSpecEditors = probes.AllMapSpecEditors(numCPU, probes.MapSpecEditorOpts{\n\t\tTracedCgroupSize: p.Config.RuntimeSecurity.ActivityDumpTracedCgroupsCount,\n\t\tUseRingBuffers: useRingBuffers,\n\t\tUseMmapableMaps: useMmapableMaps,\n\t\tRingBufferSize: uint32(p.Config.Probe.EventStreamBufferSize),\n\t\tPathResolutionEnabled: p.Opts.PathResolutionEnabled,\n\t\tSecurityProfileMaxCount: p.Config.RuntimeSecurity.SecurityProfileMaxCount,\n\t})\n\n\tif config.RuntimeSecurity.ActivityDumpEnabled {\n\t\tfor _, e := range config.RuntimeSecurity.ActivityDumpTracedEventTypes {\n\t\t\tif e == model.SyscallsEventType {\n\t\t\t\t// Add syscall 
monitor probes\n\t\t\t\tp.managerOptions.ActivatedProbes = append(p.managerOptions.ActivatedProbes, probes.SyscallMonitorSelectors...)\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\n\tp.constantOffsets, err = p.GetOffsetConstants()\n\tif err != nil {\n\t\tseclog.Warnf(\"constant fetcher failed: %v\", err)\n\t\treturn nil, err\n\t}\n\t// the constant fetching mechanism can be quite memory intensive, between kernel header downloading,\n\t// runtime compilation, BTF parsing...\n\t// let's ensure the GC has run at this point before doing further memory intensive stuff\n\truntime.GC()\n\n\tp.managerOptions.ConstantEditors = append(p.managerOptions.ConstantEditors, constantfetch.CreateConstantEditors(p.constantOffsets)...)\n\n\tareCGroupADsEnabled := config.RuntimeSecurity.ActivityDumpTracedCgroupsCount > 0\n\n\t// Add global constant editors\n\tp.managerOptions.ConstantEditors = append(p.managerOptions.ConstantEditors,\n\t\tmanager.ConstantEditor{\n\t\t\tName: \"runtime_pid\",\n\t\t\tValue: uint64(utils.Getpid()),\n\t\t},\n\t\tmanager.ConstantEditor{\n\t\t\tName: \"do_fork_input\",\n\t\t\tValue: getDoForkInput(p.kernelVersion),\n\t\t},\n\t\tmanager.ConstantEditor{\n\t\t\tName: \"has_usernamespace_first_arg\",\n\t\t\tValue: getHasUsernamespaceFirstArg(p.kernelVersion),\n\t\t},\n\t\tmanager.ConstantEditor{\n\t\t\tName: \"ovl_path_in_ovl_inode\",\n\t\t\tValue: getOvlPathInOvlInode(p.kernelVersion),\n\t\t},\n\t\tmanager.ConstantEditor{\n\t\t\tName: \"mount_id_offset\",\n\t\t\tValue: mount.GetMountIDOffset(p.kernelVersion),\n\t\t},\n\t\tmanager.ConstantEditor{\n\t\t\tName: \"getattr2\",\n\t\t\tValue: getAttr2(p.kernelVersion),\n\t\t},\n\t\tmanager.ConstantEditor{\n\t\t\tName: \"vfs_unlink_dentry_position\",\n\t\t\tValue: mount.GetVFSLinkDentryPosition(p.kernelVersion),\n\t\t},\n\t\tmanager.ConstantEditor{\n\t\t\tName: \"vfs_mkdir_dentry_position\",\n\t\t\tValue: mount.GetVFSMKDirDentryPosition(p.kernelVersion),\n\t\t},\n\t\tmanager.ConstantEditor{\n\t\t\tName: \"vfs_link_target_dentry_position\",\n\t\t\tValue: mount.GetVFSLinkTargetDentryPosition(p.kernelVersion),\n\t\t},\n\t\tmanager.ConstantEditor{\n\t\t\tName: \"vfs_setxattr_dentry_position\",\n\t\t\tValue: mount.GetVFSSetxattrDentryPosition(p.kernelVersion),\n\t\t},\n\t\tmanager.ConstantEditor{\n\t\t\tName: \"vfs_removexattr_dentry_position\",\n\t\t\tValue: mount.GetVFSRemovexattrDentryPosition(p.kernelVersion),\n\t\t},\n\t\tmanager.ConstantEditor{\n\t\t\tName: \"vfs_rename_input_type\",\n\t\t\tValue: mount.GetVFSRenameInputType(p.kernelVersion),\n\t\t},\n\t\tmanager.ConstantEditor{\n\t\t\tName: \"check_helper_call_input\",\n\t\t\tValue: getCheckHelperCallInputType(p.kernelVersion),\n\t\t},\n\t\tmanager.ConstantEditor{\n\t\t\tName: \"cgroup_activity_dumps_enabled\",\n\t\t\tValue: utils.BoolTouint64(config.RuntimeSecurity.ActivityDumpEnabled && areCGroupADsEnabled),\n\t\t},\n\t\tmanager.ConstantEditor{\n\t\t\tName: \"net_struct_type\",\n\t\t\tValue: getNetStructType(p.kernelVersion),\n\t\t},\n\t\tmanager.ConstantEditor{\n\t\t\tName: \"syscall_monitor_event_period\",\n\t\t\tValue: uint64(config.RuntimeSecurity.ActivityDumpSyscallMonitorPeriod.Nanoseconds()),\n\t\t},\n\t\tmanager.ConstantEditor{\n\t\t\tName: \"send_signal\",\n\t\t\tValue: isBPFSendSignalHelperAvailable(p.kernelVersion),\n\t\t},\n\t\tmanager.ConstantEditor{\n\t\t\tName: \"anomaly_syscalls\",\n\t\t\tValue: utils.BoolTouint64(slices.Contains(p.Config.RuntimeSecurity.AnomalyDetectionEventTypes, model.SyscallsEventType)),\n\t\t},\n\t\tmanager.ConstantEditor{\n\t\t\tName: 
\"monitor_syscalls_map_enabled\",\n\t\t\tValue: utils.BoolTouint64(opts.SyscallsMapMonitorEnabled),\n\t\t},\n\t)\n\n\tp.managerOptions.ConstantEditors = append(p.managerOptions.ConstantEditors, DiscarderConstants...)\n\tp.managerOptions.ConstantEditors = append(p.managerOptions.ConstantEditors, getCGroupWriteConstants())\n\n\t// if we are using tracepoints to probe syscall exits, i.e. if we are using an old kernel version (< 4.12)\n\t// we need to use raw_syscall tracepoints for exits, as syscall are not trace when running an ia32 userspace\n\t// process\n\tif probes.ShouldUseSyscallExitTracepoints() {\n\t\tp.managerOptions.ConstantEditors = append(p.managerOptions.ConstantEditors,\n\t\t\tmanager.ConstantEditor{\n\t\t\t\tName: \"tracepoint_raw_syscall_fallback\",\n\t\t\t\tValue: utils.BoolTouint64(true),\n\t\t\t},\n\t\t)\n\t}\n\n\tif useRingBuffers {\n\t\tp.managerOptions.ConstantEditors = append(p.managerOptions.ConstantEditors,\n\t\t\tmanager.ConstantEditor{\n\t\t\t\tName: \"use_ring_buffer\",\n\t\t\t\tValue: utils.BoolTouint64(true),\n\t\t\t},\n\t\t)\n\t}\n\n\tif p.kernelVersion.HavePIDLinkStruct() {\n\t\tp.managerOptions.ConstantEditors = append(p.managerOptions.ConstantEditors,\n\t\t\tmanager.ConstantEditor{\n\t\t\t\tName: \"kernel_has_pid_link_struct\",\n\t\t\t\tValue: utils.BoolTouint64(true),\n\t\t\t},\n\t\t)\n\t}\n\n\tif p.kernelVersion.HaveLegacyPipeInodeInfoStruct() {\n\t\tp.managerOptions.ConstantEditors = append(p.managerOptions.ConstantEditors,\n\t\t\tmanager.ConstantEditor{\n\t\t\t\tName: \"kernel_has_legacy_pipe_inode_info\",\n\t\t\t\tValue: utils.BoolTouint64(true),\n\t\t\t},\n\t\t)\n\t}\n\n\t// tail calls\n\tp.managerOptions.TailCallRouter = probes.AllTailRoutes(p.Config.Probe.ERPCDentryResolutionEnabled, p.Config.Probe.NetworkEnabled, useMmapableMaps, p.useFentry)\n\tif !p.Config.Probe.ERPCDentryResolutionEnabled || useMmapableMaps {\n\t\t// exclude the programs that use the bpf_probe_write_user helper\n\t\tp.managerOptions.ExcludedFunctions = probes.AllBPFProbeWriteUserProgramFunctions()\n\t}\n\n\tif !p.Config.Probe.NetworkEnabled {\n\t\t// prevent all TC classifiers from loading\n\t\tp.managerOptions.ExcludedFunctions = append(p.managerOptions.ExcludedFunctions, probes.GetAllTCProgramFunctions()...)\n\t}\n\n\tif p.useFentry {\n\t\tafBasedExcluder, err := newAvailableFunctionsBasedExcluder()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tp.managerOptions.AdditionalExcludedFunctionCollector = afBasedExcluder\n\t}\n\n\tp.scrubber = procutil.NewDefaultDataScrubber()\n\tp.scrubber.AddCustomSensitiveWords(config.Probe.CustomSensitiveWords)\n\n\tresolversOpts := resolvers.ResolversOpts{\n\t\tPathResolutionEnabled: opts.PathResolutionEnabled,\n\t\tTagsResolver: opts.TagsResolver,\n\t\tUseRingBuffer: useRingBuffers,\n\t}\n\tp.resolvers, err = resolvers.NewResolvers(config, p.Manager, p.StatsdClient, p.scrubber, p.Erpc, resolversOpts)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tp.fieldHandlers = &FieldHandlers{resolvers: p.resolvers}\n\n\t// be sure to zero the probe event before everything else\n\tp.zeroEvent()\n\n\tif useRingBuffers {\n\t\tp.eventStream = ringbuffer.New(p.handleEvent)\n\t} else {\n\t\tp.eventStream, err = reorderer.NewOrderedPerfMap(p.ctx, p.handleEvent, p.StatsdClient)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treturn p, nil\n}", "func (c *Cluster) Bootstrap(regionID uint64, storeIDs, peerIDs []uint64, leaderPeerID uint64) {\n\tc.Lock()\n\tdefer c.Unlock()\n\n\tif len(storeIDs) != len(peerIDs) {\n\t\tpanic(\"len(storeIDs) != 
len(peerIDs)\")\n\t}\n\tc.regions[regionID] = newRegion(regionID, storeIDs, peerIDs, leaderPeerID)\n}", "func baseTemplate() *datamodel.NodeBootstrappingConfiguration {\n\tvar (\n\t\ttrueConst = true\n\t\tfalseConst = false\n\t)\n\treturn &datamodel.NodeBootstrappingConfiguration{\n\t\tContainerService: &datamodel.ContainerService{\n\t\t\tID: \"\",\n\t\t\tLocation: \"eastus\",\n\t\t\tName: \"\",\n\t\t\tPlan: nil,\n\t\t\tTags: map[string]string(nil),\n\t\t\tType: \"Microsoft.ContainerService/ManagedClusters\",\n\t\t\tProperties: &datamodel.Properties{\n\t\t\t\tClusterID: \"\",\n\t\t\t\tProvisioningState: \"\",\n\t\t\t\tOrchestratorProfile: &datamodel.OrchestratorProfile{\n\t\t\t\t\tOrchestratorType: \"Kubernetes\",\n\t\t\t\t\tOrchestratorVersion: \"1.26.0\",\n\t\t\t\t\tKubernetesConfig: &datamodel.KubernetesConfig{\n\t\t\t\t\t\tKubernetesImageBase: \"\",\n\t\t\t\t\t\tMCRKubernetesImageBase: \"\",\n\t\t\t\t\t\tClusterSubnet: \"\",\n\t\t\t\t\t\tNetworkPolicy: \"\",\n\t\t\t\t\t\tNetworkPlugin: \"kubenet\",\n\t\t\t\t\t\tNetworkMode: \"\",\n\t\t\t\t\t\tContainerRuntime: \"\",\n\t\t\t\t\t\tMaxPods: 0,\n\t\t\t\t\t\tDockerBridgeSubnet: \"\",\n\t\t\t\t\t\tDNSServiceIP: \"\",\n\t\t\t\t\t\tServiceCIDR: \"\",\n\t\t\t\t\t\tUseManagedIdentity: false,\n\t\t\t\t\t\tUserAssignedID: \"\",\n\t\t\t\t\t\tUserAssignedClientID: \"\",\n\t\t\t\t\t\tCustomHyperkubeImage: \"\",\n\t\t\t\t\t\tCustomKubeProxyImage: \"mcr.microsoft.com/oss/kubernetes/kube-proxy:v1.26.0.1\",\n\t\t\t\t\t\tCustomKubeBinaryURL: \"https://acs-mirror.azureedge.net/kubernetes/v1.26.0/binaries/kubernetes-node-linux-amd64.tar.gz\",\n\t\t\t\t\t\tMobyVersion: \"\",\n\t\t\t\t\t\tContainerdVersion: \"\",\n\t\t\t\t\t\tWindowsNodeBinariesURL: \"\",\n\t\t\t\t\t\tWindowsContainerdURL: \"\",\n\t\t\t\t\t\tWindowsSdnPluginURL: \"\",\n\t\t\t\t\t\tUseInstanceMetadata: &trueConst,\n\t\t\t\t\t\tEnableRbac: nil,\n\t\t\t\t\t\tEnableSecureKubelet: nil,\n\t\t\t\t\t\tPrivateCluster: nil,\n\t\t\t\t\t\tGCHighThreshold: 0,\n\t\t\t\t\t\tGCLowThreshold: 0,\n\t\t\t\t\t\tEnableEncryptionWithExternalKms: nil,\n\t\t\t\t\t\tAddons: nil,\n\t\t\t\t\t\tContainerRuntimeConfig: map[string]string(nil),\n\t\t\t\t\t\tControllerManagerConfig: map[string]string(nil),\n\t\t\t\t\t\tSchedulerConfig: map[string]string(nil),\n\t\t\t\t\t\tCloudProviderBackoffMode: \"v2\",\n\t\t\t\t\t\tCloudProviderBackoff: &trueConst,\n\t\t\t\t\t\tCloudProviderBackoffRetries: 6,\n\t\t\t\t\t\tCloudProviderBackoffJitter: 0.0,\n\t\t\t\t\t\tCloudProviderBackoffDuration: 5,\n\t\t\t\t\t\tCloudProviderBackoffExponent: 0.0,\n\t\t\t\t\t\tCloudProviderRateLimit: &trueConst,\n\t\t\t\t\t\tCloudProviderRateLimitQPS: 10.0,\n\t\t\t\t\t\tCloudProviderRateLimitQPSWrite: 10.0,\n\t\t\t\t\t\tCloudProviderRateLimitBucket: 100,\n\t\t\t\t\t\tCloudProviderRateLimitBucketWrite: 100,\n\t\t\t\t\t\tCloudProviderDisableOutboundSNAT: &falseConst,\n\t\t\t\t\t\tNodeStatusUpdateFrequency: \"\",\n\t\t\t\t\t\tLoadBalancerSku: \"Standard\",\n\t\t\t\t\t\tExcludeMasterFromStandardLB: nil,\n\t\t\t\t\t\tAzureCNIURLLinux: \"https://acs-mirror.azureedge.net/azure-cni/v1.1.8/binaries/azure-vnet-cni-linux-amd64-v1.1.8.tgz\",\n\t\t\t\t\t\tAzureCNIURLARM64Linux: \"\",\n\t\t\t\t\t\tAzureCNIURLWindows: \"\",\n\t\t\t\t\t\tMaximumLoadBalancerRuleCount: 250,\n\t\t\t\t\t\tPrivateAzureRegistryServer: \"\",\n\t\t\t\t\t\tNetworkPluginMode: \"\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tAgentPoolProfiles: []*datamodel.AgentPoolProfile{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"nodepool2\",\n\t\t\t\t\t\tVMSize: \"Standard_DS1_v2\",\n\t\t\t\t\t\tKubeletDiskType: 
\"\",\n\t\t\t\t\t\tWorkloadRuntime: \"\",\n\t\t\t\t\t\tDNSPrefix: \"\",\n\t\t\t\t\t\tOSType: \"Linux\",\n\t\t\t\t\t\tPorts: nil,\n\t\t\t\t\t\tAvailabilityProfile: \"VirtualMachineScaleSets\",\n\t\t\t\t\t\tStorageProfile: \"ManagedDisks\",\n\t\t\t\t\t\tVnetSubnetID: \"\",\n\t\t\t\t\t\tDistro: \"aks-ubuntu-containerd-18.04-gen2\",\n\t\t\t\t\t\tCustomNodeLabels: map[string]string{\n\t\t\t\t\t\t\t\"kubernetes.azure.com/mode\": \"system\",\n\t\t\t\t\t\t\t\"kubernetes.azure.com/node-image-version\": \"AKSUbuntu-1804gen2containerd-2022.01.19\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tPreprovisionExtension: nil,\n\t\t\t\t\t\tKubernetesConfig: &datamodel.KubernetesConfig{\n\t\t\t\t\t\t\tKubernetesImageBase: \"\",\n\t\t\t\t\t\t\tMCRKubernetesImageBase: \"\",\n\t\t\t\t\t\t\tClusterSubnet: \"\",\n\t\t\t\t\t\t\tNetworkPolicy: \"\",\n\t\t\t\t\t\t\tNetworkPlugin: \"\",\n\t\t\t\t\t\t\tNetworkMode: \"\",\n\t\t\t\t\t\t\tContainerRuntime: \"containerd\",\n\t\t\t\t\t\t\tMaxPods: 0,\n\t\t\t\t\t\t\tDockerBridgeSubnet: \"\",\n\t\t\t\t\t\t\tDNSServiceIP: \"\",\n\t\t\t\t\t\t\tServiceCIDR: \"\",\n\t\t\t\t\t\t\tUseManagedIdentity: false,\n\t\t\t\t\t\t\tUserAssignedID: \"\",\n\t\t\t\t\t\t\tUserAssignedClientID: \"\",\n\t\t\t\t\t\t\tCustomHyperkubeImage: \"\",\n\t\t\t\t\t\t\tCustomKubeProxyImage: \"\",\n\t\t\t\t\t\t\tCustomKubeBinaryURL: \"\",\n\t\t\t\t\t\t\tMobyVersion: \"\",\n\t\t\t\t\t\t\tContainerdVersion: \"\",\n\t\t\t\t\t\t\tWindowsNodeBinariesURL: \"\",\n\t\t\t\t\t\t\tWindowsContainerdURL: \"\",\n\t\t\t\t\t\t\tWindowsSdnPluginURL: \"\",\n\t\t\t\t\t\t\tUseInstanceMetadata: nil,\n\t\t\t\t\t\t\tEnableRbac: nil,\n\t\t\t\t\t\t\tEnableSecureKubelet: nil,\n\t\t\t\t\t\t\tPrivateCluster: nil,\n\t\t\t\t\t\t\tGCHighThreshold: 0,\n\t\t\t\t\t\t\tGCLowThreshold: 0,\n\t\t\t\t\t\t\tEnableEncryptionWithExternalKms: nil,\n\t\t\t\t\t\t\tAddons: nil,\n\t\t\t\t\t\t\tContainerRuntimeConfig: map[string]string(nil),\n\t\t\t\t\t\t\tControllerManagerConfig: map[string]string(nil),\n\t\t\t\t\t\t\tSchedulerConfig: map[string]string(nil),\n\t\t\t\t\t\t\tCloudProviderBackoffMode: \"\",\n\t\t\t\t\t\t\tCloudProviderBackoff: nil,\n\t\t\t\t\t\t\tCloudProviderBackoffRetries: 0,\n\t\t\t\t\t\t\tCloudProviderBackoffJitter: 0.0,\n\t\t\t\t\t\t\tCloudProviderBackoffDuration: 0,\n\t\t\t\t\t\t\tCloudProviderBackoffExponent: 0.0,\n\t\t\t\t\t\t\tCloudProviderRateLimit: nil,\n\t\t\t\t\t\t\tCloudProviderRateLimitQPS: 0.0,\n\t\t\t\t\t\t\tCloudProviderRateLimitQPSWrite: 0.0,\n\t\t\t\t\t\t\tCloudProviderRateLimitBucket: 0,\n\t\t\t\t\t\t\tCloudProviderRateLimitBucketWrite: 0,\n\t\t\t\t\t\t\tCloudProviderDisableOutboundSNAT: nil,\n\t\t\t\t\t\t\tNodeStatusUpdateFrequency: \"\",\n\t\t\t\t\t\t\tLoadBalancerSku: \"\",\n\t\t\t\t\t\t\tExcludeMasterFromStandardLB: nil,\n\t\t\t\t\t\t\tAzureCNIURLLinux: \"\",\n\t\t\t\t\t\t\tAzureCNIURLARM64Linux: \"\",\n\t\t\t\t\t\t\tAzureCNIURLWindows: \"\",\n\t\t\t\t\t\t\tMaximumLoadBalancerRuleCount: 0,\n\t\t\t\t\t\t\tPrivateAzureRegistryServer: \"\",\n\t\t\t\t\t\t\tNetworkPluginMode: \"\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tVnetCidrs: nil,\n\t\t\t\t\t\tWindowsNameVersion: \"\",\n\t\t\t\t\t\tCustomKubeletConfig: nil,\n\t\t\t\t\t\tCustomLinuxOSConfig: nil,\n\t\t\t\t\t\tMessageOfTheDay: \"\",\n\t\t\t\t\t\tNotRebootWindowsNode: nil,\n\t\t\t\t\t\tAgentPoolWindowsProfile: nil,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tLinuxProfile: &datamodel.LinuxProfile{\n\t\t\t\t\tAdminUsername: \"azureuser\",\n\t\t\t\t\tSSH: struct {\n\t\t\t\t\t\tPublicKeys []datamodel.PublicKey \"json:\\\"publicKeys\\\"\"\n\t\t\t\t\t}{\n\t\t\t\t\t\tPublicKeys: 
[]datamodel.PublicKey{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tKeyData: \"dummysshkey\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tSecrets: nil,\n\t\t\t\t\tDistro: \"\",\n\t\t\t\t\tCustomSearchDomain: nil,\n\t\t\t\t},\n\t\t\t\tWindowsProfile: nil,\n\t\t\t\tExtensionProfiles: nil,\n\t\t\t\tDiagnosticsProfile: nil,\n\t\t\t\tServicePrincipalProfile: &datamodel.ServicePrincipalProfile{\n\t\t\t\t\tClientID: \"msi\",\n\t\t\t\t\tSecret: \"msi\",\n\t\t\t\t\tObjectID: \"\",\n\t\t\t\t\tKeyvaultSecretRef: nil,\n\t\t\t\t},\n\t\t\t\tCertificateProfile: &datamodel.CertificateProfile{\n\t\t\t\t\tCaCertificate: \"\",\n\t\t\t\t\tAPIServerCertificate: \"\",\n\t\t\t\t\tClientCertificate: \"\",\n\t\t\t\t\tClientPrivateKey: \"\",\n\t\t\t\t\tKubeConfigCertificate: \"\",\n\t\t\t\t\tKubeConfigPrivateKey: \"\",\n\t\t\t\t},\n\t\t\t\tAADProfile: nil,\n\t\t\t\tCustomProfile: nil,\n\t\t\t\tHostedMasterProfile: &datamodel.HostedMasterProfile{\n\t\t\t\t\tFQDN: \"\",\n\t\t\t\t\tIPAddress: \"\",\n\t\t\t\t\tDNSPrefix: \"\",\n\t\t\t\t\tFQDNSubdomain: \"\",\n\t\t\t\t\tSubnet: \"\",\n\t\t\t\t\tAPIServerWhiteListRange: nil,\n\t\t\t\t\tIPMasqAgent: true,\n\t\t\t\t},\n\t\t\t\tAddonProfiles: map[string]datamodel.AddonProfile(nil),\n\t\t\t\tFeatureFlags: nil,\n\t\t\t\tCustomCloudEnv: nil,\n\t\t\t\tCustomConfiguration: nil,\n\t\t\t},\n\t\t},\n\t\tCloudSpecConfig: &datamodel.AzureEnvironmentSpecConfig{\n\t\t\tCloudName: \"AzurePublicCloud\",\n\t\t\tDockerSpecConfig: datamodel.DockerSpecConfig{\n\t\t\t\tDockerEngineRepo: \"https://aptdocker.azureedge.net/repo\",\n\t\t\t\tDockerComposeDownloadURL: \"https://github.com/docker/compose/releases/download\",\n\t\t\t},\n\t\t\tKubernetesSpecConfig: datamodel.KubernetesSpecConfig{\n\t\t\t\tAzureTelemetryPID: \"\",\n\t\t\t\tKubernetesImageBase: \"k8s.gcr.io/\",\n\t\t\t\tTillerImageBase: \"gcr.io/kubernetes-helm/\",\n\t\t\t\tACIConnectorImageBase: \"microsoft/\",\n\t\t\t\tMCRKubernetesImageBase: \"mcr.microsoft.com/\",\n\t\t\t\tNVIDIAImageBase: \"nvidia/\",\n\t\t\t\tAzureCNIImageBase: \"mcr.microsoft.com/containernetworking/\",\n\t\t\t\tCalicoImageBase: \"calico/\",\n\t\t\t\tEtcdDownloadURLBase: \"\",\n\t\t\t\tKubeBinariesSASURLBase: \"https://acs-mirror.azureedge.net/kubernetes/\",\n\t\t\t\tWindowsTelemetryGUID: \"fb801154-36b9-41bc-89c2-f4d4f05472b0\",\n\t\t\t\tCNIPluginsDownloadURL: \"https://acs-mirror.azureedge.net/cni/cni-plugins-amd64-v0.7.6.tgz\",\n\t\t\t\tVnetCNILinuxPluginsDownloadURL: \"https://acs-mirror.azureedge.net/azure-cni/v1.1.3/binaries/azure-vnet-cni-linux-amd64-v1.1.3.tgz\",\n\t\t\t\tVnetCNIWindowsPluginsDownloadURL: \"https://acs-mirror.azureedge.net/azure-cni/v1.1.3/binaries/azure-vnet-cni-singletenancy-windows-amd64-v1.1.3.zip\",\n\t\t\t\tContainerdDownloadURLBase: \"https://storage.googleapis.com/cri-containerd-release/\",\n\t\t\t\tCSIProxyDownloadURL: \"https://acs-mirror.azureedge.net/csi-proxy/v0.1.0/binaries/csi-proxy.tar.gz\",\n\t\t\t\tWindowsProvisioningScriptsPackageURL: \"https://acs-mirror.azureedge.net/aks-engine/windows/provisioning/signedscripts-v0.2.2.zip\",\n\t\t\t\tWindowsPauseImageURL: \"mcr.microsoft.com/oss/kubernetes/pause:1.4.0\",\n\t\t\t\tAlwaysPullWindowsPauseImage: false,\n\t\t\t\tCseScriptsPackageURL: \"https://acs-mirror.azureedge.net/aks/windows/cse/csescripts-v0.0.1.zip\",\n\t\t\t\tCNIARM64PluginsDownloadURL: \"https://acs-mirror.azureedge.net/cni-plugins/v0.8.7/binaries/cni-plugins-linux-arm64-v0.8.7.tgz\",\n\t\t\t\tVnetCNIARM64LinuxPluginsDownloadURL: 
\"https://acs-mirror.azureedge.net/azure-cni/v1.4.13/binaries/azure-vnet-cni-linux-arm64-v1.4.14.tgz\",\n\t\t\t},\n\t\t\tEndpointConfig: datamodel.AzureEndpointConfig{\n\t\t\t\tResourceManagerVMDNSSuffix: \"cloudapp.azure.com\",\n\t\t\t},\n\t\t\tOSImageConfig: map[datamodel.Distro]datamodel.AzureOSImageConfig(nil),\n\t\t},\n\t\tK8sComponents: &datamodel.K8sComponents{\n\t\t\tPodInfraContainerImageURL: \"mcr.microsoft.com/oss/kubernetes/pause:3.6\",\n\t\t\tHyperkubeImageURL: \"mcr.microsoft.com/oss/kubernetes/\",\n\t\t\tWindowsPackageURL: \"windowspackage\",\n\t\t},\n\t\tAgentPoolProfile: &datamodel.AgentPoolProfile{\n\t\t\tName: \"nodepool2\",\n\t\t\tVMSize: \"Standard_DS1_v2\",\n\t\t\tKubeletDiskType: \"\",\n\t\t\tWorkloadRuntime: \"\",\n\t\t\tDNSPrefix: \"\",\n\t\t\tOSType: \"Linux\",\n\t\t\tPorts: nil,\n\t\t\tAvailabilityProfile: \"VirtualMachineScaleSets\",\n\t\t\tStorageProfile: \"ManagedDisks\",\n\t\t\tVnetSubnetID: \"\",\n\t\t\tDistro: \"aks-ubuntu-containerd-18.04-gen2\",\n\t\t\tCustomNodeLabels: map[string]string{\n\t\t\t\t\"kubernetes.azure.com/mode\": \"system\",\n\t\t\t\t\"kubernetes.azure.com/node-image-version\": \"AKSUbuntu-1804gen2containerd-2022.01.19\",\n\t\t\t},\n\t\t\tPreprovisionExtension: nil,\n\t\t\tKubernetesConfig: &datamodel.KubernetesConfig{\n\t\t\t\tKubernetesImageBase: \"\",\n\t\t\t\tMCRKubernetesImageBase: \"\",\n\t\t\t\tClusterSubnet: \"\",\n\t\t\t\tNetworkPolicy: \"\",\n\t\t\t\tNetworkPlugin: \"\",\n\t\t\t\tNetworkMode: \"\",\n\t\t\t\tContainerRuntime: \"containerd\",\n\t\t\t\tMaxPods: 0,\n\t\t\t\tDockerBridgeSubnet: \"\",\n\t\t\t\tDNSServiceIP: \"\",\n\t\t\t\tServiceCIDR: \"\",\n\t\t\t\tUseManagedIdentity: false,\n\t\t\t\tUserAssignedID: \"\",\n\t\t\t\tUserAssignedClientID: \"\",\n\t\t\t\tCustomHyperkubeImage: \"\",\n\t\t\t\tCustomKubeProxyImage: \"\",\n\t\t\t\tCustomKubeBinaryURL: \"\",\n\t\t\t\tMobyVersion: \"\",\n\t\t\t\tContainerdVersion: \"\",\n\t\t\t\tWindowsNodeBinariesURL: \"\",\n\t\t\t\tWindowsContainerdURL: \"\",\n\t\t\t\tWindowsSdnPluginURL: \"\",\n\t\t\t\tUseInstanceMetadata: nil,\n\t\t\t\tEnableRbac: nil,\n\t\t\t\tEnableSecureKubelet: nil,\n\t\t\t\tPrivateCluster: nil,\n\t\t\t\tGCHighThreshold: 0,\n\t\t\t\tGCLowThreshold: 0,\n\t\t\t\tEnableEncryptionWithExternalKms: nil,\n\t\t\t\tAddons: nil,\n\t\t\t\tContainerRuntimeConfig: map[string]string(nil),\n\t\t\t\tControllerManagerConfig: map[string]string(nil),\n\t\t\t\tSchedulerConfig: map[string]string(nil),\n\t\t\t\tCloudProviderBackoffMode: \"\",\n\t\t\t\tCloudProviderBackoff: nil,\n\t\t\t\tCloudProviderBackoffRetries: 0,\n\t\t\t\tCloudProviderBackoffJitter: 0.0,\n\t\t\t\tCloudProviderBackoffDuration: 0,\n\t\t\t\tCloudProviderBackoffExponent: 0.0,\n\t\t\t\tCloudProviderRateLimit: nil,\n\t\t\t\tCloudProviderRateLimitQPS: 0.0,\n\t\t\t\tCloudProviderRateLimitQPSWrite: 0.0,\n\t\t\t\tCloudProviderRateLimitBucket: 0,\n\t\t\t\tCloudProviderRateLimitBucketWrite: 0,\n\t\t\t\tCloudProviderDisableOutboundSNAT: nil,\n\t\t\t\tNodeStatusUpdateFrequency: \"\",\n\t\t\t\tLoadBalancerSku: \"\",\n\t\t\t\tExcludeMasterFromStandardLB: nil,\n\t\t\t\tAzureCNIURLLinux: \"\",\n\t\t\t\tAzureCNIURLARM64Linux: \"\",\n\t\t\t\tAzureCNIURLWindows: \"\",\n\t\t\t\tMaximumLoadBalancerRuleCount: 0,\n\t\t\t\tPrivateAzureRegistryServer: \"\",\n\t\t\t\tNetworkPluginMode: \"\",\n\t\t\t},\n\t\t\tVnetCidrs: nil,\n\t\t\tWindowsNameVersion: \"\",\n\t\t\tCustomKubeletConfig: nil,\n\t\t\tCustomLinuxOSConfig: nil,\n\t\t\tMessageOfTheDay: \"\",\n\t\t\tNotRebootWindowsNode: nil,\n\t\t\tAgentPoolWindowsProfile: nil,\n\t\t},\n\t\tTenantID: 
\"\",\n\t\tSubscriptionID: \"\",\n\t\tResourceGroupName: \"\",\n\t\tUserAssignedIdentityClientID: \"\",\n\t\tOSSKU: \"\",\n\t\tConfigGPUDriverIfNeeded: true,\n\t\tDisable1804SystemdResolved: false,\n\t\tEnableGPUDevicePluginIfNeeded: false,\n\t\tEnableKubeletConfigFile: false,\n\t\tEnableNvidia: false,\n\t\tEnableACRTeleportPlugin: false,\n\t\tTeleportdPluginURL: \"\",\n\t\tContainerdVersion: \"\",\n\t\tRuncVersion: \"\",\n\t\tContainerdPackageURL: \"\",\n\t\tRuncPackageURL: \"\",\n\t\tKubeletClientTLSBootstrapToken: nil,\n\t\tFIPSEnabled: false,\n\t\tHTTPProxyConfig: &datamodel.HTTPProxyConfig{\n\t\t\tHTTPProxy: nil,\n\t\t\tHTTPSProxy: nil,\n\t\t\tNoProxy: &[]string{\n\t\t\t\t\"localhost\",\n\t\t\t\t\"127.0.0.1\",\n\t\t\t\t\"168.63.129.16\",\n\t\t\t\t\"169.254.169.254\",\n\t\t\t\t\"10.0.0.0/16\",\n\t\t\t\t\"agentbaker-agentbaker-e2e-t-8ecadf-c82d8251.hcp.eastus.azmk8s.io\",\n\t\t\t},\n\t\t\tTrustedCA: nil,\n\t\t},\n\t\tKubeletConfig: map[string]string{\n\t\t\t\"--address\": \"0.0.0.0\",\n\t\t\t\"--anonymous-auth\": \"false\",\n\t\t\t\"--authentication-token-webhook\": \"true\",\n\t\t\t\"--authorization-mode\": \"Webhook\",\n\t\t\t\"--azure-container-registry-config\": \"/etc/kubernetes/azure.json\",\n\t\t\t\"--cgroups-per-qos\": \"true\",\n\t\t\t\"--client-ca-file\": \"/etc/kubernetes/certs/ca.crt\",\n\t\t\t\"--cloud-config\": \"/etc/kubernetes/azure.json\",\n\t\t\t\"--cloud-provider\": \"azure\",\n\t\t\t\"--cluster-dns\": \"10.0.0.10\",\n\t\t\t\"--cluster-domain\": \"cluster.local\",\n\t\t\t\"--dynamic-config-dir\": \"/var/lib/kubelet\",\n\t\t\t\"--enforce-node-allocatable\": \"pods\",\n\t\t\t\"--event-qps\": \"0\",\n\t\t\t\"--eviction-hard\": \"memory.available<750Mi,nodefs.available<10%,nodefs.inodesFree<5%\",\n\t\t\t\"--feature-gates\": \"RotateKubeletServerCertificate=true\",\n\t\t\t\"--image-gc-high-threshold\": \"85\",\n\t\t\t\"--image-gc-low-threshold\": \"80\",\n\t\t\t\"--keep-terminated-pod-volumes\": \"false\",\n\t\t\t\"--kube-reserved\": \"cpu=100m,memory=1638Mi\",\n\t\t\t\"--kubeconfig\": \"/var/lib/kubelet/kubeconfig\",\n\t\t\t\"--max-pods\": \"110\",\n\t\t\t\"--network-plugin\": \"kubenet\",\n\t\t\t\"--node-status-update-frequency\": \"10s\",\n\t\t\t\"--pod-infra-container-image\": \"mcr.microsoft.com/oss/kubernetes/pause:3.6\",\n\t\t\t\"--pod-manifest-path\": \"/etc/kubernetes/manifests\",\n\t\t\t\"--pod-max-pids\": \"-1\",\n\t\t\t\"--protect-kernel-defaults\": \"true\",\n\t\t\t\"--read-only-port\": \"0\",\n\t\t\t\"--resolv-conf\": \"/run/systemd/resolve/resolv.conf\",\n\t\t\t\"--rotate-certificates\": \"false\",\n\t\t\t\"--streaming-connection-idle-timeout\": \"4h\",\n\t\t\t\"--tls-cert-file\": \"/etc/kubernetes/certs/kubeletserver.crt\",\n\t\t\t\"--tls-cipher-suites\": \"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256\",\n\t\t\t\"--tls-private-key-file\": \"/etc/kubernetes/certs/kubeletserver.key\",\n\t\t},\n\t\tKubeproxyConfig: map[string]string(nil),\n\t\tEnableRuncShimV2: false,\n\t\tGPUInstanceProfile: \"\",\n\t\tPrimaryScaleSetName: \"\",\n\t\tSIGConfig: datamodel.SIGConfig{\n\t\t\tTenantID: \"tenantID\",\n\t\t\tSubscriptionID: \"subID\",\n\t\t\tGalleries: map[string]datamodel.SIGGalleryConfig{\n\t\t\t\t\"AKSUbuntu\": {\n\t\t\t\t\tGalleryName: \"aksubuntu\",\n\t\t\t\t\tResourceGroup: 
\"resourcegroup\",\n\t\t\t\t},\n\t\t\t\t\"AKSCBLMariner\": {\n\t\t\t\t\tGalleryName: \"akscblmariner\",\n\t\t\t\t\tResourceGroup: \"resourcegroup\",\n\t\t\t\t},\n\t\t\t\t\"AKSWindows\": {\n\t\t\t\t\tGalleryName: \"AKSWindows\",\n\t\t\t\t\tResourceGroup: \"AKS-Windows\",\n\t\t\t\t},\n\t\t\t\t\"AKSUbuntuEdgeZone\": {\n\t\t\t\t\tGalleryName: \"AKSUbuntuEdgeZone\",\n\t\t\t\t\tResourceGroup: \"AKS-Ubuntu-EdgeZone\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tIsARM64: false,\n\t\tCustomCATrustConfig: nil,\n\t\tDisableUnattendedUpgrades: true,\n\t\tSSHStatus: 0,\n\t\tDisableCustomData: false,\n\t}\n}", "func New(ctx context.Context, clientset client.Clientset, opts Opts) (*MetalLBSpeaker, error) {\n\tctrl, err := newMetalLBSpeaker(ctx, clientset)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tspkr := &MetalLBSpeaker{\n\t\tFencer: fence.Fencer{},\n\t\tspeaker: ctrl,\n\n\t\tannounceLBIP: opts.LoadBalancerIP,\n\t\tannouncePodCIDR: opts.PodCIDR,\n\n\t\tqueue: workqueue.New(),\n\n\t\tservices: make(map[k8s.ServiceID]*slim_corev1.Service),\n\t}\n\n\tgo spkr.run(ctx)\n\n\tlog.Info(\"Started BGP speaker\")\n\n\treturn spkr, nil\n}", "func New(s *session.Session) CodeDeploySpec {\n\treturn CodeDeploySpec{\n\t\tSession: s,\n\t}\n}", "func (Bootstrapper) Bootstrap(ctx map[string]interface{}) error {\n\tdocService, ok := ctx[documents.BootstrappedDocumentService].(documents.Service)\n\tif !ok {\n\t\treturn errors.New(\"document service not initialized\")\n\t}\n\n\tidService, ok := ctx[identity.BootstrappedDIDService].(identity.Service)\n\tif !ok {\n\t\treturn errors.New(\"identity service not initialised\")\n\t}\n\n\tqueueSrv, ok := ctx[bootstrap.BootstrappedQueueServer].(queue.TaskQueuer)\n\tif !ok {\n\t\treturn errors.New(\"queue hasn't been initialized\")\n\t}\n\n\tjobManager, ok := ctx[jobs.BootstrappedService].(jobs.Manager)\n\tif !ok {\n\t\treturn errors.New(\"transactions repository not initialised\")\n\t}\n\n\tclient := ethereum.GetClient()\n\toracleSrv := newService(\n\t\tdocService,\n\t\tidService,\n\t\tclient,\n\t\tqueueSrv,\n\t\tjobManager)\n\tctx[BootstrappedOracleService] = oracleSrv\n\treturn nil\n}", "func NewBootstrapper(ctx context.Context, h lp2phost.Host, d lp2pnet.Dialer, r lp2prouting.Routing, conf *BootstrapConfig, logger *logger.Logger) *Bootstrapper {\n\tb := &Bootstrapper{\n\t\tctx: ctx,\n\t\tconfig: conf,\n\t\thost: h,\n\t\tdialer: d,\n\t\trouting: r,\n\t\tlogger: logger,\n\t}\n\n\taddresses, err := PeerAddrsToAddrInfo(conf.Addresses)\n\tif err != nil {\n\t\tb.logger.Panic(\"couldn't parse bootstrap addresses\", \"addressed\", conf.Addresses)\n\t}\n\n\tb.bootstrapPeers = addresses\n\tb.checkConnectivity()\n\n\treturn b\n}", "func expectedNewInstance(jobID, datasetID string) *dataset.NewInstance {\n\tnewInstance := &dataset.NewInstance{\n\t\tLinks: &dataset.Links{\n\t\t\tDataset: dataset.Link{\n\t\t\t\tURL: \"http://localhost:22000/datasets/\" + datasetID,\n\t\t\t\tID: datasetID,\n\t\t\t},\n\t\t\tJob: dataset.Link{\n\t\t\t\tURL: \"http://import-api/jobs/\" + jobID,\n\t\t\t\tID: jobID,\n\t\t\t},\n\t\t},\n\t\tDimensions: []dataset.CodeList{},\n\t\tImportTasks: &dataset.InstanceImportTasks{\n\t\t\tImportObservations: &dataset.ImportObservationsTask{\n\t\t\t\tState: dataset.StateCreated.String(),\n\t\t\t},\n\t\t\tBuildHierarchyTasks: []*dataset.BuildHierarchyTask{},\n\t\t\tBuildSearchIndexTasks: []*dataset.BuildSearchIndexTask{},\n\t\t},\n\t\tType: \"cantabular_blob\",\n\t}\n\tif datasetID == \"dataset1\" {\n\t\tnewInstance.Dimensions = []dataset.CodeList{{ID: \"codelist11\"}, {ID: 
\"codelist12\"}}\n\t\tnewInstance.LowestGeography = \"lowest_geo\"\n\t} else if datasetID == \"dataset2\" {\n\t\tnewInstance.Dimensions = []dataset.CodeList{{ID: \"codelist21\"}, {ID: \"codelist22\"}, {ID: \"codelist23\"}}\n\t}\n\treturn newInstance\n}", "func newDeployment(t *testing.T, procUpdates func(ProcessUpdate), kubeClient kubernetes.Interface) *Deployment {\n\tcompList, err := config.NewComponentList(\"../test/data/componentlist.yaml\")\n\tassert.NoError(t, err)\n\tconfig := &config.Config{\n\t\tCancelTimeout: cancelTimeout,\n\t\tQuitTimeout: quitTimeout,\n\t\tBackoffInitialIntervalSeconds: 1,\n\t\tBackoffMaxElapsedTimeSeconds: 1,\n\t\tLog: logger.NewLogger(true),\n\t\tComponentList: compList,\n\t}\n\tcore := newCore(config, &overrides.Builder{}, kubeClient, procUpdates)\n\treturn &Deployment{core}\n}", "func (ans *answer) setBootstrap(c capnp.Client) error {\n\tif ans.ret.HasResults() || len(ans.ret.Message().CapTable) > 0 {\n\t\tpanic(\"setBootstrap called after creating results\")\n\t}\n\t// Add the capability to the table early to avoid leaks if setBootstrap fails.\n\tans.ret.Message().CapTable = []capnp.Client{c}\n\n\tvar err error\n\tans.results, err = ans.ret.NewResults()\n\tif err != nil {\n\t\treturn rpcerr.WrapFailed(\"alloc bootstrap results\", err)\n\t}\n\tiface := capnp.NewInterface(ans.results.Segment(), 0)\n\tif err := ans.results.SetContent(iface.ToPtr()); err != nil {\n\t\treturn rpcerr.WrapFailed(\"alloc bootstrap results\", err)\n\t}\n\treturn nil\n}", "func (rm *resourceManager) newDescribeRequestPayload(\n\tr *resource,\n) (*svcsdk.DescribeModelBiasJobDefinitionInput, error) {\n\tres := &svcsdk.DescribeModelBiasJobDefinitionInput{}\n\n\tif r.ko.Spec.JobDefinitionName != nil {\n\t\tres.SetJobDefinitionName(*r.ko.Spec.JobDefinitionName)\n\t}\n\n\treturn res, nil\n}", "func NewPipelineSpec(spec *pipeline.Spec) PipelineSpec {\n\treturn PipelineSpec{\n\t\tID: spec.ID,\n\t\tDotDAGSource: spec.DotDagSource,\n\t}\n}", "func NewFramework(baseName string) *Framework {\n\tginkgo.By(\"Init framework\")\n\tf := &Framework{\n\t\tBaseName: baseName,\n\t\tContext: context.Background(),\n\t\tSelectorKey: selectorKey,\n\t}\n\n\tginkgo.BeforeEach(f.BeforeEach)\n\tginkgo.AfterEach(f.AfterEach)\n\n\treturn f\n}", "func (m *Master) NewBootstrapController() *Controller {\n\treturn &Controller{\n\t\tNamespaceRegistry: m.namespaceRegistry,\n\t\tServiceRegistry: m.serviceRegistry,\n\t\tMasterCount: m.MasterCount,\n\n\t\tEndpointRegistry: m.endpointRegistry,\n\t\tEndpointInterval: 10 * time.Second,\n\n\t\tServiceClusterIPRegistry: m.serviceClusterIPAllocator,\n\t\tServiceClusterIPRange: m.ServiceClusterIPRange,\n\t\tServiceClusterIPInterval: 3 * time.Minute,\n\n\t\tServiceNodePortRegistry: m.serviceNodePortAllocator,\n\t\tServiceNodePortRange: m.ServiceNodePortRange,\n\t\tServiceNodePortInterval: 3 * time.Minute,\n\n\t\tPublicIP: m.ClusterIP,\n\n\t\tServiceIP: m.ServiceReadWriteIP,\n\t\tServicePort: m.ServiceReadWritePort,\n\t\tExtraServicePorts: m.ExtraServicePorts,\n\t\tExtraEndpointPorts: m.ExtraEndpointPorts,\n\t\tPublicServicePort: m.PublicReadWritePort,\n\t\tKubernetesServiceNodePort: m.KubernetesServiceNodePort,\n\t}\n}", "func (m *messagingSuite) TestProbeBeforeBootstrap() {\n\tvar (\n\t\tserverAddr1 = m.addr\n\t\tserverAddr2 = newaddr(freeport.MustNext())\n\t\tnodeID1 = api.NewNodeId()\n\t\tnodeID2 = api.NewNodeId()\n\t\tsettings2 = transport.DefaultServerSettings(api.NewNode(serverAddr2, nil))\n\t\trpcServer = &transport.Server{\n\t\t\tConfig: &settings2,\n\t\t}\n\t\trequire = 
m.Require()\n\t\tview = NewView(m.k, nil, nil)\n\t)\n\trequire.NoError(rpcServer.Init())\n\trequire.NoError(rpcServer.Start())\n\trequire.NoError(view.RingAdd(m.ctx, serverAddr1, nodeID1))\n\trequire.NoError(view.RingAdd(m.ctx, serverAddr2, nodeID2))\n\tm.createAndStartMembershipService(\"server-0\", serverAddr1, view)\n\n\tjoinerClient := transport.NewGRPCClient(&settings2.Settings, grpc.WithInsecure())\n\n\tprobeResp1, err := joinerClient.Do(m.ctx, serverAddr1, probeRequest())\n\trequire.NoError(err)\n\trequire.Equal(remoting.NodeStatus_OK, probeResp1.GetProbeResponse().GetStatus())\n\n\tprobeResp2, err := joinerClient.Do(m.ctx, serverAddr2, probeRequest())\n\trequire.NoError(err)\n\trequire.Equal(remoting.NodeStatus_BOOTSTRAPPING, probeResp2.GetProbeResponse().GetStatus())\n}", "func WorkloadNew(homeDirectory string, org string) {\n\n\t// Verify that env vars are set properly and determine the working directory.\n\tdir, err := VerifyEnvironment(homeDirectory, false, false, \"\")\n\tif err != nil {\n\t\tcliutils.Fatal(cliutils.CLI_INPUT_ERROR, \"'%v %v' %v\", WORKLOAD_COMMAND, WORKLOAD_CREATION_COMMAND, err)\n\t}\n\n\tif org == \"\" && os.Getenv(DEVTOOL_HZN_ORG) == \"\" {\n\t\tcliutils.Fatal(cliutils.CLI_INPUT_ERROR, \"'%v %v' must specify either --org or set the %v environment variable.\", WORKLOAD_COMMAND, WORKLOAD_CREATION_COMMAND, DEVTOOL_HZN_ORG)\n\t}\n\n\t// Create the working directory.\n\tif err := CreateWorkingDir(dir); err != nil {\n\t\tcliutils.Fatal(cliutils.CLI_INPUT_ERROR, \"'%v %v' %v\", WORKLOAD_COMMAND, WORKLOAD_CREATION_COMMAND, err)\n\t}\n\n\t// If there are any horizon metadata files already in the directory then we wont create any files.\n\tcmd := fmt.Sprintf(\"%v %v\", WORKLOAD_COMMAND, WORKLOAD_CREATION_COMMAND)\n\tFileNotExist(dir, cmd, USERINPUT_FILE, UserInputExists)\n\tFileNotExist(dir, cmd, WORKLOAD_DEFINITION_FILE, WorkloadDefinitionExists)\n\t//FileNotExist(dir, cmd, DEPENDENCIES_FILE, DependenciesExists)\n\n\tif org == \"\" {\n\t\torg = os.Getenv(DEVTOOL_HZN_ORG)\n\t}\n\n\t// Create the metadata files.\n\tif err := CreateUserInputs(dir, true, false, org); err != nil {\n\t\tcliutils.Fatal(cliutils.CLI_GENERAL_ERROR, \"'%v %v' %v\", WORKLOAD_COMMAND, WORKLOAD_CREATION_COMMAND, err)\n\t} else if err := CreateWorkloadDefinition(dir, org); err != nil {\n\t\tcliutils.Fatal(cliutils.CLI_GENERAL_ERROR, \"'%v %v' %v\", WORKLOAD_COMMAND, WORKLOAD_CREATION_COMMAND, err)\n\t}\n\t// } else if err := CreateDependencies(dir); err != nil {\n\t// \tcliutils.Fatal(cliutils.CLI_GENERAL_ERROR, \"'%v %v' %v\", WORKLOAD_COMMAND, WORKLOAD_CREATION_COMMAND, err)\n\t// }\n\n\tfmt.Printf(\"Created horizon metadata files in %v. 
Edit these files to define and configure your new %v.\\n\", dir, WORKLOAD_COMMAND)\n\n}", "func drive(t *testing.T, jobName string, etcds []string, ntask uint64, taskBuilder meritop.TaskBuilder) {\n\tbootstrap := framework.NewBootStrap(jobName, etcds, createListener(t), nil)\n\tbootstrap.SetTaskBuilder(taskBuilder)\n\tbootstrap.SetTopology(example.NewTreeTopology(2, ntask))\n\tbootstrap.Start()\n}", "func testSpec1() *v1.EnvironmentSpec {\n\treturn &v1.EnvironmentSpec{\n\t\tInfra: v1.InfraSpec{\n\t\t\tAZ: v1.AZSpec{\n\t\t\t\tSubscription: []v1.AZSubscription{\n\t\t\t\t\t{Name: \"dummy\", ID: \"12345\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\tSource: v1.SourceSpec{\n\t\t\t\tType: \"local\",\n\t\t\t\tURL: \"../e2e/testdata/terraform\", // relative to dir containing this _test.go file.\n\t\t\t},\n\t\t\tMain: \"main.tf\",\n\t\t},\n\t\tDefaults: v1.ClusterSpec{\n\t\t\tInfra: v1.ClusterInfraSpec{\n\t\t\t\tX: map[string]string{\n\t\t\t\t\t\"overridden\": \"default\",\n\t\t\t\t\t\"notOverridden\": \"default\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tAddons: v1.ClusterAddonSpec{\n\t\t\t\tSource: v1.SourceSpec{\n\t\t\t\t\tType: \"local\",\n\t\t\t\t\tURL: \"../e2e/testdata/addons\", // relative to dir containing this _test.go file.\n\t\t\t\t},\n\t\t\t\tJobs: []string{\n\t\t\t\t\t\"cluster/local/minikube/all.yaml\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tClusters: []v1.ClusterSpec{\n\t\t\t{\n\t\t\t\tName: \"cpe\",\n\t\t\t\tInfra: v1.ClusterInfraSpec{\n\t\t\t\t\tX: map[string]string{\n\t\t\t\t\t\t\"overridden\": \"cpe-cluster\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}, {\n\t\t\t\tName: \"second\",\n\t\t\t\tInfra: v1.ClusterInfraSpec{\n\t\t\t\t\tX: map[string]string{\n\t\t\t\t\t\t\"overridden\": \"second-cluster\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}", "func newMock(deps mockDependencies, t testing.TB) (Component, error) {\n\tbackupConfig := config.NewConfig(\"\", \"\", strings.NewReplacer())\n\tbackupConfig.CopyConfig(config.Datadog)\n\n\tconfig.Datadog.CopyConfig(config.NewConfig(\"mock\", \"XXXX\", strings.NewReplacer()))\n\n\tconfig.SetFeatures(t, deps.Params.Features...)\n\n\t// call InitConfig to set defaults.\n\tconfig.InitConfig(config.Datadog)\n\tc := &cfg{\n\t\tConfig: config.Datadog,\n\t}\n\n\tif !deps.Params.SetupConfig {\n\n\t\tif deps.Params.ConfFilePath != \"\" {\n\t\t\tconfig.Datadog.SetConfigType(\"yaml\")\n\t\t\terr := config.Datadog.ReadConfig(strings.NewReader(deps.Params.ConfFilePath))\n\t\t\tif err != nil {\n\t\t\t\t// The YAML was invalid, fail initialization of the mock config.\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t} else {\n\t\twarnings, _ := setupConfig(deps)\n\t\tc.warnings = warnings\n\t}\n\n\t// Overrides are explicit and will take precedence over any other\n\t// setting\n\tfor k, v := range deps.Params.Overrides {\n\t\tconfig.Datadog.Set(k, v)\n\t}\n\n\t// swap the existing config back at the end of the test.\n\tt.Cleanup(func() { config.Datadog.CopyConfig(backupConfig) })\n\n\treturn c, nil\n}", "func (backend *TestHTTPBackend) Bootstrap() error {\n\t_, err := backend.Server.JustListen()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tbackend.RealPort = backend.Server.RealPort\n\tbackend.RealAddr = backend.Server.RealAddr\n\n\tbackend.WaitGroup.Add(1)\n\tgo backend.Server.JustServe(backend.WaitGroup)\n\treturn nil\n}", "func TestNew_noMetaOnInit(t *testing.T) {\n\tt.Parallel()\n\n\ttmpDir := t.TempDir()\n\tbucket, err := fileblob.OpenBucket(tmpDir, nil)\n\trequire.NoError(t, err)\n\trequire.NoError(t,\n\t\tbucket.WriteAll(context.Background(), \".pulumi/stacks/dev.json\", 
[]byte(\"bar\"), nil))\n\n\tctx := context.Background()\n\t_, err = New(ctx, diagtest.LogSink(t), \"file://\"+filepath.ToSlash(tmpDir), nil)\n\trequire.NoError(t, err)\n\n\tassert.NoFileExists(t, filepath.Join(tmpDir, \".pulumi\", \"meta.yaml\"))\n}", "func (Bootstrapper) Bootstrap(context map[string]interface{}) error {\n\tcfg, err := configstore.RetrieveConfig(false, context)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttxManager, ok := context[jobs.BootstrappedService].(jobs.Manager)\n\tif !ok {\n\t\treturn errors.New(\"transactions repository not initialised\")\n\t}\n\n\tif _, ok := context[bootstrap.BootstrappedQueueServer]; !ok {\n\t\treturn errors.New(\"queue hasn't been initialized\")\n\t}\n\tqueueSrv := context[bootstrap.BootstrappedQueueServer].(*queue.Server)\n\n\tsapi, err := gsrpc.NewSubstrateAPI(cfg.GetCentChainNodeURL())\n\tif err != nil {\n\t\treturn err\n\t}\n\tcentSAPI := &defaultSubstrateAPI{sapi}\n\tclient := NewAPI(centSAPI, cfg, queueSrv)\n\textStatusTask := NewExtrinsicStatusTask(cfg.GetCentChainIntervalRetry(), cfg.GetCentChainMaxRetries(), txManager, centSAPI.GetBlockHash, centSAPI.GetBlock, centSAPI.GetMetadataLatest, centSAPI.GetStorage)\n\tqueueSrv.RegisterTaskType(extStatusTask.TaskTypeName(), extStatusTask)\n\tcontext[BootstrappedCentChainClient] = client\n\n\treturn nil\n}", "func (sm SchedulerModel) createContainerSpec(name string, image string, env map[string]string) (containers []v1.Container) {\n\tenvVars := []v1.EnvVar{}\n\n\tfor envName, envVal := range env {\n\t\tenvVars = append(envVars, v1.EnvVar{\n\t\t\tName: envName,\n\t\t\tValue: envVal,\n\t\t})\n\t}\n\n\tcontainer := v1.Container{\n\t\tName: name,\n\t\tImage: image,\n\t\tEnv: envVars,\n\t}\n\n\treturn []v1.Container{container}\n}", "func NewGetBootstrapParams() *GetBootstrapParams {\n\tvar ()\n\treturn &GetBootstrapParams{\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}", "func New(id string, runtime *runsc.Runsc, stdio stdio.Stdio) *Init {\n\tp := &Init{\n\t\tid: id,\n\t\truntime: runtime,\n\t\tstdio: stdio,\n\t\tstatus: 0,\n\t\twaitBlock: make(chan struct{}),\n\t}\n\tp.initState = &createdState{p: p}\n\treturn p\n}", "func BuildPodTemplateSpec(experiment *ExperimentDetails) *podtemplatespec.Builder {\n\tpodtemplate := podtemplatespec.NewBuilder().\n\t\tWithName(experiment.JobName).\n\t\tWithNamespace(experiment.Namespace).\n\t\tWithLabels(experiment.ExpLabels).\n\t\tWithServiceAccountName(experiment.SvcAccount).\n\t\tWithRestartPolicy(corev1.RestartPolicyOnFailure)\n\n\t// Add VolumeBuilders, if exists\n\tif experiment.VolumeOpts.VolumeBuilders != nil {\n\t\tlog.Info(\"Building Pod with VolumeBuilders\")\n\t\t//log.Info(volumeBuilders)\n\t\tpodtemplate.WithVolumeBuilders(experiment.VolumeOpts.VolumeBuilders)\n\t}\n\n\t_, err := podtemplate.Build()\n\n\tif err != nil {\n\t\tlog.Info(err)\n\t}\n\treturn podtemplate\n}", "func NewBootstrapClient() *BootstrapClient {\n\tconfig := api.DefaultConfig()\n\tconfig.ReadEnvironment()\n\tvaultClient, err := api.NewClient(config)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tpostgresName := os.Getenv(EnvVaultPostgresName)\n\tif postgresName == \"\" {\n\t\tpostgresName = os.Getenv(EnvServiceName)\n\t}\n\tmongoClusterName := os.Getenv(EnvVaultMongoClusterName)\n\tif mongoClusterName == \"\" {\n\t\tmongoClusterName = os.Getenv(EnvServiceName)\n\t}\n\tawsRegion := os.Getenv(EnvVaultAWSRegion)\n\tif awsRegion == \"\" {\n\t\tawsRegion = \"eu-west-1\"\n\t}\n\n\tbootstrapClient := &BootstrapClient{\n\t\tVaultClient: vaultClient,\n\t\tBootstrapConfig: 
BootstrapConfig{\n\t\t\tEnv: os.Getenv(EnvEnv),\n\t\t\tServiceName: os.Getenv(EnvServiceName),\n\t\t\tVaultRootToken: os.Getenv(EnvVaultRootToken),\n\t\t\tAuthTokenTTL: \"20m\",\n\t\t\tAuthTokenMaxTTL: \"30m\",\n\t\t\tPostgresName: postgresName,\n\t\t\tPostgresRootConnURL: os.Getenv(EnvVaultPostgresRootURL),\n\t\t\tPostgresRoleLeaseTTL: \"1h\",\n\t\t\tPostgresRoleLeaseMaxTTL: \"24h\",\n\t\t\tMongoClusterName: mongoClusterName,\n\t\t\tMongoRootConnURL: os.Getenv(EnvVaultMongoRootURL),\n\t\t\tMongoRoleLeaseTTL: \"1h\",\n\t\t\tMongoRoleLeaseMaxTTL: \"24h\",\n\t\t\tTLSCertName: os.Getenv(EnvVaultTLSCertName),\n\t\t\tTLSCertLeaseTTL: \"24h\",\n\t\t\tAWSRegion: awsRegion,\n\t\t\tAWSRootAccessKey: os.Getenv(EnvVaultAWSRootAccessKey),\n\t\t\tAWSRootSecretKey: os.Getenv(EnvVaultAWSRootSecretKey),\n\t\t\tAWSRoleName: os.Getenv(EnvVaultAWSRoleName),\n\t\t\tAWSAssumedRole: os.Getenv(EnvVaultAWSAssumeRole),\n\t\t},\n\t\tconfig: internalBootstrapConfig{},\n\t}\n\tcertData, err := ioutil.ReadFile(os.Getenv(EnvTLSCertFilePath))\n\tif err == nil {\n\t\tbootstrapClient.BootstrapConfig.TLSCertData = string(certData)\n\t}\n\tcertKeyData, err := ioutil.ReadFile(os.Getenv(EnvTLSCertKeyFilePath))\n\tif err == nil {\n\t\tbootstrapClient.BootstrapConfig.TLSCertKeyData = string(certKeyData)\n\t}\n\tgenericDataJSON, err := ioutil.ReadFile(os.Getenv(EnvGenericDataPath))\n\tif err == nil {\n\t\tbootstrapClient.BootstrapConfig.GenericData = string(genericDataJSON)\n\t}\n\ttransitDataJSON, err := ioutil.ReadFile(os.Getenv(EnvTransitDataPath))\n\tif err == nil {\n\t\tbootstrapClient.BootstrapConfig.TransitData = string(transitDataJSON)\n\t}\n\tawsRolePolicyJSONFilename := os.Getenv(EnvVaultAWSRolePolicyJSON)\n\tif awsRolePolicyJSONFilename != \"\" {\n\t\tawsRolePolicyJSON, err := ioutil.ReadFile(awsRolePolicyJSONFilename)\n\t\tif err == nil {\n\t\t\tbootstrapClient.BootstrapConfig.AWSRolePolicyJSON = string(awsRolePolicyJSON)\n\t\t} else {\n\t\t\tfmt.Println(fmt.Errorf(\"Error %v\", err))\n\t\t}\n\t}\n\n\treturn bootstrapClient\n}", "func NewBootstrapResult() BootstrapResult {\n\treturn &bootstrapResult{\n\t\tresults: make(ShardResults),\n\t\tunfulfilled: make(ShardTimeRanges),\n\t}\n}", "func MakeSpec(\n\tconn, unique string,\n\tneedsUpdate func(db.Specifier, db.Specifier) bool,\n\tnewDBSpec db.Specifier,\n\tnewDBFunc DBMaker,\n\tnewDBError error,\n\tupdateFunc Updater,\n\tupdateErr error,\n) db.Specifier {\n\treturn &Spec{\n\t\tConn: conn,\n\t\tUnique: unique,\n\t\tUpdateNeeded: needsUpdate,\n\t\tNewDBSpec: newDBSpec,\n\t\tNewDBFunc: newDBFunc,\n\t\tNewDBError: newDBError,\n\t\tUpdateFunc: updateFunc,\n\t\tUpdateErr: updateErr,\n\t}\n}", "func newLabelStage(logger log.Logger, configs interface{}) (*labelStage, error) {\n\tcfgs := &LabelsConfig{}\n\terr := mapstructure.Decode(configs, cfgs)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\terr = validateLabelsConfig(*cfgs)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &labelStage{\n\t\tcfgs: *cfgs,\n\t\tlogger: logger,\n\t}, nil\n}", "func NewBootstrapActivity(awsSessionFactory *AWSSessionFactory) *BootstrapActivity {\n\treturn &BootstrapActivity{\n\t\tawsSessionFactory: awsSessionFactory,\n\t}\n}", "func newTaskBuilder(b *jobBuilder, name string) *taskBuilder {\n\tparts, err := b.jobNameSchema.ParseJobName(name)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\treturn &taskBuilder{\n\t\tjobBuilder: b,\n\t\tparts: parts,\n\t\tName: name,\n\t\tSpec: &specs.TaskSpec{},\n\t\trecipeProperties: map[string]string{},\n\t}\n}", "func (cli *FakeDatabaseClient) 
BootstrapDatabase(ctx context.Context, in *dbdpb.BootstrapDatabaseRequest, opts ...grpc.CallOption) (*dbdpb.BootstrapDatabaseResponse, error) {\n\tpanic(\"implement me\")\n}", "func (*Bootstrap) Descriptor() ([]byte, []int) {\n\treturn file_job_handleim_internal_conf_conf_proto_rawDescGZIP(), []int{0}\n}", "func NewProbe(nsProbe *ns.Probe, lxdURL string) (*Probe, error) {\n\tprobe := &Probe{\n\t\tProbe: nsProbe,\n\t\tstate: common.StoppedState,\n\t\tcontainerMap: make(map[string]containerInfo),\n\t\tquit: make(chan struct{}),\n\t}\n\n\treturn probe, nil\n}", "func newChunk(uniqifier int) *chunkBuilder {\n\tchunkID := sha256.Sum256([]byte(fmt.Sprintf(\"chunk-%v\", uniqifier)))\n\tobjectID := sha256.Sum256([]byte(fmt.Sprintf(\"object-%v\", uniqifier)))\n\tconfig, err := compiledcfg.NewConfig(&configpb.ProjectConfig{\n\t\tLastUpdated: timestamppb.New(time.Date(2022, time.January, 1, 0, 0, 0, 0, time.UTC)),\n\t})\n\tif err != nil {\n\t\t// This should never occur, as the config should be valid.\n\t\tpanic(err)\n\t}\n\treturn &chunkBuilder{\n\t\tproject: \"testproject\",\n\t\tchunkID: hex.EncodeToString(chunkID[:16]),\n\t\tobjectID: hex.EncodeToString(objectID[:16]),\n\t\truleset: cache.NewRuleset(\"\", nil, rules.StartingVersion, time.Time{}),\n\t\tconfig: config,\n\t\toldAlgorithms: false,\n\t}\n}", "func TestNewProbeInfoBuilder(t *testing.T) {\n\tassert := assert.New(t)\n\tprobeType := rand.String(6)\n\tprobeCat := rand.String(7)\n\tvar supplyCS []*sdk.TemplateDTO\n\tvar acctDef []*AccountDefEntry\n\tprobeInfoBldr := NewProbeInfoBuilder(probeType, probeCat, supplyCS, acctDef)\n\tassert.Equal(probeType, *probeInfoBldr.probeInfo.ProbeType)\n}", "func NewCmdInit(parentCommand string) *cobra.Command {\n\topts := kubernetes.CommandInitOption{}\n\tcmd := &cobra.Command{\n\t\tUse: \"init\",\n\t\tShort: \"Install the Karmada control plane in a Kubernetes cluster\",\n\t\tLong: initLong,\n\t\tExample: initExample(parentCommand),\n\t\tSilenceUsage: true,\n\t\tDisableFlagsInUseLine: true,\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\tif err := opts.Validate(parentCommand); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif err := opts.Complete(); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif err := opts.RunInit(parentCommand); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\treturn nil\n\t\t},\n\t\tArgs: func(cmd *cobra.Command, args []string) error {\n\t\t\tfor _, arg := range args {\n\t\t\t\tif len(arg) > 0 {\n\t\t\t\t\treturn fmt.Errorf(\"%q does not take any arguments, got %q\", cmd.CommandPath(), args)\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn nil\n\t\t},\n\t\tAnnotations: map[string]string{\n\t\t\tutil.TagCommandGroup: util.GroupClusterRegistration,\n\t\t},\n\t}\n\tflags := cmd.Flags()\n\tflags.StringVarP(&opts.ImageRegistry, \"private-image-registry\", \"\", \"\", \"Private image registry where pull images from. If set, all required images will be downloaded from it, it would be useful in offline installation scenarios. 
In addition, you still can use --kube-image-registry to specify the registry for Kubernetes's images.\")\n\tflags.StringSliceVar(&opts.PullSecrets, \"image-pull-secrets\", nil, \"Image pull secrets are used to pull images from the private registry, could be secret list separated by comma (e.g '--image-pull-secrets PullSecret1,PullSecret2', the secrets should be pre-settled in the namespace declared by '--namespace')\")\n\t// kube image registry\n\tflags.StringVarP(&opts.KubeImageMirrorCountry, \"kube-image-mirror-country\", \"\", \"\", \"Country code of the kube image registry to be used. For Chinese mainland users, set it to cn\")\n\tflags.StringVarP(&opts.KubeImageRegistry, \"kube-image-registry\", \"\", \"\", \"Kube image registry. For Chinese mainland users, you may use local gcr.io mirrors such as registry.cn-hangzhou.aliyuncs.com/google_containers to override default kube image registry\")\n\tflags.StringVar(&opts.KubeImageTag, \"kube-image-tag\", \"v1.25.4\", \"Choose a specific Kubernetes version for the control plane.\")\n\t// cert\n\tflags.StringVar(&opts.ExternalIP, \"cert-external-ip\", \"\", \"the external IP of Karmada certificate (e.g 192.168.1.2,172.16.1.2)\")\n\tflags.StringVar(&opts.ExternalDNS, \"cert-external-dns\", \"\", \"the external DNS of Karmada certificate (e.g localhost,localhost.com)\")\n\tflags.DurationVar(&opts.CertValidity, \"cert-validity-period\", cert.Duration365d, \"the validity period of Karmada certificate (e.g 8760h0m0s, that is 365 days)\")\n\t// Kubernetes\n\tflags.StringVarP(&opts.Namespace, \"namespace\", \"n\", \"karmada-system\", \"Kubernetes namespace\")\n\tflags.StringVar(&opts.StorageClassesName, \"storage-classes-name\", \"\", \"Kubernetes StorageClasses Name\")\n\tflags.StringVar(&opts.KubeConfig, \"kubeconfig\", \"\", \"absolute path to the kubeconfig file\")\n\tflags.StringVar(&opts.Context, \"context\", \"\", \"The name of the kubeconfig context to use\")\n\tflags.StringVar(&opts.HostClusterDomain, \"host-cluster-domain\", options.DefaultHostClusterDomain, \"The cluster domain of karmada host cluster. (e.g. --host-cluster-domain=host.karmada)\")\n\t// etcd\n\tflags.StringVarP(&opts.EtcdStorageMode, \"etcd-storage-mode\", \"\", \"hostPath\",\n\t\tfmt.Sprintf(\"etcd data storage mode(%s). value is PVC, specify --storage-classes-name\", strings.Join(kubernetes.SupportedStorageMode(), \",\")))\n\tflags.StringVarP(&opts.EtcdImage, \"etcd-image\", \"\", \"\", \"etcd image\")\n\tflags.StringVarP(&opts.EtcdInitImage, \"etcd-init-image\", \"\", kubernetes.DefaultInitImage, \"etcd init container image\")\n\tflags.Int32VarP(&opts.EtcdReplicas, \"etcd-replicas\", \"\", 1, \"etcd replica set, cluster 3,5...singular\")\n\tflags.StringVarP(&opts.EtcdHostDataPath, \"etcd-data\", \"\", \"/var/lib/karmada-etcd\", \"etcd data path,valid in hostPath mode.\")\n\tflags.StringVarP(&opts.EtcdNodeSelectorLabels, \"etcd-node-selector-labels\", \"\", \"\", \"etcd pod select the labels of the node. valid in hostPath mode ( e.g. --etcd-node-selector-labels karmada.io/etcd=true)\")\n\tflags.StringVarP(&opts.EtcdPersistentVolumeSize, \"etcd-pvc-size\", \"\", \"5Gi\", \"etcd data path,valid in pvc mode.\")\n\t// karmada\n\tflags.StringVar(&opts.CRDs, \"crds\", kubernetes.DefaultCrdURL, \"Karmada crds resource.(local file e.g. --crds /root/crds.tar.gz)\")\n\tflags.StringVarP(&opts.KarmadaAPIServerAdvertiseAddress, \"karmada-apiserver-advertise-address\", \"\", \"\", \"The IP address the Karmada API Server will advertise it's listening on. 
If not set, the address on the master node will be used.\")\n\tflags.Int32VarP(&opts.KarmadaAPIServerNodePort, \"port\", \"p\", 32443, \"Karmada apiserver service node port\")\n\tflags.StringVarP(&opts.KarmadaDataPath, \"karmada-data\", \"d\", \"/etc/karmada\", \"Karmada data path. kubeconfig cert and crds files\")\n\tflags.StringVarP(&opts.KarmadaPkiPath, \"karmada-pki\", \"\", \"/etc/karmada/pki\", \"Karmada pki path. Karmada cert files\")\n\tflags.StringVarP(&opts.KarmadaAPIServerImage, \"karmada-apiserver-image\", \"\", \"\", \"Kubernetes apiserver image\")\n\tflags.Int32VarP(&opts.KarmadaAPIServerReplicas, \"karmada-apiserver-replicas\", \"\", 1, \"Karmada apiserver replica set\")\n\tflags.StringVarP(&opts.KarmadaSchedulerImage, \"karmada-scheduler-image\", \"\", kubernetes.DefaultKarmadaSchedulerImage, \"Karmada scheduler image\")\n\tflags.Int32VarP(&opts.KarmadaSchedulerReplicas, \"karmada-scheduler-replicas\", \"\", 1, \"Karmada scheduler replica set\")\n\tflags.StringVarP(&opts.KubeControllerManagerImage, \"karmada-kube-controller-manager-image\", \"\", \"\", \"Kubernetes controller manager image\")\n\tflags.Int32VarP(&opts.KubeControllerManagerReplicas, \"karmada-kube-controller-manager-replicas\", \"\", 1, \"Karmada kube controller manager replica set\")\n\tflags.StringVarP(&opts.KarmadaControllerManagerImage, \"karmada-controller-manager-image\", \"\", kubernetes.DefaultKarmadaControllerManagerImage, \"Karmada controller manager image\")\n\tflags.Int32VarP(&opts.KarmadaControllerManagerReplicas, \"karmada-controller-manager-replicas\", \"\", 1, \"Karmada controller manager replica set\")\n\tflags.StringVarP(&opts.KarmadaWebhookImage, \"karmada-webhook-image\", \"\", kubernetes.DefualtKarmadaWebhookImage, \"Karmada webhook image\")\n\tflags.Int32VarP(&opts.KarmadaWebhookReplicas, \"karmada-webhook-replicas\", \"\", 1, \"Karmada webhook replica set\")\n\tflags.StringVarP(&opts.KarmadaAggregatedAPIServerImage, \"karmada-aggregated-apiserver-image\", \"\", kubernetes.DefaultKarmadaAggregatedAPIServerImage, \"Karmada aggregated apiserver image\")\n\tflags.Int32VarP(&opts.KarmadaAggregatedAPIServerReplicas, \"karmada-aggregated-apiserver-replicas\", \"\", 1, \"Karmada aggregated apiserver replica set\")\n\tflags.IntVarP(&opts.WaitComponentReadyTimeout, \"wait-component-ready-timeout\", \"\", cmdinitoptions.WaitComponentReadyTimeout, \"Wait for karmada component ready timeout. 0 means wait forever\")\n\treturn cmd\n}", "func newJobRunner(logger *persist.Logger, ant *Ant, siadirectory string, existingWalletSeed string) (*JobRunner, error) {\n\tjr := &JobRunner{\n\t\tstaticLogger: logger,\n\t\tstaticAntsSyncWG: ant.staticAntsSyncWG,\n\t\tstaticAnt: ant,\n\t\tstaticClient: ant.StaticClient,\n\t\tstaticDataDir: ant.Config.DataDir,\n\t}\n\n\t// Get the wallet\n\twg, err := jr.staticClient.WalletGet()\n\tif err != nil {\n\t\treturn nil, errors.AddContext(err, \"can't get wallet info\")\n\t}\n\tif wg.Unlocked && existingWalletSeed == \"\" {\n\t\t// Set the wallet seed in the jobrunner and return. 
This case happens\n\t\t// when newJobRunner() is called multiple times (by purpose or by\n\t\t// mistake) on the ant.\n\t\twsg, err := jr.staticClient.WalletSeedsGet()\n\t\tif err != nil {\n\t\t\treturn nil, errors.AddContext(err, \"can't get wallet seeds\")\n\t\t}\n\t\tjr.StaticWalletSeed = wsg.PrimarySeed\n\t\treturn jr, nil\n\t}\n\n\t// Init the wallet when needed and save seed\n\tvar checkSeed bool\n\tif existingWalletSeed == \"\" && !wg.Encrypted {\n\t\t// No wallet seed was specified and wallet is encrypted. Initialize a\n\t\t// new wallet.\n\t\tjr.staticLogger.Debugf(\"%v: init wallet\", jr.staticDataDir)\n\t\twalletParams, err := jr.staticClient.WalletInitPost(\"\", false)\n\t\tif err != nil {\n\t\t\ter := errors.AddContext(err, \"can't init wallet\")\n\t\t\tjr.staticLogger.Errorf(\"%v: %v\", jr.staticDataDir, er)\n\t\t\treturn nil, er\n\t\t}\n\t\tjr.StaticWalletSeed = walletParams.PrimarySeed\n\t} else if existingWalletSeed == \"\" && wg.Encrypted {\n\t\t// Nothing to do. Not sure if or when this case can happen.\n\t} else if existingWalletSeed != \"\" && !wg.Encrypted {\n\t\t// A wallet seed was specified, but wallet is not encrypted. Initialize\n\t\t// the wallet with the existing seed.\n\t\tjr.staticLogger.Debugf(\"%v: init wallet using existing seed\", jr.staticDataDir)\n\t\terr := jr.staticClient.WalletInitSeedPost(existingWalletSeed, \"\", false)\n\t\tif err != nil {\n\t\t\ter := errors.AddContext(err, \"can't init wallet using existing seed\")\n\t\t\tjr.staticLogger.Errorf(\"%v: %v\", jr.staticDataDir, er)\n\t\t\treturn nil, er\n\t\t}\n\t\tjr.StaticWalletSeed = existingWalletSeed\n\t} else if existingWalletSeed != \"\" && wg.Encrypted {\n\t\t// A wallet seed was specified, wallet is encrypted. Just save seed.\n\t\t// Executed e.g. during siad upgrade with job runner re-creation.\n\t\tcheckSeed = true\n\t\tjr.staticLogger.Debugf(\"%v: use existing initialized wallet\", jr.staticDataDir)\n\t\tjr.StaticWalletSeed = existingWalletSeed\n\t}\n\n\t// Unlock the wallet\n\terr = jr.staticClient.WalletUnlockPost(jr.StaticWalletSeed)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Check that actual seed equals existingWalletSeed.\n\tif checkSeed {\n\t\twsg, err := jr.staticClient.WalletSeedsGet()\n\t\tif err != nil {\n\t\t\treturn nil, errors.AddContext(err, \"can't get wallet seeds\")\n\t\t}\n\t\tif wsg.PrimarySeed != existingWalletSeed {\n\t\t\treturn nil, errors.New(\"wallet primary seed doesn't equal expected existing seed\")\n\t\t}\n\t}\n\n\treturn jr, nil\n}" ]
[ "0.5944629", "0.5933648", "0.59169173", "0.56426483", "0.5536578", "0.5498435", "0.54153734", "0.5258806", "0.51931775", "0.5177603", "0.514529", "0.5142841", "0.5117338", "0.5056667", "0.50183445", "0.5017194", "0.49952316", "0.49911648", "0.49311557", "0.49291143", "0.49195227", "0.4914043", "0.490853", "0.48887983", "0.4886003", "0.48783633", "0.48781615", "0.4868", "0.48636082", "0.48544884", "0.4838778", "0.4835153", "0.4820801", "0.48202768", "0.48164973", "0.48152587", "0.47889867", "0.47807205", "0.47725382", "0.47690845", "0.47584617", "0.472289", "0.4719108", "0.47103038", "0.46754596", "0.46674767", "0.46462807", "0.46343607", "0.4624972", "0.4623107", "0.4611991", "0.46074805", "0.46059015", "0.46051937", "0.46049747", "0.45926765", "0.45900524", "0.45862976", "0.45855886", "0.45784348", "0.45765835", "0.45742306", "0.45717925", "0.45618877", "0.45607296", "0.45510912", "0.45446455", "0.45428053", "0.45377824", "0.45367667", "0.45313168", "0.45152798", "0.45132506", "0.45026085", "0.4502101", "0.44932306", "0.44892442", "0.4488963", "0.44696468", "0.44677755", "0.44677183", "0.4451835", "0.44512478", "0.44405532", "0.44340333", "0.442988", "0.44294208", "0.44129786", "0.44057834", "0.440453", "0.440278", "0.4402689", "0.43988243", "0.43987468", "0.43974516", "0.43873876", "0.43762335", "0.43708217", "0.43688592", "0.43609908" ]
0.7841147
0
NewJobResource initializes a new JSONAPI job resource
func NewJobResource(j job.Job) *JobResource {
	resource := &JobResource{
		JAID:              NewJAIDInt32(j.ID),
		Name:              j.Name.ValueOrZero(),
		Type:              JobSpecType(j.Type),
		SchemaVersion:     j.SchemaVersion,
		GasLimit:          j.GasLimit,
		ForwardingAllowed: j.ForwardingAllowed,
		MaxTaskDuration:   j.MaxTaskDuration,
		PipelineSpec:      NewPipelineSpec(j.PipelineSpec),
		ExternalJobID:     j.ExternalJobID,
	}

	switch j.Type {
	case job.DirectRequest:
		resource.DirectRequestSpec = NewDirectRequestSpec(j.DirectRequestSpec)
	case job.FluxMonitor:
		resource.FluxMonitorSpec = NewFluxMonitorSpec(j.FluxMonitorSpec)
	case job.Cron:
		resource.CronSpec = NewCronSpec(j.CronSpec)
	case job.OffchainReporting:
		resource.OffChainReportingSpec = NewOffChainReportingSpec(j.OCROracleSpec)
	case job.OffchainReporting2:
		resource.OffChainReporting2Spec = NewOffChainReporting2Spec(j.OCR2OracleSpec)
	case job.Keeper:
		resource.KeeperSpec = NewKeeperSpec(j.KeeperSpec)
	case job.VRF:
		resource.VRFSpec = NewVRFSpec(j.VRFSpec)
	case job.Webhook:
		resource.WebhookSpec = NewWebhookSpec(j.WebhookSpec)
	case job.BlockhashStore:
		resource.BlockhashStoreSpec = NewBlockhashStoreSpec(j.BlockhashStoreSpec)
	case job.BlockHeaderFeeder:
		resource.BlockHeaderFeederSpec = NewBlockHeaderFeederSpec(j.BlockHeaderFeederSpec)
	case job.Bootstrap:
		resource.BootstrapSpec = NewBootstrapSpec(j.BootstrapSpec)
	case job.Gateway:
		resource.GatewaySpec = NewGatewaySpec(j.GatewaySpec)
	}

	jes := []JobError{}
	for _, e := range j.JobSpecErrors {
		jes = append(jes, NewJobError(e))
	}
	resource.Errors = jes

	return resource
}
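A minimal usage sketch of the constructor above, for context only. The import paths and the struct-literal fields are assumptions chosen to match the snippet, not taken from the record; note that NewPipelineSpec (shown among the negatives below) dereferences its *pipeline.Spec argument, so the pointer must be non-nil before conversion.

// Hedged usage sketch: import paths are assumed, not verified against the
// actual module layout of the source repository.
package main

import (
	"encoding/json"
	"fmt"

	"github.com/smartcontractkit/chainlink/v2/core/services/job"      // assumed path
	"github.com/smartcontractkit/chainlink/v2/core/services/pipeline" // assumed path
	"github.com/smartcontractkit/chainlink/v2/core/web/presenters"    // assumed path
)

func main() {
	// Leave j.Type at its zero value so no type-specific spec constructor
	// runs; PipelineSpec must be set because NewJobResource dereferences it.
	j := job.Job{
		ID:           1,
		PipelineSpec: &pipeline.Spec{ID: 1},
	}
	resource := presenters.NewJobResource(j)

	out, err := json.MarshalIndent(resource, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}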
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func NewJobResource(j job.Job) *JobResource {\n\tresource := &JobResource{\n\t\tJAID: NewJAIDInt32(j.ID),\n\t\tName: j.Name.ValueOrZero(),\n\t\tType: JobSpecType(j.Type),\n\t\tSchemaVersion: j.SchemaVersion,\n\t\tMaxTaskDuration: j.MaxTaskDuration,\n\t\tPipelineSpec: NewPipelineSpec(j.PipelineSpec),\n\t}\n\n\tswitch j.Type {\n\tcase job.DirectRequest:\n\t\tresource.DirectRequestSpec = NewDirectRequestSpec(j.DirectRequestSpec)\n\tcase job.FluxMonitor:\n\t\tresource.FluxMonitorSpec = NewFluxMonitorSpec(j.FluxMonitorSpec)\n\tcase job.OffchainReporting:\n\t\tresource.OffChainReportingSpec = NewOffChainReportingSpec(j.OffchainreportingOracleSpec)\n\tcase job.Keeper:\n\t\tresource.KeeperSpec = NewKeeperSpec(j.KeeperSpec)\n\t}\n\n\tjes := []JobError{}\n\tfor _, e := range j.JobSpecErrors {\n\t\tjes = append(jes, NewJobError((e)))\n\t}\n\tresource.Errors = jes\n\n\treturn resource\n}", "func NewJob(ctx *pulumi.Context,\n\tname string, args *JobArgs, opts ...pulumi.ResourceOption) (*Job, error) {\n\tif args == nil {\n\t\targs = &JobArgs{}\n\t}\n\n\treplaceOnChanges := pulumi.ReplaceOnChanges([]string{\n\t\t\"location\",\n\t\t\"project\",\n\t})\n\topts = append(opts, replaceOnChanges)\n\topts = internal.PkgResourceDefaultOpts(opts)\n\tvar resource Job\n\terr := ctx.RegisterResource(\"google-native:dataflow/v1b3:Job\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func new_job(w http.ResponseWriter, req *http.Request) {\n\tfmt.Println(\"Handling connection...\")\n\n\t// Parse the HTTP request.\n\tif err := req.ParseForm(); err != nil {\n\t\tpanic(err)\n\t}\n\n\t// Put the bytes from the request into a file\n\tbuf := new(bytes.Buffer)\n\tbuf.ReadFrom(req.Body)\n\tjobJson := buf.String()\n\n\t// Print out the json.\n\tfmt.Println(jobJson)\n\n\t// Convert string json to job struct\n\tjob := data.JsonToJob([]byte(jobJson))\n\n\t// Run the code and get []byte output\n\toutput := runCode(job.Extension, job.Code, job.FileName)\n\n\t// Send a response back.\n\tw.Write(output)\n}", "func new_job(w http.ResponseWriter, req *http.Request) {\n\tfmt.Println(\"Handling connection...\")\n\n\t// Parse the HTTP request.\n\tif err := req.ParseForm(); err != nil {\n\t\tpanic(err)\n\t}\n\n\t// Put the bytes from the request into a file\n\tbuf := new(bytes.Buffer)\n\tbuf.ReadFrom(req.Body)\n\tjobJson := buf.String()\n\n\t// Convert string json to job struct\n\tjob := data.JsonToJob([]byte(jobJson))\n\n\tvar num *int = nil\n\n\tif job.ParameterEnd >= job.ParameterStart {\n\t\tnum = &job.ParameterStart\n\t}\n\n\tvar args []string\n\tif job.Args[0] != \"NONE\" {\n\t\targs = job.Args\n\t}\n\n\t// Run the code and get []byte output\n\toutput := runCode(job.Extension, job.Code, job.FileName, num, args)\n\n\t// Send a response back.\n\tw.Write(output)\n}", "func (c *Client) CreateNewJob(job *Job) error {\n\tbody, err := json.Marshal(job)\n\tif err != nil {\n\t\tlogrus.WithError(err)\n\t\treturn err\n\t}\n\n\turl := fmt.Sprintf(\"http://%s/api/v1/jobs\", c.options.ServerAddr)\n\tresp, err := http.Post(url, \"application/json\", bytes.NewReader(body))\n\tif err != nil {\n\t\tlogrus.WithError(err)\n\t\treturn err\n\t}\n\tdefer resp.Body.Close()\n\n\tbody, err = ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\tlogrus.WithError(err)\n\t}\n\treturn err\n}", "func (c *jobsRESTClient) CreateJob(ctx context.Context, req *runpb.CreateJobRequest, opts ...gax.CallOption) (*CreateJobOperation, error) {\n\tm := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: 
true}\n\tbody := req.GetJob()\n\tjsonReq, err := m.Marshal(body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbaseUrl, err := url.Parse(c.endpoint)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tbaseUrl.Path += fmt.Sprintf(\"/v2/%v/jobs\", req.GetParent())\n\n\tparams := url.Values{}\n\tparams.Add(\"$alt\", \"json;enum-encoding=int\")\n\tparams.Add(\"jobId\", fmt.Sprintf(\"%v\", req.GetJobId()))\n\tif req.GetValidateOnly() {\n\t\tparams.Add(\"validateOnly\", fmt.Sprintf(\"%v\", req.GetValidateOnly()))\n\t}\n\n\tbaseUrl.RawQuery = params.Encode()\n\n\t// Build HTTP headers from client and context metadata.\n\thds := []string{\"x-goog-request-params\", fmt.Sprintf(\"%s=%v\", \"parent\", url.QueryEscape(req.GetParent()))}\n\n\thds = append(c.xGoogHeaders, hds...)\n\thds = append(hds, \"Content-Type\", \"application/json\")\n\theaders := gax.BuildHeaders(ctx, hds...)\n\tunm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}\n\tresp := &longrunningpb.Operation{}\n\te := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {\n\t\tif settings.Path != \"\" {\n\t\t\tbaseUrl.Path = settings.Path\n\t\t}\n\t\thttpReq, err := http.NewRequest(\"POST\", baseUrl.String(), bytes.NewReader(jsonReq))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\thttpReq = httpReq.WithContext(ctx)\n\t\thttpReq.Header = headers\n\n\t\thttpRsp, err := c.httpClient.Do(httpReq)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer httpRsp.Body.Close()\n\n\t\tif err = googleapi.CheckResponse(httpRsp); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tbuf, err := io.ReadAll(httpRsp.Body)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := unm.Unmarshal(buf, resp); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t}, opts...)\n\tif e != nil {\n\t\treturn nil, e\n\t}\n\n\toverride := fmt.Sprintf(\"/v2/%s\", resp.GetName())\n\treturn &CreateJobOperation{\n\t\tlro: longrunning.InternalNewOperation(*c.LROClient, resp),\n\t\tpollPath: override,\n\t}, nil\n}", "func newJobJob(dbJob *models.Job) (j *JobJob, err error) {\n\tj = &JobJob{Job: NewJob(dbJob)}\n\tj.TargetJob, err = models.FindJob(dbJob.ObjectID)\n\tif err != nil {\n\t\treturn j, err\n\t}\n\tif j.TargetJob == nil {\n\t\treturn j, fmt.Errorf(\"job id %d does not exist\", dbJob.ObjectID)\n\t}\n\treturn j, err\n}", "func (c *Jobs) CreateJob(in *JobIn) (out *JobOut, err error) {\n\tbody, err := c.call(\"POST\", \"/jobs/\", in)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer body.Close()\n\n\terr = json.NewDecoder(body).Decode(&out)\n\treturn\n}", "func NewJob(jobType string, data interface{}) Job {\n\tj := Job{\n\t\tuuid: uuid.New().String(),\n\t\tjobType: jobType,\n\t\tdata: data,\n\t}\n\n\t// detect the coordinated request\n\tif req, ok := data.(*request.CoordinatedRequest); ok {\n\t\tj.req = req\n\t}\n\n\treturn j\n}", "func (c *client) startNewJob(ctx context.Context, opts launcher.LaunchOptions, jobInterface v12.JobInterface, ns string, safeName string, safeSha string) ([]runtime.Object, error) {\n\tlog.Logger().Infof(\"about to create a new job for name %s and sha %s\", safeName, safeSha)\n\n\t// lets see if we are using a version stream to store the git operator configuration\n\tfolder := filepath.Join(opts.Dir, \"versionStream\", \"git-operator\")\n\texists, err := files.DirExists(folder)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"failed to check if folder exists %s\", folder)\n\t}\n\tif !exists {\n\t\t// lets try the original location\n\t\tfolder = filepath.Join(opts.Dir, \".jx\", 
\"git-operator\")\n\t}\n\n\tjobFileName := \"job.yaml\"\n\n\tfileNamePath := filepath.Join(opts.Dir, \".jx\", \"git-operator\", \"filename.txt\")\n\texists, err = files.FileExists(fileNamePath)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"failed to check for file %s\", fileNamePath)\n\t}\n\tif exists {\n\t\tdata, err := ioutil.ReadFile(fileNamePath)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrapf(err, \"failed to load file %s\", fileNamePath)\n\t\t}\n\t\tjobFileName = strings.TrimSpace(string(data))\n\t\tif jobFileName == \"\" {\n\t\t\treturn nil, errors.Errorf(\"the job name file %s is empty\", fileNamePath)\n\t\t}\n\t}\n\n\tfileName := filepath.Join(folder, jobFileName)\n\texists, err = files.FileExists(fileName)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"failed to find file %s in repository %s\", fileName, safeName)\n\t}\n\tif !exists {\n\t\treturn nil, errors.Errorf(\"repository %s does not have a Job file: %s\", safeName, fileName)\n\t}\n\n\tresource := &v1.Job{}\n\terr = yamls.LoadFile(fileName, resource)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"failed to load Job file %s in repository %s\", fileName, safeName)\n\t}\n\n\tif !opts.NoResourceApply {\n\t\t// now lets check if there is a resources dir\n\t\tresourcesDir := filepath.Join(folder, \"resources\")\n\t\texists, err = files.DirExists(resourcesDir)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrapf(err, \"failed to check if resources directory %s exists in repository %s\", resourcesDir, safeName)\n\t\t}\n\t\tif exists {\n\t\t\tabsDir, err := filepath.Abs(resourcesDir)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.Wrapf(err, \"failed to get absolute resources dir %s\", resourcesDir)\n\t\t\t}\n\n\t\t\tcmd := &cmdrunner.Command{\n\t\t\t\tName: \"kubectl\",\n\t\t\t\tArgs: []string{\"apply\", \"-f\", absDir},\n\t\t\t}\n\t\t\tlog.Logger().Infof(\"running command: %s\", cmd.CLI())\n\t\t\t_, err = c.runner(cmd)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.Wrapf(err, \"failed to apply resources in dir %s\", absDir)\n\t\t\t}\n\t\t}\n\t}\n\n\t// lets try use a maximum of 31 characters and a minimum of 10 for the sha\n\tnamePrefix := trimLength(safeName, 20)\n\n\tid := uuid.New().String()\n\tresourceName := namePrefix + \"-\" + id\n\n\tresource.Name = resourceName\n\n\tif resource.Labels == nil {\n\t\tresource.Labels = map[string]string{}\n\t}\n\tresource.Labels[constants.DefaultSelectorKey] = constants.DefaultSelectorValue\n\tresource.Labels[launcher.RepositoryLabelKey] = safeName\n\tresource.Labels[launcher.CommitShaLabelKey] = safeSha\n\n\tr2, err := jobInterface.Create(ctx, resource, metav1.CreateOptions{})\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"failed to create Job %s in namespace %s\", resourceName, ns)\n\t}\n\tlog.Logger().Infof(\"created Job %s in namespace %s\", resourceName, ns)\n\treturn []runtime.Object{r2}, nil\n}", "func NewJob() *Job {\n\treturn &Job{}\n}", "func NewJob(ctx *pulumi.Context,\n\tname string, args *JobArgs, opts ...pulumi.ResourceOption) (*Job, error) {\n\tif args == nil {\n\t\treturn nil, errors.New(\"missing one or more required arguments\")\n\t}\n\n\tif args.RoleArn == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'RoleArn'\")\n\t}\n\tif args.Type == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'Type'\")\n\t}\n\topts = internal.PkgResourceDefaultOpts(opts)\n\tvar resource Job\n\terr := ctx.RegisterResource(\"aws-native:databrew:Job\", name, args, &resource, opts...)\n\tif err != nil 
{\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func NewJob(arrTime int) (j *Job) {\n\tj = new(Job)\n\tj.IntAttrs = make(map[string]int)\n\tj.StrAttrs = make(map[string]string)\n\tj.JobId = rand.Int63()\n\tj.ArrTime = arrTime\n\treturn j\n}", "func (c *cloudSchedulerRESTClient) CreateJob(ctx context.Context, req *schedulerpb.CreateJobRequest, opts ...gax.CallOption) (*schedulerpb.Job, error) {\n\tm := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true}\n\tbody := req.GetJob()\n\tjsonReq, err := m.Marshal(body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbaseUrl, err := url.Parse(c.endpoint)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tbaseUrl.Path += fmt.Sprintf(\"/v1/%v/jobs\", req.GetParent())\n\n\tparams := url.Values{}\n\tparams.Add(\"$alt\", \"json;enum-encoding=int\")\n\n\tbaseUrl.RawQuery = params.Encode()\n\n\t// Build HTTP headers from client and context metadata.\n\thds := []string{\"x-goog-request-params\", fmt.Sprintf(\"%s=%v\", \"parent\", url.QueryEscape(req.GetParent()))}\n\n\thds = append(c.xGoogHeaders, hds...)\n\thds = append(hds, \"Content-Type\", \"application/json\")\n\theaders := gax.BuildHeaders(ctx, hds...)\n\topts = append((*c.CallOptions).CreateJob[0:len((*c.CallOptions).CreateJob):len((*c.CallOptions).CreateJob)], opts...)\n\tunm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}\n\tresp := &schedulerpb.Job{}\n\te := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {\n\t\tif settings.Path != \"\" {\n\t\t\tbaseUrl.Path = settings.Path\n\t\t}\n\t\thttpReq, err := http.NewRequest(\"POST\", baseUrl.String(), bytes.NewReader(jsonReq))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\thttpReq = httpReq.WithContext(ctx)\n\t\thttpReq.Header = headers\n\n\t\thttpRsp, err := c.httpClient.Do(httpReq)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer httpRsp.Body.Close()\n\n\t\tif err = googleapi.CheckResponse(httpRsp); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tbuf, err := io.ReadAll(httpRsp.Body)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := unm.Unmarshal(buf, resp); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t}, opts...)\n\tif e != nil {\n\t\treturn nil, e\n\t}\n\treturn resp, nil\n}", "func (m *Master) constructJobResources(c *Config, restStorage map[string]rest.Storage) {\n\t// Note that job's storage settings are changed by changing the batch\n\t// group. 
Clearly we want all jobs to be stored in the same place no\n\t// matter where they're accessed from.\n\trestOptions := func(resource string) generic.RESTOptions {\n\t\treturn generic.RESTOptions{\n\t\t\tStorage: c.StorageDestinations.Search([]string{batch.GroupName, extensions.GroupName}, resource),\n\t\t\tDecorator: m.StorageDecorator(),\n\t\t\tDeleteCollectionWorkers: m.deleteCollectionWorkers,\n\t\t}\n\t}\n\tjobStorage, jobStatusStorage := jobetcd.NewREST(restOptions(\"jobs\"))\n\trestStorage[\"jobs\"] = jobStorage\n\trestStorage[\"jobs/status\"] = jobStatusStorage\n}", "func (r *realKubeClient) CreateJob(job *Job) error {\n\turl := \"/apis/extensions/v1beta1/namespaces/\" + job.Metadata[\"namespace\"].(string) + \"/jobs\"\n\tdata, err := json.Marshal(job)\n\tif err != nil {\n\t\treturn err\n\t}\n\tbyteData := bytes.NewReader(data)\n\treturn r.doPost(url, byteData)\n\n}", "func NewJobResources(js []job.Job) []JobResource {\n\trs := []JobResource{}\n\n\tfor _, j := range js {\n\t\trs = append(rs, *NewJobResource(j))\n\t}\n\n\treturn rs\n}", "func NewJob(opts BulkOptions) (string, error) {\n\ttpl := BulkTemps.NewJob\n\ts, err := RenderTemplate(tpl, opts)\n\tlog.Printf(\"job query: %s\", s)\n\treturn s, err\n}", "func NewJob() *Job {\n\treturn &Job{\n\t\tVars: make(map[string]string),\n\t\tModules: make(map[string]string),\n\t}\n}", "func NewJob() *Job {\n\treturn &Job{\n\t\tID: uuid.New().String(),\n\t\tPriority: PriorityNormal,\n\t\tTimestamp: time.Now(),\n\t\tContentType: ContentTypeMsgpack,\n\t}\n}", "func NewJobClient(subscriptionID string) JobClient {\n return NewJobClientWithBaseURI(DefaultBaseURI, subscriptionID)\n}", "func NewJOB(c *httpclient.Client) *JOB {\n\tclient := httpclient.New()\n\tif c != nil {\n\t\tclient = c\n\t}\n\tsg, err := time.LoadLocation(\"Asia/Singapore\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn &JOB{\n\t\tClient: client,\n\t\tsg: sg,\n\t}\n}", "func NewJob(jobtype string, args ...interface{}) *Job {\n\treturn &Job{\n\t\tType: jobtype,\n\t\tQueue: \"default\",\n\t\tArgs: args,\n\t\tJid: RandomJid(),\n\t\tCreatedAt: time.Now().UTC().Format(time.RFC3339Nano),\n\t\tRetry: 25,\n\t}\n}", "func (client JobClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, accountName string, jobName string, body JobResourceDescription) (result JobCreateOrUpdateFuture, err error) {\n if tracing.IsEnabled() {\n ctx = tracing.StartSpan(ctx, fqdn + \"/JobClient.CreateOrUpdate\")\n defer func() {\n sc := -1\n if result.Response() != nil {\n sc = result.Response().StatusCode\n }\n tracing.EndSpan(ctx, sc, err)\n }()\n }\n if err := validation.Validate([]validation.Validation{\n { TargetValue: body,\n Constraints: []validation.Constraint{\t{Target: \"body.JobResourceDescriptionProperties\", Name: validation.Null, Rule: true, Chain: nil },\n \t{Target: \"body.Identity\", Name: validation.Null, Rule: false ,\n Chain: []validation.Constraint{\t{Target: \"body.Identity.Type\", Name: validation.Null, Rule: true, Chain: nil },\n }}}}}); err != nil {\n return result, validation.NewError(\"microsoftazuremanagementaisupercomputer.JobClient\", \"CreateOrUpdate\", err.Error())\n }\n\n req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, accountName, jobName, body)\n if err != nil {\n err = autorest.NewErrorWithError(err, \"microsoftazuremanagementaisupercomputer.JobClient\", \"CreateOrUpdate\", nil , \"Failure preparing request\")\n return\n }\n\n result, err = client.CreateOrUpdateSender(req)\n if err != nil {\n err = autorest.NewErrorWithError(err, 
\"microsoftazuremanagementaisupercomputer.JobClient\", \"CreateOrUpdate\", result.Response(), \"Failure sending request\")\n return\n }\n\n return\n}", "func NewJob(receipts []uuid.UUID, sendTime time.Time) *Job {\n\treturn &Job{\n\t\tID: uuid.NewUUID(),\n\t\tSendTime: sendTime,\n\t\tSendStatus: READY,\n\t\tReceipts: receipts,\n\t}\n}", "func (c *controller) CreateJob(namespace string, job Job) (*apibatchv1.Job, error) {\n\tj := job.Build()\n\treturn c.k8sBatchClient.Jobs(namespace).Create(j)\n}", "func NewJob(object dbus.BusObject) *Job {\n\treturn &Job{object}\n}", "func (m *MemoryStorage) NewJob(parent context.Context, params JobParams, log *logrus.Entry) (Job, error) {\n\tvar err error\n\t// Validate all the mandatory params are present.\n\tswitch {\n\tcase params.CheckID == \"\":\n\t\terr = errors.New(\"check job missing check ID\")\n\tcase params.Target == \"\":\n\t\terr = errors.New(\"check job missing image\")\n\tcase params.Timeout <= 0:\n\t\terr = errors.New(\"check job missing timeout\")\n\t}\n\n\tif err != nil {\n\t\treturn Job{}, err\n\t}\n\n\tjob := Job{\n\t\tJobParams: JobParams{\n\t\t\tScanID: params.ScanID,\n\t\t\tScanStartTime: params.ScanStartTime,\n\t\t\tCheckID: params.CheckID,\n\t\t\tTarget: params.Target,\n\t\t\tOptions: params.Options,\n\t\t\tRequiredVars: params.RequiredVars,\n\t\t\tImage: params.Image,\n\t\t\tTimeout: params.Timeout,\n\t\t},\n\t\tlog: log,\n\t}\n\n\tjob.Ctx, job.Cancel = context.WithCancel(parent)\n\n\terr = m.Add(job)\n\tif err != nil {\n\t\treturn Job{}, UnableToAddJobError{err}\n\t}\n\treturn job, nil\n}", "func (c *Client) CreateJob(ctx context.Context, body interface{}) (string, error) {\n\tbodyMap, err := toJSONMap(body)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tresponse, err := c.API.SendRequestWithMaps(ctx, \"POST\", \"/bulk\", bodyMap)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tvar jobID string\n\terr = json.Unmarshal(response[\"jobId\"], &jobID)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn jobID, nil\n}", "func (client JobClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, accountName string, jobName string, body JobResourceDescription) (*http.Request, error) {\n pathParameters := map[string]interface{} {\n \"accountName\": accountName,\n \"jobName\": jobName,\n \"resourceGroupName\": resourceGroupName,\n \"subscriptionId\": autorest.Encode(\"path\",client.SubscriptionID),\n }\n\n const APIVersion = \"2020-12-01-preview\"\n queryParameters := map[string]interface{} {\n \"api-version\": APIVersion,\n }\n\n preparer := autorest.CreatePreparer(\nautorest.AsContentType(\"application/json; charset=utf-8\"),\nautorest.AsPut(),\nautorest.WithBaseURL(client.BaseURI),\nautorest.WithPathParameters(\"/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.AISupercomputer/accounts/{accountName}/jobs/{jobName}\",pathParameters),\nautorest.WithJSON(body),\nautorest.WithQueryParameters(queryParameters))\n return preparer.Prepare((&http.Request{}).WithContext(ctx))\n }", "func NewSynchronizationJob()(*SynchronizationJob) {\n m := &SynchronizationJob{\n Entity: *NewEntity(),\n }\n return m\n}", "func (*SchematicsV1) NewJobData(jobType string) (model *JobData, err error) {\n\tmodel = &JobData{\n\t\tJobType: core.StringPtr(jobType),\n\t}\n\terr = core.ValidateStruct(model, \"required parameters\")\n\treturn\n}", "func CreateJob(request *http.Request) *Job {\n\tjob := &Job{\n\t\trequest: request,\n\t\tresponse: nil,\n\t\terror: nil,\n\t\tisDone: make(chan bool, 1), // mark 
as done is non-blocking for worker\n\t\tisLogin: false,\n\t}\n\n\treturn job\n}", "func NewJob(name, ns string, containers ContainerList) *Job {\n\tret := &Job{\n\t\tcore: &batchv1.Job{\n\t\t\tMetadata: objectMeta(name, ns),\n\t\t\tSpec: &batchv1.JobSpec{\n\t\t\t\tTemplate: podTemplateSpec(emptyMap(), containers),\n\t\t\t},\n\t\t},\n\t}\n\tret.core.Spec.Template.Spec.RestartPolicy = k8s.String(\"OnFailure\")\n\treturn ret\n}", "func (rh *RestHandler) Create(w http.ResponseWriter, r *http.Request) {\n\terr := r.ParseForm()\n\tif err != nil {\n\t\twriteError(w, http.StatusBadRequest, \"unable to parse form values\")\n\t\treturn\n\t}\n\n\tjob := model.Job{}\n\tif r.FormValue(\"partner_id\") == \"\" {\n\t\twriteError(w, http.StatusBadRequest, \"missing partner_id value\")\n\t\treturn\n\t}\n\tjob.PartnerID, err = strconv.ParseInt(r.FormValue(\"partner_id\"), 10, 64)\n\tif err != nil {\n\t\twriteError(w, http.StatusBadRequest, \"invalid partner_id value\")\n\t\treturn\n\t}\n\tif r.FormValue(\"category_id\") == \"\" {\n\t\twriteError(w, http.StatusBadRequest, \"missing category_id value\")\n\t\treturn\n\t}\n\tjob.CategoryID, err = strconv.ParseInt(r.FormValue(\"category_id\"), 10, 64)\n\tif err != nil {\n\t\twriteError(w, http.StatusBadRequest, errors.Wrap(err, \"invalid category_id value\").Error())\n\t\treturn\n\t}\n\tjob.Title = strings.TrimSpace(r.FormValue(\"title\"))\n\tif job.Title == \"\" {\n\t\twriteError(w, http.StatusBadRequest, \"missing or empty title given\")\n\t\treturn\n\t}\n\n\tif r.FormValue(\"expires_at\") == \"\" {\n\t\twriteError(w, http.StatusBadRequest, \"missing expires_at value\")\n\t\treturn\n\t}\n\tnow := time.Now()\n\tjob.ExpiresAt, err = time.ParseInLocation(model.DateFormat, r.FormValue(\"expires_at\"), now.Location())\n\tif err != nil || job.ExpiresAt.IsZero() {\n\t\twriteError(w, http.StatusBadRequest, errors.Wrap(err, \"invalid expiration date\").Error())\n\t\treturn\n\t}\n\t//Times are parsed without hour, so whe have to add the hours until de end of the day\n\tjob.ExpiresAt = job.ExpiresAt.Add(23*time.Hour + 59*time.Minute + 59*time.Second)\n\tif job.ExpiresAt.Before(now) {\n\t\twriteError(w, http.StatusBadRequest, \"job already expired\")\n\t\treturn\n\t}\n\treq := model.RequestCreate{}\n\treq.Job = job\n\n\tencreq, err := crypt.EncryptRequest(rh.cipher, req)\n\tif err != nil {\n\t\twriteError(w, http.StatusInternalServerError, err.Error())\n\t\treturn\n\t}\n\n\t_, err = rh.backend.Create(context.Background(), encreq)\n\tif err != nil {\n\t\twriteError(w, http.StatusInternalServerError, err.Error())\n\t\treturn\n\t}\n\n\twriteResponse(w, http.StatusCreated, nil)\n}", "func (w *Worker) NewJob(j *templates.Message, reply *bool) (e error) {\n\t// T.Ln(\"received new job\")\n\tif !w.dispatchReady.Load() {\n\t\tD.Ln(\"dispatch not ready\")\n\t\t*reply = true\n\t\treturn\n\t}\n\tif w.templatesMessage != nil {\n\t\tif j.PrevBlock == w.templatesMessage.PrevBlock {\n\t\t\t// T.Ln(\"not a new job\")\n\t\t\t*reply = true\n\t\t\treturn\n\t\t}\n\t}\n\t// D.S(j)\n\t*reply = true\n\tD.Ln(\"halting current work\")\n\tw.stopChan <- struct{}{}\n\t// load the job into the template\n\tif w.templatesMessage == nil {\n\t\tw.templatesMessage = j\n\t} else {\n\t\t*w.templatesMessage = *j\n\t}\n\tD.Ln(\"switching to new job\")\n\tw.startChan <- struct{}{}\n\treturn\n}", "func NewJob(name string, schedule string, ) *Job {\n\tthis := Job{}\n\tthis.Name = name\n\tthis.Schedule = schedule\n\treturn &this\n}", "func NewJob(id string, check CheckFunc, task TaskFunc) *Job {\n\treturn &Job{id, 
check, task}\n}", "func NewJob(intervel uint64) *Job {\n\treturn &Job{\n\t\tintervel, 0,\n\t\t\"\",\n\t\tJOB_UNIT_TYPE_UNKNOWN,\n\t\t\"\",\n\t\ttime.Unix(0, 0),\n\t\ttime.Unix(0, 0), 0,\n\t\tmake(map[string]interface{}),\n\t\tmake(map[string]([]interface{})),\n\t}\n}", "func CreateJob(ctx *gin.Context) {\n\tlog := logger.RuntimeLog\n\tvar jobModel *model.Job\n\tif err := ctx.BindJSON(&jobModel); err != nil {\n\t\tSendResponse(ctx, err, \"Request Body Invalid\")\n\t}\n\n\tjobNamespace := strings.ToLower(jobModel.JobMeta.AppMeta.Namespace)\n\tjobName := jobModel.JobMeta.AppMeta.Name\n\tzoneName := jobModel.JobMeta.AppMeta.ZoneName\n\n\t// fetch k8s-client handler by zoneName\n\tkclient, err := GetClientByAzCode(zoneName)\n\tif err != nil {\n\t\tlog.WithError(err)\n\t\tSendResponse(ctx, errno.ErrTokenInvalid, nil)\n\t}\n\n\tstartAt := time.Now()\n\t_, err = kclient.BatchV1().Jobs(jobNamespace).Create(makeupJobData(ctx, jobModel))\n\tif err != nil {\n\t\tSendResponse(ctx, err, \"create deployment fail.\")\n\t\treturn\n\t}\n\tlogger.MetricsEmit(\n\t\tJOB_CONST.K8S_LOG_Method_CreateJob,\n\t\tutil.GetReqID(ctx),\n\t\tfloat32(time.Since(startAt)/time.Millisecond),\n\t\terr == err,\n\t)\n\tSendResponse(ctx, errno.OK, fmt.Sprintf(\"Create Job %s success.\", jobName))\n\treturn\n}", "func (n *Namespace) CreateJob(backend Backend, job *Job) error {\n\n\t// ensure the namespace exists\n\terr := n.checkOrCreateNamespace(backend)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// get JSON for Job\n\tjson, err := job.Serialize()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// create job, overwriting existing\n\terr = backend.WriteKey(getJobPath(n.namespace, job.ID), string(json), false, etcd.PrevNoExist, 0)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"problem creating job %v\", err)\n\t}\n\n\treturn nil\n}", "func NewJob() helmify.Processor {\n\treturn &job{}\n}", "func (c *CombinedStorage) NewJob(parent context.Context, params JobParams, log *logrus.Entry) (Job, error) {\n\tj, err := c.Storage.NewJob(parent, params, log)\n\tif err != nil {\n\t\tif params.CheckID == \"\" {\n\t\t\treturn j, err\n\t\t}\n\t\tstatus := StatusMalformed\n\t\tif _, ok := err.(*UnableToAddJobError); ok {\n\t\t\tstatus = StatusFailed\n\t\t}\n\t\tinnerErr := c.remote.UpdateCheckState(params.CheckID, State{Status: status})\n\t\tif innerErr != nil {\n\t\t\tc.log.WithError(innerErr).Error(\"error updating check status in persistence\")\n\t\t}\n\t}\n\treturn j, err\n}", "func newJob(job Runnable, priority int) JobEntry {\n\treturn &pt{\n\t\tpriority: priority,\n\t\tjob: job,\n\t\tlock: &sync.Mutex{},\n\t}\n}", "func (d Dispatcher) NewJob(task string, name string, priv bool, privKey string) (string, error) {\n\tif privKey == \"\" {\n\t\treturn \"\", errors.New(\"Empty private key\")\n\t}\n\tj, err := job.NewJob(task, name, priv, privKey)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\td.AddJob(*j)\n\treturn j.GetID(), nil\n}", "func Create(ctx context.Context, options *Options) (_ *JobObject, err error) {\n\tif options == nil {\n\t\toptions = &Options{}\n\t}\n\n\tvar jobName *winapi.UnicodeString\n\tif options.Name != \"\" {\n\t\tjobName, err = winapi.NewUnicodeString(options.Name)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tvar jobHandle windows.Handle\n\tif options.UseNTVariant {\n\t\toa := winapi.ObjectAttributes{\n\t\t\tLength: unsafe.Sizeof(winapi.ObjectAttributes{}),\n\t\t\tObjectName: jobName,\n\t\t\tAttributes: 0,\n\t\t}\n\t\tstatus := winapi.NtCreateJobObject(&jobHandle, winapi.JOB_OBJECT_ALL_ACCESS, &oa)\n\t\tif 
status != 0 {\n\t\t\treturn nil, winapi.RtlNtStatusToDosError(status)\n\t\t}\n\t} else {\n\t\tvar jobNameBuf *uint16\n\t\tif jobName != nil && jobName.Buffer != nil {\n\t\t\tjobNameBuf = jobName.Buffer\n\t\t}\n\t\tjobHandle, err = windows.CreateJobObject(nil, jobNameBuf)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tdefer func() {\n\t\tif err != nil {\n\t\t\twindows.Close(jobHandle)\n\t\t}\n\t}()\n\n\tjob := &JobObject{\n\t\thandle: jobHandle,\n\t}\n\n\t// If the IOCP we'll be using to receive messages for all jobs hasn't been\n\t// created, create it and start polling.\n\tif options.Notifications {\n\t\tmq, err := setupNotifications(ctx, job)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tjob.mq = mq\n\t}\n\n\tif options.EnableIOTracking {\n\t\tif err := enableIOTracking(jobHandle); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treturn job, nil\n}", "func (r *resolver) newNode(j *spec.Node, jobArgs map[string]interface{}) (*Node, error) {\n\t// Make a copy of the jobArgs before this node gets created and potentially\n\t// adds additional keys to the jobArgs. A shallow copy is sufficient because\n\t// args values should never change.\n\toriginalArgs := map[string]interface{}{}\n\tfor k, v := range jobArgs {\n\t\toriginalArgs[k] = v\n\t}\n\n\t// Make the name of this node unique within the request by assigning it an id.\n\tid, err := r.idGen.UID()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Error making id for '%s %s' job: %s\", *j.NodeType, j.Name, err)\n\t}\n\n\t// Create the job\n\trj, err := r.jobFactory.Make(job.NewIdWithRequestId(*j.NodeType, j.Name, id, r.request.Id))\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Error making '%s %s' job: %s\", *j.NodeType, j.Name, err)\n\t}\n\n\tif err := rj.Create(jobArgs); err != nil {\n\t\treturn nil, fmt.Errorf(\"Error creating '%s %s' job: %s\", *j.NodeType, j.Name, err)\n\t}\n\n\tbytes, err := rj.Serialize()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Error serializing '%s %s' job: %s\", *j.NodeType, j.Name, err)\n\t}\n\n\treturn &Node{\n\t\tName: j.Name,\n\t\tId: id,\n\t\tSpec: j, // on the next refactor, we shouldn't need to set this ourselves\n\t\tJobBytes: bytes,\n\t\tArgs: originalArgs, // Args is the jobArgs map that this node was created with\n\t\tRetry: j.Retry,\n\t\tRetryWait: j.RetryWait,\n\t}, nil\n}", "func (d *Dispatcher) NewJob() *JobBuilder {\n\treturn &JobBuilder{Submitter: d}\n}", "func PostJob(w http.ResponseWriter, r *http.Request) {\n\tresponse := services.CreateJob(r)\n\n\trender.Status(r, response.Code)\n\trender.JSON(w, r, response)\n}", "func NewJob(server Server, command string) *Job {\n\tjob := &Job{}\n\tjob.server = server\n\tjob.command = command\n\tjob.hostConfig = config.forServer(server)\n\tjob.getAgent()\n\tgo job.run()\n\treturn job\n}", "func NewJob(name string, f func() (bool, error)) *Job {\n\treturn &Job{Name: name, F: f}\n}", "func createNewSecHubJob(context *Context) {\n\tfmt.Printf(\"- Creating new sechub job\\n\")\n\tresponse := sendWithDefaultHeader(\"POST\", buildCreateNewSecHubJobAPICall(context), context)\n\n\tdata, err := ioutil.ReadAll(response.Body)\n\tHandleError(err)\n\n\tvar result jobScheduleResult\n\tjsonErr := json.Unmarshal(data, &result)\n\tHandleError(jsonErr)\n\n\tcontext.config.secHubJobUUID = result.JobId\n}", "func (c *Controller) CreateJob(yaml []byte) (weles.JobID, error) {\n\tj, err := c.jobs.NewJob(yaml)\n\tif err != nil {\n\t\treturn weles.JobID(0), err\n\t}\n\n\tgo c.parser.Parse(j)\n\n\treturn j, nil\n}", "func NewJob(brigadeSVC 
brigade.Interface, logger log.Logger) subcollector {\n\treturn &job{\n\t\tbrigadeSVC: brigadeSVC,\n\t\tlogger: logger,\n\n\t\tjobInfoDesc: prometheus.NewDesc(\n\t\t\tprometheus.BuildFQName(namespace, jobSubSystem, \"info\"),\n\t\t\t\"Brigade job information.\",\n\t\t\t[]string{\"id\", \"build_id\", \"name\", \"image\"}, nil,\n\t\t),\n\t\tjobStatusDesc: prometheus.NewDesc(\n\t\t\tprometheus.BuildFQName(namespace, jobSubSystem, \"status\"),\n\t\t\t\"Brigade job status.\",\n\t\t\t[]string{\"id\", \"status\"}, nil,\n\t\t),\n\t\tjobDurationDesc: prometheus.NewDesc(\n\t\t\tprometheus.BuildFQName(namespace, jobSubSystem, \"duration_seconds\"),\n\t\t\t\"Brigade job duration in seconds.\",\n\t\t\t[]string{\"id\"}, nil,\n\t\t),\n\t\tjobCreationDesc: prometheus.NewDesc(\n\t\t\tprometheus.BuildFQName(namespace, jobSubSystem, \"create_time_seconds\"),\n\t\t\t\"Brigade job creation time in unix timestamp.\",\n\t\t\t[]string{\"id\"}, nil,\n\t\t),\n\t\tjobStartDesc: prometheus.NewDesc(\n\t\t\tprometheus.BuildFQName(namespace, jobSubSystem, \"start_time_seconds\"),\n\t\t\t\"Brigade job start time in unix timestamp.\",\n\t\t\t[]string{\"id\"}, nil,\n\t\t),\n\t}\n}", "func CreateJob(name string) *batchv1.Job {\n\tj := &batchv1.Job{}\n\tj.APIVersion = \"batch/v1\"\n\tj.Kind = \"Job\"\n\tj.Name = name\n\tj.Spec.Template.Name = name\n\tj.Spec.Template.Spec.RestartPolicy = \"Never\"\n\treturn j\n}", "func NewJob(interval time.Duration) *Job {\n\treturn &Job{\n\t\tinterval: interval,\n\t\tfuncs: make(map[string]interface{}),\n\t\tfparams: make(map[string][]interface{}),\n\t\ttags: []string{},\n\t}\n}", "func CreateJob(ctx context.Context, p string) (j *Job, err error) {\n\tt := utils.FromTaskContext(ctx)\n\n\tj = &Job{\n\t\tPath: p,\n\t\tMarker: \"\",\n\t}\n\n\tcontent, err := msgpack.Marshal(j)\n\tif err != nil {\n\t\treturn\n\t}\n\n\terr = contexts.DB.Put(constants.FormatJobKey(t, j.Path), content, nil)\n\tif err != nil {\n\t\treturn\n\t}\n\n\treturn\n}", "func NewJob(name string, fun interface{}, args ...interface{}) (*Job, error) {\n\tfunction := reflect.ValueOf(fun)\n\tif function.Kind() != reflect.Func {\n\t\treturn nil, errors.New(\"schedule: jobs can only be created for functions\")\n\t}\n\targuments := make([]reflect.Value, len(args))\n\tfor i, arg := range args {\n\t\targuments[i] = reflect.ValueOf(arg)\n\t}\n\treturn &Job{\n\t\tName: name,\n\t\tfunction: function,\n\t\targs: arguments,\n\t\ttrigger: NewTrigger(),\n\t}, nil\n}", "func (js *JobServ) NewJobId() int64 {\n\tid := js.NextJobId\n\tjs.NextJobId++\n\treturn id\n}", "func CreateJobHandler(ctx *gin.Context) {\n\tlog.Info(\"received request to create new job\")\n\tvar j Job\n\tif err := ctx.ShouldBind(&j); err != nil {\n\t\tlog.Error(fmt.Errorf(\"unable to parse request body: %+v\", err))\n\t\tstatus := http.StatusBadRequest\n\t\tctx.AbortWithStatusJSON(status, gin.H{\"http_code\": status,\n\t\t\t\"message\": \"Invalid request body\"})\n\t\treturn\n\t}\n\t// add job creator to metadata\n\tj.Meta[\"creator\"] = ctx.MustGet(\"uid\").(string)\n\t// create new job in persistence layer\n\tid, err := persistence.CreateJob(j)\n\tif err != nil {\n\t\tlog.Error(fmt.Errorf(\"unable to create new job: %+v\", err))\n\t\tstatus := http.StatusInternalServerError\n\t\tctx.AbortWithStatusJSON(status, gin.H{\"http_code\": status,\n\t\t\t\"message\": \"Internal server error\"})\n\t\treturn\n\t}\n\tctx.JSON(http.StatusCreated, gin.H{\"http_code\": http.StatusCreated,\n\t\t\"message\": \"Successfully created job\", \"id\": id})\n}", "func (client JobClient) 
Create(ctx context.Context, resourceGroupName string, automationAccountName string, jobName string, parameters JobCreateParameters, clientRequestID string) (result Job, err error) {\n\tif tracing.IsEnabled() {\n\t\tctx = tracing.StartSpan(ctx, fqdn+\"/JobClient.Create\")\n\t\tdefer func() {\n\t\t\tsc := -1\n\t\t\tif result.Response.Response != nil {\n\t\t\t\tsc = result.Response.Response.StatusCode\n\t\t\t}\n\t\t\ttracing.EndSpan(ctx, sc, err)\n\t\t}()\n\t}\n\tif err := validation.Validate([]validation.Validation{\n\t\t{TargetValue: resourceGroupName,\n\t\t\tConstraints: []validation.Constraint{{Target: \"resourceGroupName\", Name: validation.MaxLength, Rule: 90, Chain: nil},\n\t\t\t\t{Target: \"resourceGroupName\", Name: validation.MinLength, Rule: 1, Chain: nil},\n\t\t\t\t{Target: \"resourceGroupName\", Name: validation.Pattern, Rule: `^[-\\w\\._]+$`, Chain: nil}}},\n\t\t{TargetValue: parameters,\n\t\t\tConstraints: []validation.Constraint{{Target: \"parameters.JobCreateProperties\", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil {\n\t\treturn result, validation.NewError(\"automation.JobClient\", \"Create\", err.Error())\n\t}\n\n\treq, err := client.CreatePreparer(ctx, resourceGroupName, automationAccountName, jobName, parameters, clientRequestID)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"automation.JobClient\", \"Create\", nil, \"Failure preparing request\")\n\t\treturn\n\t}\n\n\tresp, err := client.CreateSender(req)\n\tif err != nil {\n\t\tresult.Response = autorest.Response{Response: resp}\n\t\terr = autorest.NewErrorWithError(err, \"automation.JobClient\", \"Create\", resp, \"Failure sending request\")\n\t\treturn\n\t}\n\n\tresult, err = client.CreateResponder(resp)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"automation.JobClient\", \"Create\", resp, \"Failure responding to request\")\n\t\treturn\n\t}\n\n\treturn\n}", "func newJobID(tm Time) (Job, error) {\n\tk, err := ksuid.NewRandomWithTime(tm)\n\tif err != nil {\n\t\treturn Job{}, err\n\t}\n\treturn Job(k), nil\n}", "func NewJobIamPolicy(ctx *pulumi.Context,\n\tname string, args *JobIamPolicyArgs, opts ...pulumi.ResourceOption) (*JobIamPolicy, error) {\n\tif args == nil {\n\t\treturn nil, errors.New(\"missing one or more required arguments\")\n\t}\n\n\tif args.PolicyData == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'PolicyData'\")\n\t}\n\topts = internal.PkgResourceDefaultOpts(opts)\n\tvar resource JobIamPolicy\n\terr := ctx.RegisterResource(\"gcp:cloudrunv2/jobIamPolicy:JobIamPolicy\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func createJob(t *testing.T, descr string, delete bool) *Job {\n\tcf := func(now *time.Time) (bool, bool) { return now.Seconds()%2 == 0, delete }\n\ttf := func(id string) { t.Logf(\"Performed job %s\\n\", id) }\n\n\treturn NewJob(\"test-server-\"+descr, cf, tf)\n}", "func New(c Getting, u *url.URL) *Resource {\n\tr := Resource{\n\t\tClient: c,\n\t\tURI: u,\n\t}\n\n\treturn &r\n}", "func CreateJob(board schema.Board, boardID string, categoryID string, ownerID string) {\n\n\tobjID, err := primitive.ObjectIDFromHex(boardID)\n\tcategoryID_int, err := strconv.Atoi(categoryID)\n\tif err != nil {\n\t\t// handle error\n\t\tlog.Fatal(\"CreateJob() str conv ERROR:\", err)\n\t}\n\n\t//get job details from board document inputted through put request\n\tjd := schema.JobDetails{\n\t\tCompany: board.Categories[0].Jobs[0].JobDetails.Company,\n\t\tTitle: 
board.Categories[0].Jobs[0].JobDetails.Title,\n\t\tLocation: board.Categories[0].Jobs[0].JobDetails.Location,\n\t\tCategory: board.Categories[0].Jobs[0].JobDetails.Category,\n\t\tPostDate: board.Categories[0].Jobs[0].JobDetails.PostDate,\n\t\tDescription: board.Categories[0].Jobs[0].JobDetails.Description,\n\t\tExperience: board.Categories[0].Jobs[0].JobDetails.Experience,\n\t\tURL: board.Categories[0].Jobs[0].JobDetails.URL,\n\t\tDateAdded: board.Categories[0].Jobs[0].JobDetails.DateAdded,\n\t\tSalary: board.Categories[0].Jobs[0].JobDetails.Salary,\n\t\tTasks: board.Categories[0].Jobs[0].JobDetails.Tasks}\n\n\t//stick job details into job var\n\tj := schema.Job{JobDetails: jd, ID: board.Categories[0].Jobs[0].ID}\n\n\tfilter := bson.M{\"_id\": bson.M{\"$eq\": objID}, \"categories.id\": bson.M{\"$eq\": categoryID_int}}\n\tupdate := bson.M{\"$set\": bson.M{\"categories.$.jobs\": j}}\n\n\tupdateResult, err := db.Collection(COLLNAME).UpdateOne(\n\t\tcontext.Background(),\n\t\tfilter,\n\t\tupdate,\n\t)\n\tif err != nil {\n\t\tlog.Fatal(\"CreateJob() ERROR:\", err)\n\t}\n\n\tfmt.Println(\"create job func input: \", j)\n\n\tfmt.Printf(\"Matched %v documents and updated %v documents.\\n\", updateResult.MatchedCount, updateResult.ModifiedCount)\n\tfmt.Println(\"Full Result: \", updateResult)\n\n}", "func NewResource() Resource {\n\tid, _ := NewID()\n\treturn Resource{ID: id}\n}", "func (j *JobWorker) CreateJob(request CreateJobRequest) (Job, error) {\n\t// combines stderr and stdout of the job process\n\tvar outputBuffer Buffer\n\n\tcmd := exec.Command(request.Command, request.Args...)\n\tcmd.Stdout = &outputBuffer\n\tcmd.Stderr = &outputBuffer\n\n\t// no need to save the job if it fails to start\n\tif err := cmd.Start(); err != nil {\n\t\terr = fmt.Errorf(\"job failed to start, reason: %v\", err)\n\t\tlog.Errorf(err.Error())\n\t\treturn Job{}, err\n\t}\n\n\t// job started, create job object, assign ID and save the job\n\tjob := Job{\n\t\tID: getNextJobID(),\n\t\tRequest: request,\n\t\tStatus: JobRunning,\n\t\tcmd: cmd,\n\t\toutputBuffer: &outputBuffer,\n\t\twaitForExit: &sync.WaitGroup{},\n\t\tstatusLock: &sync.RWMutex{},\n\t}\n\n\tlog.Infof(\"job %v: started\", job.ID)\n\n\t// save the job in memory for future reference\n\tj.store(job.ID, &job)\n\n\t// wait for job to finish\n\tgo handleFinish(&job)\n\n\treturn job, nil\n}", "func (o *VRS) CreateJob(child *Job) *bambou.Error {\n\n\treturn bambou.CurrentSession().CreateChild(o, child)\n}", "func (jr *JobResource) UnmarshalJSON(body []byte) error {\n\tvar m map[string]*json.RawMessage\n\terr := json.Unmarshal(body, &m)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor k, v := range m {\n\t\tswitch k {\n\t\tcase \"properties\":\n\t\t\tif v != nil {\n\t\t\t\tvar jobProperties JobProperties\n\t\t\t\terr = json.Unmarshal(*v, &jobProperties)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tjr.JobProperties = &jobProperties\n\t\t\t}\n\t\tcase \"name\":\n\t\t\tif v != nil {\n\t\t\t\tvar name string\n\t\t\t\terr = json.Unmarshal(*v, &name)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tjr.Name = &name\n\t\t\t}\n\t\tcase \"id\":\n\t\t\tif v != nil {\n\t\t\t\tvar ID string\n\t\t\t\terr = json.Unmarshal(*v, &ID)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tjr.ID = &ID\n\t\t\t}\n\t\tcase \"type\":\n\t\t\tif v != nil {\n\t\t\t\tvar typeVar string\n\t\t\t\terr = json.Unmarshal(*v, &typeVar)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tjr.Type = &typeVar\n\t\t\t}\n\t\tcase 
\"location\":\n\t\t\tif v != nil {\n\t\t\t\tvar location string\n\t\t\t\terr = json.Unmarshal(*v, &location)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tjr.Location = &location\n\t\t\t}\n\t\tcase \"tags\":\n\t\t\tif v != nil {\n\t\t\t\tvar tags map[string]*string\n\t\t\t\terr = json.Unmarshal(*v, &tags)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tjr.Tags = tags\n\t\t\t}\n\t\tcase \"sku\":\n\t\t\tif v != nil {\n\t\t\t\tvar sku Sku\n\t\t\t\terr = json.Unmarshal(*v, &sku)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tjr.Sku = &sku\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}", "func NewJob() *Job {\n\tj := &Job{\n\t\tch: make(chan bool),\n\t\twaitGroup: &sync.WaitGroup{},\n\t}\n\treturn j\n}", "func NewJob(cipher crypt.Cipher, dburl string) (interfaces.Job, error) {\n\tu, err := url.Parse(dburl)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdbtype := u.Scheme\n\tu.Scheme = \"\"\n\n\tswitch dbtype {\n\tcase \"mysql\":\n\t\tq := u.Query()\n\t\tq.Set(\"parseTime\", \"true\")\n\t\tu.RawQuery = q.Encode()\n\t\tdb, err := dsql.Open(dbtype, u.String()[2:])\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\terr = db.Ping()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn sql.NewJob(cipher, db), nil\n\tdefault:\n\t\treturn memory.NewJob(cipher), nil\n\t}\n}", "func (c *Controller) CreateResource(w http.ResponseWriter, r *http.Request) {\n\tvar resource Resource\n\tbody, err := ioutil.ReadAll(io.LimitReader(r.Body, 1048576))\n\tif err != nil {\n\t\tlog.Fatalln(\"Error creating resource\", err)\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\tif err := r.Body.Close(); err != nil {\n\t\tlog.Fatalln(\"Error creating resource\", err)\n\t}\n\tif err := json.Unmarshal(body, &resource); err != nil {\n\t\tw.WriteHeader(422)\n\t\tif err := json.NewEncoder(w).Encode(err); err != nil {\n\t\t\tlog.Fatalln(\"Error unmarshalling data\", err)\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t}\n\tsuccess := c.Dao.CreateResource(resource)\n\tif !success {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=UTF-8\")\n\tw.WriteHeader(http.StatusCreated)\n\treturn\n}", "func (s *JobService) Create(ctx context.Context, bij *api_types.InferenceJob) (err error) {\n\n\tbij.CreatedAt = time.Now().UTC()\n\tbij.UpdatedAt = time.Now().UTC()\n\n\tif errs := ValidateJobInput(*bij); len(errs) > 0 {\n\t\treturn odahuErrors.InvalidEntityError{\n\t\t\tEntity: bij.ID,\n\t\t\tValidationErrors: errs,\n\t\t}\n\t}\n\n\tservice, err := s.sRepo.Get(ctx, nil, bij.Spec.InferenceServiceID)\n\tif err != nil {\n\t\tif odahuErrors.IsNotFoundError(err) {\n\t\t\treturn odahuErrors.InvalidEntityError{\n\t\t\t\tEntity: \"job\",\n\t\t\t\tValidationErrors: []error{fmt.Errorf(\"unable to fetch corresponding service: %s\", err)},\n\t\t\t}\n\t\t}\n\t\treturn err\n\t}\n\n\tDefaultJob(bij, service)\n\n\terrs, err := ValidateJob(*bij, s.connGetter, service)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif len(errs) > 0 {\n\t\treturn odahuErrors.InvalidEntityError{\n\t\t\tEntity: bij.ID,\n\t\t\tValidationErrors: errs,\n\t\t}\n\t}\n\n\terr = s.repo.Create(ctx, nil, *bij)\n\n\treturn err\n}", "func (r *ProjectsLocationsJobsService) Create(parent string, job *Job) *ProjectsLocationsJobsCreateCall {\n\tc := &ProjectsLocationsJobsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)}\n\tc.parent = parent\n\tc.job = job\n\treturn c\n}", "func (p 
*AuroraSchedulerManagerClient) CreateJob(ctx context.Context, description *JobConfiguration) (r *Response, err error) {\n var _args182 AuroraSchedulerManagerCreateJobArgs\n _args182.Description = description\n var _result183 AuroraSchedulerManagerCreateJobResult\n var meta thrift.ResponseMeta\n meta, err = p.Client_().Call(ctx, \"createJob\", &_args182, &_result183)\n p.SetLastResponseMeta_(meta)\n if err != nil {\n return\n }\n return _result183.GetSuccess(), nil\n}", "func CreateJob(cmd *cobra.Command, args []string) error {\n\n\tclient, err := auth.GetClient()\n\tif err != nil {\n\t\treturn err\n\t}\n\t// Parse all flags\n\n\tvar allowSideEffectsDefault bool\n\tallowSideEffects := &allowSideEffectsDefault\n\terr = flags.ParseFlag(cmd.Flags(), \"allow-side-effects\", &allowSideEffects)\n\tif err != nil {\n\t\treturn fmt.Errorf(`error parsing \"allow-side-effects\": ` + err.Error())\n\t}\n\tvar collectEventSummaryDefault bool\n\tcollectEventSummary := &collectEventSummaryDefault\n\terr = flags.ParseFlag(cmd.Flags(), \"collect-event-summary\", &collectEventSummary)\n\tif err != nil {\n\t\treturn fmt.Errorf(`error parsing \"collect-event-summary\": ` + err.Error())\n\t}\n\tvar collectFieldSummaryDefault bool\n\tcollectFieldSummary := &collectFieldSummaryDefault\n\terr = flags.ParseFlag(cmd.Flags(), \"collect-field-summary\", &collectFieldSummary)\n\tif err != nil {\n\t\treturn fmt.Errorf(`error parsing \"collect-field-summary\": ` + err.Error())\n\t}\n\tvar collectTimeBucketsDefault bool\n\tcollectTimeBuckets := &collectTimeBucketsDefault\n\terr = flags.ParseFlag(cmd.Flags(), \"collect-time-buckets\", &collectTimeBuckets)\n\tif err != nil {\n\t\treturn fmt.Errorf(`error parsing \"collect-time-buckets\": ` + err.Error())\n\t}\n\tvar earliestDefault string\n\tearliest := &earliestDefault\n\terr = flags.ParseFlag(cmd.Flags(), \"earliest\", &earliest)\n\tif err != nil {\n\t\treturn fmt.Errorf(`error parsing \"earliest\": ` + err.Error())\n\t}\n\tvar enablePreviewDefault bool\n\tenablePreview := &enablePreviewDefault\n\terr = flags.ParseFlag(cmd.Flags(), \"enable-preview\", &enablePreview)\n\tif err != nil {\n\t\treturn fmt.Errorf(`error parsing \"enable-preview\": ` + err.Error())\n\t}\n\tvar extractAllFieldsDefault bool\n\textractAllFields := &extractAllFieldsDefault\n\terr = flags.ParseFlag(cmd.Flags(), \"extract-all-fields\", &extractAllFields)\n\tif err != nil {\n\t\treturn fmt.Errorf(`error parsing \"extract-all-fields\": ` + err.Error())\n\t}\n\tvar extractFieldsDefault string\n\textractFields := &extractFieldsDefault\n\terr = flags.ParseFlag(cmd.Flags(), \"extract-fields\", &extractFields)\n\tif err != nil {\n\t\treturn fmt.Errorf(`error parsing \"extract-fields\": ` + err.Error())\n\t}\n\tvar latestDefault string\n\tlatest := &latestDefault\n\terr = flags.ParseFlag(cmd.Flags(), \"latest\", &latest)\n\tif err != nil {\n\t\treturn fmt.Errorf(`error parsing \"latest\": ` + err.Error())\n\t}\n\tvar maxTimeDefault int32\n\tmaxTime := &maxTimeDefault\n\terr = flags.ParseFlag(cmd.Flags(), \"max-time\", &maxTime)\n\tif err != nil {\n\t\treturn fmt.Errorf(`error parsing \"max-time\": ` + err.Error())\n\t}\n\tvar messages []model.Message\n\terr = flags.ParseFlag(cmd.Flags(), \"messages\", &messages)\n\tif err != nil {\n\t\treturn fmt.Errorf(`error parsing \"messages\": ` + err.Error())\n\t}\n\tvar moduleDefault string\n\tmodule := &moduleDefault\n\terr = flags.ParseFlag(cmd.Flags(), \"module\", &module)\n\tif err != nil {\n\t\treturn fmt.Errorf(`error parsing \"module\": ` + 
err.Error())\n\t}\n\tvar query string\n\terr = flags.ParseFlag(cmd.Flags(), \"query\", &query)\n\tif err != nil {\n\t\treturn fmt.Errorf(`error parsing \"query\": ` + err.Error())\n\t}\n\tvar relativeTimeAnchorDefault string\n\trelativeTimeAnchor := &relativeTimeAnchorDefault\n\terr = flags.ParseFlag(cmd.Flags(), \"relative-time-anchor\", &relativeTimeAnchor)\n\tif err != nil {\n\t\treturn fmt.Errorf(`error parsing \"relative-time-anchor\": ` + err.Error())\n\t}\n\tvar requiredFreshnessDefault int32\n\trequiredFreshness := &requiredFreshnessDefault\n\terr = flags.ParseFlag(cmd.Flags(), \"required-freshness\", &requiredFreshness)\n\tif err != nil {\n\t\treturn fmt.Errorf(`error parsing \"required-freshness\": ` + err.Error())\n\t}\n\tvar statusDefault model.SearchStatus\n\tstatus := &statusDefault\n\terr = flags.ParseFlag(cmd.Flags(), \"status\", &status)\n\tif err != nil {\n\t\treturn fmt.Errorf(`error parsing \"status\": ` + err.Error())\n\t}\n\tvar timezone interface{}\n\terr = flags.ParseFlag(cmd.Flags(), \"timezone\", &timezone)\n\tif err != nil {\n\t\treturn fmt.Errorf(`error parsing \"timezone\": ` + err.Error())\n\t}\n\t// Form the request body\n\tgenerated_request_body := model.SearchJob{\n\n\t\tAllowSideEffects: allowSideEffects,\n\t\tCollectEventSummary: collectEventSummary,\n\t\tCollectFieldSummary: collectFieldSummary,\n\t\tCollectTimeBuckets: collectTimeBuckets,\n\t\tEnablePreview: enablePreview,\n\t\tExtractAllFields: extractAllFields,\n\t\tExtractFields: extractFields,\n\t\tMaxTime: maxTime,\n\t\tMessages: messages,\n\t\tModule: module,\n\t\tQuery: query,\n\t\tQueryParameters: &model.QueryParameters{\n\t\t\tEarliest: earliest,\n\t\t\tLatest: latest,\n\t\t\tRelativeTimeAnchor: relativeTimeAnchor,\n\t\t\tTimezone: timezone,\n\t\t},\n\t\tRequiredFreshness: requiredFreshness,\n\t\tStatus: status,\n\t}\n\n\t// Silence Usage\n\tcmd.SilenceUsage = true\n\n\tresp, err := client.SearchService.CreateJob(generated_request_body)\n\tif err != nil {\n\t\treturn err\n\t}\n\tjsonx.Pprint(cmd, resp)\n\treturn nil\n}", "func (c *PlatformGraphQLClient) CreateJob(ctx context.Context, tdoID string, isReprocessJob bool, tasks ...CreateJobTask) (*Job, error) {\n\treq := graphql.NewRequest(`\n\t\tmutation(\n\t\t\t$targetId: ID\n\t\t\t$isReprocessJob: Boolean\n\t\t\t$tasks: [CreateTask!]\n\t\t) {\n\t\t\tcreateJob(input: {\n\t\t\t\ttargetId: $targetId\n\t\t\t\tisReprocessJob: $isReprocessJob\n\t\t\t\ttasks: $tasks\n\t\t\t}) {\n\t\t\t\tid\n\t\t\t\tname\n\t\t\t\ttargetId\n\t\t\t\tstatus\n\t\t\t\ttasks {\n\t\t\t\t\trecords {\n\t\t\t\t\t\tid\n\t\t\t\t\t\tstatus\n\t\t\t\t\t\tengine {\n\t\t\t\t\t\t\tid\n\t\t\t\t\t\t\tname\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t`)\n\n\treq.Var(\"targetId\", tdoID)\n\treq.Var(\"isReprocessJob\", isReprocessJob)\n\treq.Var(\"tasks\", tasks)\n\n\tvar resp struct {\n\t\tResult *Job `json:\"createJob\"`\n\t}\n\treturn resp.Result, c.Run(ctx, req, &resp)\n}", "func (r *ReconcileDescheduler) createJob(descheduler *deschedulerv1alpha1.Descheduler) (*batch.Job, error) {\n\tactiveDeadline := int64(100)\n\tlog.Printf(\"Creating descheduler job\")\n\tjob := &batch.Job{\n\t\tTypeMeta: metav1.TypeMeta{\n\t\t\tKind: \"Job\",\n\t\t\tAPIVersion: batch.SchemeGroupVersion.String(),\n\t\t},\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: descheduler.Name,\n\t\t\tNamespace: descheduler.Namespace,\n\t\t},\n\t\tSpec: batch.JobSpec{\n\t\t\tActiveDeadlineSeconds: &activeDeadline,\n\t\t\tTemplate: v1.PodTemplateSpec{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tName: 
\"descheduler-job-spec\",\n\t\t\t\t},\n\t\t\t\tSpec: v1.PodSpec{\n\t\t\t\t\tVolumes: []v1.Volume{{\n\t\t\t\t\t\tName: \"policy-volume\",\n\t\t\t\t\t\tVolumeSource: v1.VolumeSource{\n\t\t\t\t\t\t\tConfigMap: &v1.ConfigMapVolumeSource{\n\t\t\t\t\t\t\t\tLocalObjectReference: v1.LocalObjectReference{\n\t\t\t\t\t\t\t\t\tName: descheduler.Name,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tRestartPolicy: \"Never\",\n\t\t\t\t\tContainers: []v1.Container{{\n\t\t\t\t\t\tName: \"openshift-descheduler\",\n\t\t\t\t\t\tImage: \"registry.svc.ci.openshift.org/openshift/origin-v4.0:descheduler\", // TODO: Make this configurable too.\n\t\t\t\t\t\tPorts: []v1.ContainerPort{{ContainerPort: 80}},\n\t\t\t\t\t\tResources: v1.ResourceRequirements{\n\t\t\t\t\t\t\tLimits: v1.ResourceList{\n\t\t\t\t\t\t\t\tv1.ResourceCPU: resource.MustParse(\"100m\"),\n\t\t\t\t\t\t\t\tv1.ResourceMemory: resource.MustParse(\"500Mi\"),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tRequests: v1.ResourceList{\n\t\t\t\t\t\t\t\tv1.ResourceCPU: resource.MustParse(\"100m\"),\n\t\t\t\t\t\t\t\tv1.ResourceMemory: resource.MustParse(\"500Mi\"),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tCommand: []string{\"/bin/descheduler\", \"--policy-config-file\", \"/policy-dir/policy.yaml\"},\n\t\t\t\t\t\tVolumeMounts: []v1.VolumeMount{{\n\t\t\t\t\t\t\tMountPath: \"/policy-dir\",\n\t\t\t\t\t\t\tName: \"policy-volume\",\n\t\t\t\t\t\t}},\n\t\t\t\t\t}},\n\t\t\t\t\tServiceAccountName: \"openshift-descheduler\", // TODO: This is hardcoded as of now, find a way to reference it from rbac.yaml.\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\terr := controllerutil.SetControllerReference(descheduler, job, r.scheme)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Error setting owner references %v\", err)\n\t}\n\treturn job, nil\n}", "func NewCreateJobCommand(p *config.KfParams) *cobra.Command {\n\tvar (\n\t\tresourceFlags utils.ResourceFlags\n\t\tschedule string\n\t\tconcurrencyPolicy string\n\t\tasync utils.AsyncFlags\n\t)\n\n\tcmd := &cobra.Command{\n\t\tUse: \"create-job APP_NAME JOB_NAME COMMAND\",\n\t\tShort: \"Create a Job on the App.\",\n\t\tExample: `kf create-job my-app my-job \"sleep 100\"`,\n\t\tArgs: cobra.ExactArgs(3),\n\t\tLong: `The create-job sub-command lets operators create a Job that can be run on a schedule or ad hoc.`,\n\t\tSilenceUsage: true,\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\tctx := cmd.Context()\n\t\t\tif err := p.ValidateSpaceTargeted(); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tappName := args[0]\n\t\t\tjobName := args[1]\n\t\t\tcommand := args[2]\n\n\t\t\tclient := client.Get(ctx)\n\n\t\t\tapp, err := client.KfV1alpha1().\n\t\t\t\tApps(p.Space).\n\t\t\t\tGet(ctx, appName, metav1.GetOptions{})\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"failed to get App: %s\", err)\n\t\t\t}\n\n\t\t\tdesiredTaskSchedule := &v1alpha1.TaskSchedule{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tName: jobName,\n\t\t\t\t\tNamespace: p.Space,\n\t\t\t\t\tOwnerReferences: []metav1.OwnerReference{\n\t\t\t\t\t\t*kmeta.NewControllerRef(app),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tSpec: v1alpha1.TaskScheduleSpec{\n\t\t\t\t\tSchedule: placeholderCron,\n\t\t\t\t\tSuspend: true,\n\t\t\t\t\tConcurrencyPolicy: concurrencyPolicy,\n\t\t\t\t\tTaskTemplate: v1alpha1.TaskSpec{\n\t\t\t\t\t\tAppRef: corev1.LocalObjectReference{\n\t\t\t\t\t\t\tName: appName,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tCPU: resourceFlags.CPU(),\n\t\t\t\t\t\tMemory: manifest.CFToSIUnits(resourceFlags.Memory()),\n\t\t\t\t\t\tDisk: 
manifest.CFToSIUnits(resourceFlags.Disk()),\n\t\t\t\t\t\tCommand: command,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}\n\n\t\t\tif schedule != \"\" {\n\t\t\t\tdesiredTaskSchedule.Spec.Schedule = schedule\n\t\t\t\tdesiredTaskSchedule.Spec.Suspend = false\n\t\t\t}\n\n\t\t\ttaskSchedule, err := client.KfV1alpha1().\n\t\t\t\tTaskSchedules(p.Space).\n\t\t\t\tCreate(ctx, desiredTaskSchedule, metav1.CreateOptions{})\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"failed to create Job: %s\", err)\n\t\t\t}\n\n\t\t\tlogging.FromContext(ctx).Infof(\"Job %s created.\", taskSchedule.Name)\n\n\t\t\treturn async.WaitFor(\n\t\t\t\tctx,\n\t\t\t\tcmd.OutOrStderr(),\n\t\t\t\t\"Waiting for Job to become ready\",\n\t\t\t\ttime.Second,\n\t\t\t\tfunc() (bool, error) {\n\t\t\t\t\tts, err := client.KfV1alpha1().\n\t\t\t\t\t\tTaskSchedules(p.Space).\n\t\t\t\t\t\tGet(ctx, jobName, metav1.GetOptions{})\n\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn false, err\n\t\t\t\t\t}\n\t\t\t\t\treturn ts.Status.IsReady(), nil\n\t\t\t\t},\n\t\t\t)\n\t\t},\n\t}\n\n\tresourceFlags.Add(cmd)\n\tasync.Add(cmd)\n\n\t// The default is left as \"\" here to determine if the schedule flag was\n\t// provided. When not provided the schedule is defaulted to placeholderCron.\n\tcmd.Flags().StringVarP(\n\t\t&schedule,\n\t\t\"schedule\",\n\t\t\"s\",\n\t\t\"\",\n\t\t\"Cron schedule on which to execute the Job.\",\n\t)\n\n\tcmd.Flags().StringVarP(\n\t\t&concurrencyPolicy,\n\t\t\"concurrency-policy\",\n\t\t\"c\",\n\t\t\"Always\",\n\t\t\"Specifies how to treat concurrent executions of a Job: Always (default), Replace, or Forbid.\",\n\t)\n\n\treturn cmd\n}", "func NewJobProposalResource(jp feeds.JobProposal) *JobProposalResource {\n\tres := &JobProposalResource{\n\t\tJAID: NewJAIDInt64(jp.ID),\n\t\tStatus: jp.Status,\n\t\tSpec: jp.Spec,\n\t\tFeedsManagerID: strconv.FormatInt(jp.FeedsManagerID, 10),\n\t\tMultiaddrs: jp.Multiaddrs,\n\t\tCreatedAt: jp.CreatedAt,\n\t}\n\n\tif jp.ExternalJobID.Valid {\n\t\tuuid := jp.ExternalJobID.UUID.String()\n\t\tres.ExternalJobID = &uuid\n\t}\n\n\treturn res\n}", "func (r *jobs) Create(txID string, job *dbmodels.Job) (*dbmodels.Job, error) {\n\trc := r.db.Database().Collection(viper.GetString(\"db.jobs_collection\"))\n\tctx, cancel := context.WithTimeout(\n\t\tcontext.Background(),\n\t\ttime.Duration(viper.GetInt(\"db.query_timeout_in_sec\"))*time.Second,\n\t)\n\tdefer cancel()\n\n\tinsertResult, err := rc.InsertOne(ctx, job)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Failed to create organization with error %v\", err)\n\t}\n\n\tjob.ID = insertResult.InsertedID.(primitive.ObjectID)\n\treturn job, nil\n}", "func NewResource() (Resource, error) {\n\t// Get cluster config\n\tconfig, err := rest.InClusterConfig()\n\tif err != nil {\n\t\tlog.Fatalf(\"Error getting in cluster config: %s\", err.Error())\n\t\treturn Resource{}, err\n\t}\n\n\t// Setup event source client\n\teventSrcClient, err := eventsrcclientset.NewForConfig(config)\n\tif err != nil {\n\t\tlog.Printf(\"Error building event source client: %s\", err.Error())\n\t\treturn Resource{}, err\n\t}\n\n\t// Setup tektoncd client\n\ttektonClient, err := tektoncdclientset.NewForConfig(config)\n\tif err != nil {\n\t\tlog.Printf(\"Error building tekton clientset: %s\", err.Error())\n\t\treturn Resource{}, err\n\t}\n\n\t// Setup k8s client\n\tk8sClient, err := k8sclientset.NewForConfig(config)\n\tif err != nil {\n\t\tlog.Printf(\"Error building k8s clientset: %s\", err.Error())\n\t\treturn Resource{}, err\n\t}\n\n\tr := Resource{\n\t\tK8sClient: 
k8sClient,\n\t\tTektonClient: tektonClient,\n\t\tEventSrcClient: eventSrcClient,\n\t}\n\treturn r, nil\n}", "func New(i interface{}) *Resource {\n\tr := &Resource{}\n\tr.geth = mustMakeRpc(i, \"Get\")\n\tr.posth = mustMakeRpc(i, \"Post\")\n\tr.puth = mustMakeRpc(i, \"Put\")\n\tr.deleteh = mustMakeRpc(i, \"Delete\")\n\n\t// println(\"[debug]\", r.geth, r.posth, r.puth, r.deleteh)\n\treturn r\n}", "func (client JobClient) CreateOrUpdateResponder(resp *http.Response) (result JobResourceDescription, err error) {\n err = autorest.Respond(\n resp,\n azure.WithErrorUnlessStatusCode(http.StatusOK,http.StatusCreated,http.StatusAccepted),\n autorest.ByUnmarshallingJSON(&result),\n autorest.ByClosing())\n result.Response = autorest.Response{Response: resp}\n return\n }", "func (p *AuroraSchedulerManagerClient) CreateJob(ctx context.Context, description *JobConfiguration) (r *Response, err error) {\n var _args132 AuroraSchedulerManagerCreateJobArgs\n _args132.Description = description\n var _result133 AuroraSchedulerManagerCreateJobResult\n if err = p.Client_().Call(ctx, \"createJob\", &_args132, &_result133); err != nil {\n return\n }\n return _result133.GetSuccess(), nil\n}", "func (c *jobsRESTClient) CreateJobOperation(name string) *CreateJobOperation {\n\toverride := fmt.Sprintf(\"/v2/%s\", name)\n\treturn &CreateJobOperation{\n\t\tlro: longrunning.InternalNewOperation(*c.LROClient, &longrunningpb.Operation{Name: name}),\n\t\tpollPath: override,\n\t}\n}", "func HandleAddJob(cache job.JobCache, defaultOwner string, disableLocalJobs bool) func(http.ResponseWriter,\n\t*http.Request) {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tnewJob, err := unmarshalNewJob(r)\n\t\tif err != nil {\n\t\t\terrorEncodeJSON(err, http.StatusBadRequest, w)\n\t\t\treturn\n\t\t}\n\n\t\tif disableLocalJobs && (newJob.JobType == job.LocalJob) {\n\t\t\terrorEncodeJSON(errors.New(\"local jobs are disabled\"), http.StatusForbidden, w)\n\t\t\treturn\n\t\t}\n\n\t\tif defaultOwner != \"\" && newJob.Owner == \"\" {\n\t\t\tnewJob.Owner = defaultOwner\n\t\t}\n\n\t\tif newJob.JobType == job.RemoteJob {\n\t\t\tisValid, err := validateJob(r, newJob)\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorf(\"Unable to validate job %s due to %v\", newJob.Name, err)\n\t\t\t\tw.WriteHeader(http.StatusForbidden)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif !isValid {\n\t\t\t\tlog.Errorf(\"Validation failed for job %s\", newJob.Name)\n\t\t\t\tw.WriteHeader(http.StatusForbidden)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\terr = newJob.Init(cache)\n\t\tif err != nil {\n\t\t\terrStr := fmt.Sprintf(\"Error occurred when initializing the job: %+v\", newJob)\n\t\t\tlog.Errorf(errStr+\": %s\", err)\n\t\t\terrorEncodeJSON(errors.New(errStr), http.StatusBadRequest, w)\n\t\t\treturn\n\t\t}\n\n\t\tresp := &AddJobResponse{\n\t\t\tId: newJob.Id,\n\t\t}\n\n\t\tw.Header().Set(contentType, jsonContentType)\n\t\tw.WriteHeader(http.StatusCreated)\n\t\tif err := json.NewEncoder(w).Encode(resp); err != nil {\n\t\t\tlog.Errorf(\"Error occurred when marshaling response: %s\", err)\n\t\t\treturn\n\t\t}\n\t}\n}", "func createJobWithCustomAttributes(w io.Writer, projectID, companyID, jobTitle string) (*talentpb.Job, error) {\n\tctx := context.Background()\n\n\t// Initialize a job service client.\n\tc, err := talent.NewJobClient(ctx)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"talent.NewJobClient: %w\", err)\n\t}\n\tdefer c.Close()\n\n\t// requisitionID shoud be the unique ID in your system\n\trequisitionID := fmt.Sprintf(\"job-with-custom-attribute-%s\", 
uuid.Must(uuid.NewV4()).String())\n\tjobToCreate := &talentpb.Job{\n\t\tCompany: fmt.Sprintf(\"projects/%s/companies/%s\", projectID, companyID),\n\t\tRequisitionId: requisitionID,\n\t\tTitle: jobTitle,\n\t\tApplicationInfo: &talentpb.Job_ApplicationInfo{\n\t\t\tUris: []string{\"https://googlesample.com/career\"},\n\t\t},\n\t\tDescription: \"Design, devolop, test, deploy, maintain and improve software.\",\n\t\tLanguageCode: \"en-US\",\n\t\tPromotionValue: 2,\n\t\tEmploymentTypes: []talentpb.EmploymentType{talentpb.EmploymentType_FULL_TIME},\n\t\tAddresses: []string{\"Mountain View, CA\"},\n\t\tCustomAttributes: map[string]*talentpb.CustomAttribute{\n\t\t\t\"someFieldString\": {\n\t\t\t\tFilterable: true,\n\t\t\t\tStringValues: []string{\"someStrVal\"},\n\t\t\t},\n\t\t\t\"someFieldLong\": {\n\t\t\t\tFilterable: true,\n\t\t\t\tLongValues: []int64{900},\n\t\t\t},\n\t\t},\n\t\tCompensationInfo: &talentpb.CompensationInfo{\n\t\t\tEntries: []*talentpb.CompensationInfo_CompensationEntry{\n\t\t\t\t{\n\t\t\t\t\tType: talentpb.CompensationInfo_BASE,\n\t\t\t\t\tUnit: talentpb.CompensationInfo_HOURLY,\n\t\t\t\t\tCompensationAmount: &talentpb.CompensationInfo_CompensationEntry_Amount{\n\t\t\t\t\t\tAmount: &money.Money{\n\t\t\t\t\t\t\tCurrencyCode: \"USD\",\n\t\t\t\t\t\t\tUnits: 1,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\t// Construct a createJob request.\n\treq := &talentpb.CreateJobRequest{\n\t\tParent: fmt.Sprintf(\"projects/%s\", projectID),\n\t\tJob: jobToCreate,\n\t}\n\n\tresp, err := c.CreateJob(ctx, req)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"CreateJob: %w\", err)\n\t}\n\n\tfmt.Fprintf(w, \"Created job with custom attributres: %q\\n\", resp.GetName())\n\tfmt.Fprintf(w, \"Custom long field has value: %v\\n\", resp.GetCustomAttributes()[\"someFieldLong\"].GetLongValues())\n\n\treturn resp, nil\n}", "func (c *Client) JobFromID(ctx context.Context, id string) (*Job, error) {\n\treturn c.JobFromProject(ctx, c.projectID, id, c.Location)\n}", "func (cli *Client) CreateJob(pipelineName, sourceKey, targetKey, presetName string) (*api.CreateJobResponse, error) {\n\treturn api.CreateJob(cli, pipelineName, sourceKey, targetKey, presetName)\n}", "func AddJob(projectKey, pipelineName string, j *Job) error {\n\turi := fmt.Sprintf(\"/project/%s/pipeline/%s/stage/%d/job\", projectKey, pipelineName, j.PipelineStageID)\n\n\tdata, err := json.Marshal(j)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, code, err := Request(\"POST\", uri, data)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif code >= 300 {\n\t\treturn fmt.Errorf(\"HTTP %d\", code)\n\t}\n\n\treturn nil\n}", "func NewResource(Params ...interface{}) *Resource {\n\tif len(Params) == 0 {\n\t\treturn nil\n\t}\n\tvar URL = toolbox.AsString(Params[0])\n\tURL = url.Normalize(URL, file.Scheme)\n\n\tvar credential string\n\tif len(Params) > 1 {\n\t\tcredential = toolbox.AsString(Params[1])\n\t}\n\treturn &Resource{\n\t\tURL: URL,\n\t\tCredentials: credential,\n\t}\n}", "func (j *JobIDConfig) createJobRef(c *Client) *bq.JobReference {\n\tprojectID := j.ProjectID\n\tif projectID == \"\" { // Use Client.ProjectID as a default.\n\t\tprojectID = c.projectID\n\t}\n\tloc := j.Location\n\tif loc == \"\" { // Use Client.Location as a default.\n\t\tloc = c.Location\n\t}\n\tjr := &bq.JobReference{ProjectId: projectID, Location: loc}\n\tif j.JobID == \"\" {\n\t\tjr.JobId = randomIDFn()\n\t} else if j.AddJobIDSuffix {\n\t\tjr.JobId = j.JobID + \"-\" + randomIDFn()\n\t} else {\n\t\tjr.JobId = j.JobID\n\t}\n\treturn jr\n}", "func (c 
*JobClient) Create() *JobCreate {\n\tmutation := newJobMutation(c.config, OpCreate)\n\treturn &JobCreate{config: c.config, hooks: c.Hooks(), mutation: mutation}\n}", "func NewJobHandler(js server.JobService, n int, lg *logrus.Logger) *JobHandler {\n\treturn &JobHandler{\n\t\tLogger: lg,\n\t\tJobService: js,\n\t\tQueue: jobqueue.NewJobQueue(js, n, func(j *server.Job) {\n\t\t\tjs.UpdateJob(j)\n\t\t}, lg),\n\t}\n}", "func newScheduledJobs(c *BatchClient, namespace string) *scheduledJobs {\n\treturn &scheduledJobs{c, namespace}\n}", "func NewEducationSubmissionResource()(*EducationSubmissionResource) {\n m := &EducationSubmissionResource{\n Entity: *NewEntity(),\n }\n return m\n}", "func New(name string) *Resource {\n\n\tlastId := (int64)(len(resourceMap))\n\n\treturn &Resource{\n\t\tId: lastId + 1,\n\t\tName: name,\n\t\tStatus: true,\n\t\tCreatedAt: time.Now(),\n\t\tUpdatedAt: time.Now(),\n\t}\n}", "func NewCfnJob(scope constructs.Construct, id *string, props *CfnJobProps) CfnJob {\n\t_init_.Initialize()\n\n\tj := jsiiProxy_CfnJob{}\n\n\t_jsii_.Create(\n\t\t\"aws-cdk-lib.aws_glue.CfnJob\",\n\t\t[]interface{}{scope, id, props},\n\t\t&j,\n\t)\n\n\treturn &j\n}" ]
[ "0.71115786", "0.6927896", "0.68013906", "0.6778276", "0.67617893", "0.6550171", "0.6489", "0.63859403", "0.63677716", "0.6361811", "0.63571984", "0.63510984", "0.634167", "0.6319448", "0.63165826", "0.62932074", "0.6272358", "0.62598807", "0.6237205", "0.6236662", "0.62259847", "0.62196493", "0.6186681", "0.611666", "0.61102563", "0.6086934", "0.6081527", "0.6042167", "0.6000776", "0.5998787", "0.59928304", "0.59910905", "0.5990378", "0.5979861", "0.5975571", "0.59527236", "0.5951335", "0.59475213", "0.5941112", "0.59260094", "0.591973", "0.58784294", "0.586709", "0.58526194", "0.5838112", "0.58093673", "0.5799073", "0.5778585", "0.5769108", "0.5757702", "0.5750888", "0.5735112", "0.57274204", "0.5715492", "0.5706453", "0.56936604", "0.56932724", "0.569159", "0.56795555", "0.5676714", "0.566075", "0.56195796", "0.55897814", "0.5585792", "0.55857664", "0.5575672", "0.5571097", "0.5567372", "0.55660063", "0.5554598", "0.5548403", "0.5543121", "0.5540335", "0.55355734", "0.55328584", "0.5522142", "0.5521706", "0.5520703", "0.5518784", "0.55099046", "0.54864806", "0.5474512", "0.54710865", "0.54656434", "0.5465049", "0.546006", "0.5459418", "0.5453703", "0.54475385", "0.5446859", "0.5445093", "0.5436502", "0.54347545", "0.54215765", "0.54158175", "0.5411537", "0.5409832", "0.5406941", "0.54048854", "0.5404604" ]
0.7175808
0
GetName implements the api2go EntityNamer interface
func (r JobResource) GetName() string { return "jobs" }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (x ExternalEntity) GetName() string {\n\treturn x.Name\n}", "func (x GenericEntity) GetName() string {\n\treturn x.Name\n}", "func (x DashboardEntity) GetName() string {\n\treturn x.Name\n}", "func (x ApmDatabaseInstanceEntity) GetName() string {\n\treturn x.Name\n}", "func (x ApmApplicationEntity) GetName() string {\n\treturn x.Name\n}", "func (o EntityRecognizerOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *EntityRecognizer) pulumi.StringOutput { return v.Name }).(pulumi.StringOutput)\n}", "func (x GenericInfrastructureEntity) GetName() string {\n\treturn x.Name\n}", "func (x ApmExternalServiceEntity) GetName() string {\n\treturn x.Name\n}", "func (x BrowserApplicationEntity) GetName() string {\n\treturn x.Name\n}", "func (x MobileApplicationEntity) GetName() string {\n\treturn x.Name\n}", "func (x InfrastructureHostEntity) GetName() string {\n\treturn x.Name\n}", "func (x SyntheticMonitorEntity) GetName() string {\n\treturn x.Name\n}", "func (x UnavailableEntity) GetName() string {\n\treturn x.Name\n}", "func (o *GovChainMetadata) GetName() string {\n\tif o == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\n\treturn o.Name\n}", "func (o *DisplayInfo) GetName() string {\n\tif o == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\n\treturn o.Name\n}", "func (o *ARVRInterface) GetName() gdnative.String {\n\t//log.Println(\"Calling ARVRInterface.GetName()\")\n\n\t// Build out the method's arguments\n\tptrArguments := make([]gdnative.Pointer, 0, 0)\n\n\t// Get the method bind\n\tmethodBind := gdnative.NewMethodBind(\"ARVRInterface\", \"get_name\")\n\n\t// Call the parent method.\n\t// String\n\tretPtr := gdnative.NewEmptyString()\n\tgdnative.MethodBindPtrCall(methodBind, o.GetBaseObject(), ptrArguments, retPtr)\n\n\t// If we have a return type, convert it from a pointer into its actual object.\n\tret := gdnative.NewStringFromPointer(retPtr)\n\treturn ret\n}", "func (x InfrastructureAwsLambdaFunctionEntity) GetName() string {\n\treturn x.Name\n}", "func (m *metadata) GetName() string {\n\treturn m.name\n}", "func (x WorkloadEntity) GetName() string {\n\treturn x.Name\n}", "func (n Normalizer) GetName() string {\n\treturn n.name\n}", "func (x SecureCredentialEntity) GetName() string {\n\treturn x.Name\n}", "func (i *Identity) GetName() string {\n\treturn i.Name\n}", "func (e *Entry) GetName() string {\n\tif len(e.NameRaw) > 0 {\n\t\treturn string(e.NameRaw)\n\t}\n\treturn e.Name\n}", "func (m *Metadata) GetName() string {\n\treturn m.Name\n}", "func (o *ContentProvider2) GetName() string {\n\tif o == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\n\treturn o.Name\n}", "func (Tellurium) GetName() string {\n\treturn \"Tellurium\"\n}", "func (o *RelatedAssetSerializerWithPermission) GetName() string {\n\tif o == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\n\treturn o.Name\n}", "func (meta *Metadata) GetName() string {\n\treturn meta.Name\n}", "func (a Entity) Name() []string {\n\tnewName := make([]string, len(a.name))\n\tcopy(newName, a.name)\n\n\treturn newName\n}", "func (_ EntityHeadLook) Name() string { return \"*EntityHeadLook\" }", "func (a AliasedName) GetName() string { return a.Name }", "func (e *EnumMatcher) GetName() string { return e.Name }", "func (hs100 *Hs100) GetName() (string, error) {\n\tresp, err := hs100.commandSender.SendCommand(hs100.Address, isOnCommand)\n\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tname, err := name(resp)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn name, nil\n}", "func (o *CreateInstance) GetName() 
string {\n\tif o == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\n\treturn o.Name\n}", "func (x GenericEntityOutline) GetName() string {\n\treturn x.Name\n}", "func (m *BookingNamedEntity) GetDisplayName()(*string) {\n    val, err := m.GetBackingStore().Get(\"displayName\")\n    if err != nil {\n        panic(err)\n    }\n    if val != nil {\n        return val.(*string)\n    }\n    return nil\n}", "func (b *Being) GetName() string {\n\treturn b.Name.Display\n}", "func (x ApmDatabaseInstanceEntityOutline) GetName() string {\n\treturn x.Name\n}", "func (o *Ga4ghChemotherapy) GetName() string {\n\tif o == nil || o.Name == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Name\n}", "func (img Image) GetName() string {\n\treturn img.Name\n}", "func (o *CreateTemplateRequestEntity) GetName() string {\n\tif o == nil || o.Name == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Name\n}", "func (obj *ObjectBase) GetName() string {\n\treturn obj.name\n}", "func (s *AppServerV3) GetName() string {\n\treturn s.Metadata.Name\n}", "func (x ThirdPartyServiceEntity) GetName() string {\n\treturn x.Name\n}", "func (m *Matcher) GetName() string {\n\treturn m.name\n}", "func (o *TokenCard) GetName() string {\n\tif o == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\n\treturn o.Name\n}", "func (o *ContentProviderReadDetailed) GetName() string {\n\tif o == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\n\treturn o.Name\n}", "func (x ExternalEntityOutline) GetName() string {\n\treturn x.Name\n}", "func (o *Ga4ghTumourboard) GetName() string {\n\tif o == nil || o.Name == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Name\n}", "func (o *Content) GetName() string {\n\tif o == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\n\treturn o.Name\n}", "func (inst *InstTrunc) GetName() string {\n\treturn inst.Name\n}", "func (x ApmApplicationEntityOutline) GetName() string {\n\treturn x.Name\n}", "func (x GenericInfrastructureEntityOutline) GetName() string {\n\treturn x.Name\n}", "func (z *Zombie) GetName() string {\n\treturn z.name\n}", "func (b *Base) GetName() string {\n\treturn b.Name\n}", "func (b *Base) GetName() string {\n\treturn b.Name\n}", "func (o *OIDC) GetName() string {\n\treturn o.Name\n}", "func (t *EntityType) Name() string {\n\treturn t.TypeName\n}", "func (o *AuthorDto) GetName() string {\n\tif o == nil || o.Name == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Name\n}", "func (inst *InstSExt) GetName() string {\n\treturn inst.Name\n}", "func (r Roster) GetName(i int) string {\n\treturn r[i].name\n}", "func (r *Member) GetName() string {\n\tvar args [0]interface{}\n\n\tvar argsSerialized []byte\n\n\terr := proxyctx.Current.Serialize(args, &argsSerialized)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tres, err := proxyctx.Current.RouteCall(r.Reference, true, \"GetName\", argsSerialized)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tret := [1]interface{}{}\n\tvar ret0 string\n\tret[0] = &ret0\n\n\terr = proxyctx.Current.Deserialize(res, &ret)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn ret0\n}", "func GetName() string {\n\n\tvar name string\n\n\t// Sends a GET request for the URL\n\tres, error := http.Get(PageURL)\n\n\tif error != nil {\n\t\tfmt.Println(error)\n\t\tname = \"No name is given.\"\n\t} else {\n\n\t\t// creates a tokenizer from the response body\n\t\tpage := html.NewTokenizer(res.Body)\n\n\t\tfound := false\n\n\t\tfor !found {\n\t\t\t_ = page.Next()\n\t\t\ttoken := page.Token()\n\n\t\t\tif token.Type == html.ErrorToken 
{\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\t// checks whether the token is a title\n\t\t\tif token.DataAtom == atom.Title {\n\t\t\t\t_ = page.Next()\n\n\t\t\t\t// store the content of the next token -> the content of the title\n\t\t\t\ttoken := page.Token()\n\t\t\t\tname = token.String()\n\n\t\t\t\t// found -> jump to the end\n\t\t\t\tfound = true\n\n\t\t\t\t// no title found -> store the default value\n\t\t\t} else {\n\t\t\t\tname = \"No name is given.\"\n\t\t\t}\n\t\t}\n\t}\n\n\treturn name\n}", "func (o AiFeatureStoreEntityTypeFeatureOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *AiFeatureStoreEntityTypeFeature) pulumi.StringOutput { return v.Name }).(pulumi.StringOutput)\n}", "func (p *Person) GetName() string {\n\treturn p.name\n}", "func (e *Enum) GetName() string { return e.Name }", "func (o *IncidentTeamResponseAttributes) GetName() string {\n\tif o == nil || o.Name == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Name\n}", "func (s *DatabaseServerV3) GetName() string {\n\treturn s.Metadata.Name\n}", "func (Lawrencium) GetName() string {\n\treturn \"Lawrencium\"\n}", "func (t *Team) GetName() string {\n\tif t == nil || t.Name == nil {\n\t\treturn \"\"\n\t}\n\treturn *t.Name\n}", "func (x DashboardEntityOutline) GetName() string {\n\treturn x.Name\n}", "func (e *EnumEntry) GetName() string { return e.Name }", "func (o ApiOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *Api) pulumi.StringOutput { return v.Name }).(pulumi.StringOutput)\n}", "func GetName() string {\n\treturn newX2ethereum().GetName()\n}", "func GetName(node *yaml.RNode, path string) string {\n\treturn GetStringField(node, path, \"metadata\", \"name\")\n}", "func (t *SentryTaggedStruct) GetName() string {\n\treturn \"\"\n}", "func (o *IamUserAuthorization) GetName() string {\n\tif o == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\n\treturn o.Name\n}", "func (o *EquipmentBaseSensor) GetName() string {\n\tif o == nil || o.Name == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Name\n}", "func (arg1 *UConverter) GetName(arg2 *UErrorCode) string", "func (api *API) GetName() string {\n\treturn \"api\"\n}", "func (b *Base) GetName() string {\n\tpanic(\"must override Name() on Base model\")\n\treturn \"base\"\n}", "func ExamplePerson_GetName() {\n\tperson := NewPerson(\"bob\")\n\tfmt.Println(person.GetName())\n\t// Output: bob\n}", "func (s *FindImageFromTagsStage) GetName() string {\n\treturn s.Name\n}", "func (_MonsterOwnership *MonsterOwnershipCaller) Name(opts *bind.CallOpts) (string, error) {\n\tvar (\n\t\tret0 = new(string)\n\t)\n\tout := ret0\n\terr := _MonsterOwnership.contract.Call(opts, out, \"name\")\n\treturn *ret0, err\n}", "func (o *RackUnitPersonality) GetName() string {\n\tif o == nil || o.Name == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Name\n}", "func (o *Platform) GetName() string {\n\tif o == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\n\treturn o.Name\n}", "func (a *AnnotationsFile) GetName() string {\n\tif a.Annotations.PackageName != \"\" {\n\t\treturn a.Annotations.PackageName\n\t}\n\treturn \"\"\n}", "func (o ApiOperationResponseRepresentationExampleOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v ApiOperationResponseRepresentationExample) string { return v.Name }).(pulumi.StringOutput)\n}", "func (inst *InstBitCast) GetName() string {\n\treturn inst.Name\n}", "func (c *AmaraProvider) GetName() string {\n\treturn \"amara\"\n}", "func (o *View) GetName() string {\n\tif o == nil {\n\t\tvar ret 
string\n\t\treturn ret\n\t}\n\n\treturn o.Name\n}", "func (t BaseTechnique) GetName() string {\n\treturn t.Name\n}", "func (o *EmbeddedUnitModel) GetName() string {\n\tif o == nil || o.Name == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Name\n}", "func (o *Tag) GetName() string {\n\tif o == nil || o.Name == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Name\n}", "func (m *Resource) GetName() string {\n\tif m != nil {\n\t\treturn m.Name\n\t}\n\treturn \"\"\n}", "func (x ApmExternalServiceEntityOutline) GetName() string {\n\treturn x.Name\n}", "func (o ApiOperationRequestRepresentationExampleOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v ApiOperationRequestRepresentationExample) string { return v.Name }).(pulumi.StringOutput)\n}", "func (o *SLOCorrectionResponseAttributesModifier) GetName() string {\n\tif o == nil || o.Name == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Name\n}", "func (x BrowserApplicationEntityOutline) GetName() string {\n\treturn x.Name\n}", "func (m *ProjectionMapping) GetName() string {\n\treturn m.Name\n}", "func (m Metadata) Name() string {\n\tname, ok := m[\"name\"]\n\tif !ok {\n\t\treturn \"\"\n\t}\n\treturn name.(string)\n}" ]
[ "0.72349846", "0.72203386", "0.7149104", "0.7125047", "0.7036643", "0.6993078", "0.6991599", "0.68841416", "0.686844", "0.6778971", "0.67610085", "0.6752631", "0.67306346", "0.6612504", "0.6607173", "0.6597457", "0.6588913", "0.65749925", "0.6517986", "0.65003", "0.64853036", "0.64834183", "0.64812946", "0.6457664", "0.6455576", "0.6453631", "0.64519376", "0.6443313", "0.64357996", "0.6407663", "0.6372782", "0.6365157", "0.6365084", "0.6349616", "0.6346033", "0.6343223", "0.6335851", "0.6318998", "0.63035077", "0.6287528", "0.628472", "0.6279212", "0.62782", "0.6269534", "0.62596", "0.625955", "0.62574494", "0.6255624", "0.6241621", "0.6237596", "0.6235173", "0.62168413", "0.62153834", "0.62013566", "0.6193693", "0.6193693", "0.6192137", "0.61909467", "0.61876607", "0.6181524", "0.6181359", "0.61703676", "0.6166667", "0.6159205", "0.61576015", "0.61539733", "0.61507547", "0.61492276", "0.6141577", "0.61349297", "0.61348593", "0.6132782", "0.61313087", "0.6126239", "0.6124425", "0.6117747", "0.6116463", "0.6114115", "0.61118066", "0.6111348", "0.61105216", "0.6101175", "0.6093746", "0.60933626", "0.609137", "0.60808176", "0.60723233", "0.6069911", "0.60683787", "0.6068348", "0.6067701", "0.6064258", "0.60642177", "0.605736", "0.60544705", "0.60544163", "0.6052384", "0.6050646", "0.604884", "0.6047938", "0.60475945" ]
0.0
-1
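The negatives in the record above are a catalogue of name accessors, and the recurring shape among them is a nil-safe getter over an optional string field (see the *Tag and *View snippets). The sketch below distills that pattern into a self-contained, runnable form; the Tag type here is a minimal stand-in and is not copied from any single snippet.

package main

import "fmt"

// Tag mirrors the optional-field shape used by several snippets above:
// a pointer receiver and a pointer-typed Name field, either of which may be nil.
type Tag struct {
	Name *string
}

// GetName returns the dereferenced name, or the string zero value when the
// receiver or the field is nil - the same guard most of the snippets apply.
func (o *Tag) GetName() string {
	if o == nil || o.Name == nil {
		var ret string
		return ret
	}
	return *o.Name
}

func main() {
	n := "payments"
	fmt.Println((&Tag{Name: &n}).GetName()) // payments
	fmt.Println((*Tag)(nil).GetName())      // prints an empty line; no panic
}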
WithGroupIDConsumerOption provides an option to modify the GroupID for a consumer group
func WithGroupIDConsumerOption(groupID string) ConsumerOption { return func(c *Consumer) { c.config.GroupID = groupID } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func WithConsumerGroupID(groupID string) ConfigOpt {\n\treturn func(c *kafkalib.ConfigMap) {\n\t\t_ = c.SetKey(\"group.id\", groupID)\n\t}\n}", "func ConsumerSetGroupID(groupID string) model.Option {\n\treturn model.FuncOption(func(d *model.Dispatcher) { d.ConsumerGroupID = groupID })\n}", "func DeliverGroup(g string) ConsumerOption {\n\treturn func(o *api.ConsumerConfig) error {\n\t\to.DeliverGroup = g\n\t\treturn nil\n\t}\n}", "func applyServerGroupID(opts servers.CreateOptsBuilder, serverGroupID string) servers.CreateOptsBuilder {\n\tif serverGroupID != \"\" {\n\t\treturn schedulerhints.CreateOptsExt{\n\t\t\tCreateOptsBuilder: opts,\n\t\t\tSchedulerHints: schedulerhints.SchedulerHints{\n\t\t\t\tGroup: serverGroupID,\n\t\t\t},\n\t\t}\n\t}\n\treturn opts\n}", "func (c ConsumerConfig) Apply(kafkaConf *kafkalib.ConfigMap) {\n\tif id := c.GroupID; id != \"\" {\n\t\t_ = kafkaConf.SetKey(\"group.id\", id)\n\t}\n}", "func WithGroup(group guid.GUID) ProviderOpt {\n\treturn func(opts *providerOpts) {\n\t\topts.group = group\n\t}\n}", "func ConsumerID(id string) ConsumerOption {\n\treturn func(o *ConsumerOptions) error {\n\t\tif id == \"\" {\n\t\t\treturn errors.New(\"invalid consumer id\")\n\t\t}\n\t\to.ConsumerID = id\n\t\treturn nil\n\t}\n}", "func NewDeleteConsumerGroupCommand(f *factory.Factory) *cobra.Command {\n\topts := &Options{\n\t\tConnection: f.Connection,\n\t\tCfgHandler: f.CfgHandler,\n\t\tIO: f.IOStreams,\n\t\tLogger: f.Logger,\n\t\tlocalizer: f.Localizer,\n\t}\n\n\tcmd := &cobra.Command{\n\t\tUse: opts.localizer.LocalizeByID(\"kafka.consumerGroup.delete.cmd.use\"),\n\t\tShort: opts.localizer.LocalizeByID(\"kafka.consumerGroup.delete.cmd.shortDescription\"),\n\t\tLong: opts.localizer.LocalizeByID(\"kafka.consumerGroup.delete.cmd.longDescription\"),\n\t\tExample: opts.localizer.LocalizeByID(\"kafka.consumerGroup.delete.cmd.example\"),\n\t\tArgs: cobra.NoArgs,\n\t\tRunE: func(cmd *cobra.Command, args []string) (err error) {\n\t\t\tif opts.kafkaID != \"\" {\n\t\t\t\treturn runCmd(opts)\n\t\t\t}\n\n\t\t\tif !f.CfgHandler.Cfg.HasKafka() {\n\t\t\t\treturn errors.New(opts.localizer.LocalizeByID(\"kafka.consumerGroup.common.error.noKafkaSelected\"))\n\t\t\t}\n\n\t\t\topts.kafkaID = opts.CfgHandler.Cfg.Services.Kafka.ClusterID\n\n\t\t\treturn runCmd(opts)\n\t\t},\n\t}\n\n\topts.localizer.LocalizeByID(\"kafka.consumerGroup.common.flag.id.description\", localize.NewEntry(\"Action\", \"delete\"))\n\tcmd.Flags().BoolVarP(&opts.skipConfirm, \"yes\", \"y\", false, opts.localizer.LocalizeByID(\"kafka.consumerGroup.delete.flag.yes.description\"))\n\tcmd.Flags().StringVar(&opts.id, \"id\", \"\", opts.localizer.LocalizeByID(\"kafka.consumerGroup.common.flag.id.description\", localize.NewEntry(\"Action\", \"delete\")))\n\t_ = cmd.MarkFlagRequired(\"id\")\n\n\t// flag based completions for ID\n\t_ = cmd.RegisterFlagCompletionFunc(\"id\", func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {\n\t\treturn cmdutil.FilterValidConsumerGroupIDs(f, toComplete)\n\t})\n\n\treturn cmd\n}", "func WithGroupID(groupID uint32) DescriptorSelectorFunc {\n\treturn func(d Descriptor) (bool, error) {\n\t\tif groupID == 0 {\n\t\t\treturn false, ErrInvalidGroupID\n\t\t}\n\t\treturn d.GroupID() == groupID, nil\n\t}\n}", "func (o *GroupV2AddOptionalConversationParams) SetGroupID(groupID int64) {\n\to.GroupID = groupID\n}", "func (r *Replicator) UpdateConsumerGroup(ctx thrift.Context, updateRequest *shared.UpdateConsumerGroupRequest) (*shared.ConsumerGroupDescription, error) 
{\n\tr.m3Client.IncCounter(metrics.ReplicatorUpdateCgScope, metrics.ReplicatorRequests)\n\n\tcgDesc, err := r.metaClient.UpdateConsumerGroup(ctx, updateRequest)\n\tif err != nil {\n\t\tr.logger.WithFields(bark.Fields{\n\t\t\tcommon.TagCnsPth: common.FmtCnsPth(updateRequest.GetConsumerGroupName()),\n\t\t\tcommon.TagDstPth: common.FmtDstPth(updateRequest.GetDestinationPath()),\n\t\t\tcommon.TagDst: common.FmtDst(cgDesc.GetDestinationUUID()),\n\t\t\tcommon.TagErr: err,\n\t\t}).Error(`Error updating cg`)\n\t\tr.m3Client.IncCounter(metrics.ReplicatorUpdateCgScope, metrics.ReplicatorFailures)\n\t\treturn nil, err\n\t}\n\n\tr.logger.WithFields(bark.Fields{\n\t\tcommon.TagCnsPth: common.FmtCnsPth(updateRequest.GetConsumerGroupName()),\n\t\tcommon.TagCnsm: common.FmtCnsm(cgDesc.GetConsumerGroupUUID()),\n\t\tcommon.TagDstPth: common.FmtDstPth(updateRequest.GetDestinationPath()),\n\t\tcommon.TagDst: common.FmtDst(cgDesc.GetDestinationUUID()),\n\t\tcommon.TagDLQID: common.FmtDLQID(cgDesc.GetDeadLetterQueueDestinationUUID()),\n\t\t`IsMultiZone`: cgDesc.GetIsMultiZone(),\n\t\t`ActiveZone`: cgDesc.GetActiveZone(),\n\t}).Info(`Updated cg`)\n\treturn cgDesc, nil\n}", "func withGroupID(id int) groupOption {\n\treturn func(m *GroupMutation) {\n\t\tvar (\n\t\t\terr error\n\t\t\tonce sync.Once\n\t\t\tvalue *Group\n\t\t)\n\t\tm.oldValue = func(ctx context.Context) (*Group, error) {\n\t\t\tonce.Do(func() {\n\t\t\t\tif m.done {\n\t\t\t\t\terr = fmt.Errorf(\"querying old values post mutation is not allowed\")\n\t\t\t\t} else {\n\t\t\t\t\tvalue, err = m.Client().Group.Get(ctx, id)\n\t\t\t\t}\n\t\t\t})\n\t\t\treturn value, err\n\t\t}\n\t\tm.id = &id\n\t}\n}", "func withGroupID(id int) groupOption {\n\treturn func(m *GroupMutation) {\n\t\tvar (\n\t\t\terr error\n\t\t\tonce sync.Once\n\t\t\tvalue *Group\n\t\t)\n\t\tm.oldValue = func(ctx context.Context) (*Group, error) {\n\t\t\tonce.Do(func() {\n\t\t\t\tif m.done {\n\t\t\t\t\terr = fmt.Errorf(\"querying old values post mutation is not allowed\")\n\t\t\t\t} else {\n\t\t\t\t\tvalue, err = m.Client().Group.Get(ctx, id)\n\t\t\t\t}\n\t\t\t})\n\t\t\treturn value, err\n\t\t}\n\t\tm.id = &id\n\t}\n}", "func (m *kafkaConsumerGroupManagerImpl) StartConsumerGroup(ctx context.Context, groupId string, topics []string, handler KafkaConsumerHandler, ref types.NamespacedName, options ...SaramaConsumerHandlerOption) error {\n\tlogger := logging.FromContext(ctx)\n\n\tgroupLogger := m.logger.With(zap.String(\"GroupId\", groupId))\n\tgroupLogger.Info(\"Creating New Managed ConsumerGroup\")\n\tgroup, err := m.factory.createConsumerGroup(groupId)\n\tif err != nil {\n\t\tgroupLogger.Error(\"Failed To Create New Managed ConsumerGroup\")\n\t\treturn err\n\t}\n\n\tctx, cancel := context.WithCancel(context.Background())\n\n\t// consume is passed in to the KafkaConsumerGroupFactory so that it will call the manager's\n\t// consume() function instead of the one on the internal sarama ConsumerGroup. 
This allows the\n\t// manager to continue to block in the Consume call while a group goes through a stop/start cycle.\n\tconsume := func(ctx context.Context, topics []string, handler sarama.ConsumerGroupHandler) error {\n\t\tlogger.Debug(\"Consuming Messages On managed Consumer Group\", zap.String(\"GroupId\", groupId))\n\t\treturn m.consume(ctx, groupId, topics, handler)\n\t}\n\n\t// The only thing we really want from the factory is the cancel function for the customConsumerGroup\n\tcustomGroup := m.factory.startExistingConsumerGroup(groupId, group, consume, topics, logger, handler, ref, options...)\n\tmanagedGrp := createManagedGroup(ctx, m.logger, group, cancel, customGroup.cancel)\n\n\t// Add the Sarama ConsumerGroup we obtained from the factory to the managed group map,\n\t// so that it can be stopped and started via control-protocol messages.\n\tm.setGroup(groupId, managedGrp)\n\tm.notify(ManagerEvent{Event: GroupCreated, GroupId: groupId})\n\treturn nil\n}", "func (rec *RawEventCreate) SetGroupID(s string) *RawEventCreate {\n\trec.mutation.SetGroupID(s)\n\treturn rec\n}", "func (m *PrivilegedAccessGroupEligibilitySchedule) SetGroupId(value *string)() {\n err := m.GetBackingStore().Set(\"groupId\", value)\n if err != nil {\n panic(err)\n }\n}", "func (tt *Tester) ConsumerGroupBuilder() goka.ConsumerGroupBuilder {\n\treturn func(brokers []string, group, clientID string) (sarama.ConsumerGroup, error) {\n\t\ttt.mClients.RLock()\n\t\tdefer tt.mClients.RUnlock()\n\t\tclient, exists := tt.clients[clientID]\n\t\tif !exists {\n\t\t\treturn nil, fmt.Errorf(\"cannot create consumergroup because no client registered with ID: %s\", clientID)\n\t\t}\n\n\t\tif client.consumerGroup == nil {\n\t\t\treturn nil, fmt.Errorf(\"Did not expect a group graph\")\n\t\t}\n\n\t\treturn client.consumerGroup, nil\n\t}\n}", "func (o *QtreeCollectionGetParams) SetGroupID(groupID *string) {\n\to.GroupID = groupID\n}", "func MountConsumerGroup(serverAddr string, numConsumers int) (cg ConsumerAPI, err error) {\n\truntime.GOMAXPROCS(numConsumers)\n\tserverConn, err := rpc.Dial(\"tcp\", serverAddr)\n\tcheckError(err)\n\n\tvar brokerAddr string\n\terr = serverConn.Call(\"Server.GetBrokerAddr\", true, &brokerAddr)\n\tcheckError(err)\n\n\tbrokerConn, err := rpc.Dial(\"tcp\", brokerAddr)\n\tcheckError(err)\n\n\tvar _ignored bool\n\tvar numOfPartitions uint8\n\terr = brokerConn.Call(\"Broker.GetNumOfPartitions\", &_ignored, &numOfPartitions)\n\n\tpartitions := make([]uint, numOfPartitions)\n\n\tcg = &ConsumerGroup{ServerAddr: serverAddr, ServerConn: serverConn, NumConsumers: numConsumers, PartitionNextOffset: partitions}\n\treturn cg, nil\n}", "func (lc *LessonplanCreate) SetGroupIDID(id int) *LessonplanCreate {\n\tlc.mutation.SetGroupIDID(id)\n\treturn lc\n}", "func (o *DeletePackageVersionParams) SetGroupID(groupID string) {\n\to.GroupID = groupID\n}", "func (kc *KClient) NewConsumerGroup(groupID string, debug bool, topics ...string) (*ConsumerGroup, error) {\n\tvar cg ConsumerGroup\n\tvar dChan chan *DEBUG\n\tkc.config.Producer.RequiredAcks = sarama.WaitForAll\n\tkc.config.Producer.Return.Successes = true\n\tconfig := cluster.NewConfig()\n\tconf := kc.config\n\tconfig.Config = *conf\n\tconfig.Group.Mode = cluster.ConsumerModePartitions\n\tif debug {\n\t\tconfig.Consumer.Return.Errors = true\n\t\tconfig.Group.Return.Notifications = true\n\t\tdChan = make(chan *DEBUG, 256)\n\t}\n\tconsumer, err := cluster.NewConsumer([]string{kc.bootStrap}, groupID, topics, config)\n\tif err != nil {\n\t\treturn &cg, 
err\n\t}\n\tcg.consumer = consumer\n\tif debug {\n\t\tcg.debugChan = dChan\n\t\tcg.haveDebugChan = make(chan bool, 1)\n\t\tgo startDEBUG(&cg)\n\t}\n\n\treturn &cg, nil\n}", "func (o *RetrieveCustomerGroupParams) SetGroupID(groupID string) {\n\to.GroupID = groupID\n}", "func (m *BarRecordMutation) SetGroupID(id int) {\n\tm.group = &id\n}", "func (r *Replicator) DeleteConsumerGroup(ctx thrift.Context, deleteRequest *shared.DeleteConsumerGroupRequest) error {\n\tr.m3Client.IncCounter(metrics.ReplicatorDeleteCgScope, metrics.ReplicatorRequests)\n\n\terr := r.metaClient.DeleteConsumerGroup(ctx, deleteRequest)\n\tif err != nil {\n\t\tr.logger.WithFields(bark.Fields{\n\t\t\tcommon.TagCnsPth: common.FmtCnsPth(deleteRequest.GetConsumerGroupName()),\n\t\t\tcommon.TagDstPth: common.FmtDstPth(deleteRequest.GetDestinationPath()),\n\t\t\tcommon.TagErr: err,\n\t\t}).Error(`Error deleting cg`)\n\t\tr.m3Client.IncCounter(metrics.ReplicatorDeleteCgScope, metrics.ReplicatorFailures)\n\t\treturn err\n\t}\n\n\tr.logger.WithFields(bark.Fields{\n\t\tcommon.TagCnsPth: common.FmtCnsPth(deleteRequest.GetConsumerGroupName()),\n\t\tcommon.TagDstPth: common.FmtDstPth(deleteRequest.GetDestinationPath()),\n\t}).Info(`Deleted cg`)\n\n\treturn nil\n}", "func (m *GroupPolicyDefinition) SetGroupPolicyCategoryId(value *i561e97a8befe7661a44c8f54600992b4207a3a0cf6770e5559949bc276de2e22.UUID)() {\n err := m.GetBackingStore().Set(\"groupPolicyCategoryId\", value)\n if err != nil {\n panic(err)\n }\n}", "func WithLinkedGroupID(groupID uint32) DescriptorSelectorFunc {\n\treturn func(d Descriptor) (bool, error) {\n\t\tif groupID == 0 {\n\t\t\treturn false, ErrInvalidGroupID\n\t\t}\n\t\tlinkedID, isGroup := d.LinkedID()\n\t\treturn isGroup && linkedID == groupID, nil\n\t}\n}", "func (o *FileInfoCollectionGetParams) SetGroupID(groupID *int64) {\n\to.GroupID = groupID\n}", "func (m *kafkaConsumerGroupManagerImpl) startConsumerGroup(lock *commands.CommandLock, groupId string) error {\n\tgroupLogger := m.logger.With(zap.String(\"GroupId\", groupId))\n\n\t// Lock the managedGroup before starting it, if lock.LockBefore is true\n\tif err := m.lockBefore(lock, groupId); err != nil {\n\t\tgroupLogger.Error(\"Failed to lock consumer group prior to starting\", zap.Error(err))\n\t\treturn err\n\t}\n\n\tgroupLogger.Info(\"Starting Managed ConsumerGroup\")\n\tmanagedGrp := m.getGroup(groupId)\n\tif managedGrp == nil {\n\t\tgroupLogger.Info(\"ConsumerGroup Not Managed - Ignoring Start Request\")\n\t\treturn fmt.Errorf(\"start requested for consumer group not in managed list: %s\", groupId)\n\t}\n\n\tcreateGroup := func() (sarama.ConsumerGroup, error) {\n\t\treturn m.factory.createConsumerGroup(groupId)\n\t}\n\n\t// Instruct the managed group to use this new ConsumerGroup\n\terr := managedGrp.start(createGroup)\n\tif err != nil {\n\t\tgroupLogger.Error(\"Failed To Restart Managed ConsumerGroup\", zap.Error(err))\n\t\treturn err\n\t}\n\n\t// Unlock the managedGroup after starting it, if lock.UnlockAfter is true\n\tif err = m.unlockAfter(lock, groupId); err != nil {\n\t\tgroupLogger.Error(\"Failed to unlock consumer group after starting\", zap.Error(err))\n\t\treturn err\n\t}\n\tm.notify(ManagerEvent{Event: GroupStarted, GroupId: groupId})\n\treturn nil\n}", "func (lc *LessonplanCreate) SetGroupID(s *Section) *LessonplanCreate {\n\treturn lc.SetGroupIDID(s.ID)\n}", "func (o *DeleteSystemObjectDefinitionsByIDAttributeGroupsByIDAttributeDefinitionsByIDParams) SetGroupID(groupID string) {\n\to.GroupID = groupID\n}", "func withGroup(node *Group) 
groupOption {\n\treturn func(m *GroupMutation) {\n\t\tm.oldValue = func(context.Context) (*Group, error) {\n\t\t\treturn node, nil\n\t\t}\n\t\tm.id = &node.ID\n\t}\n}", "func withGroup(node *Group) groupOption {\n\treturn func(m *GroupMutation) {\n\t\tm.oldValue = func(context.Context) (*Group, error) {\n\t\t\treturn node, nil\n\t\t}\n\t\tm.id = &node.ID\n\t}\n}", "func (b *MessagesSearchConversationsBuilder) GroupID(v int) *MessagesSearchConversationsBuilder {\n\tb.Params[\"group_id\"] = v\n\treturn b\n}", "func WithReaderConsumerOption(reader *kafgo.Reader) ConsumerOption {\n\treturn func(c *Consumer) { c.reader = reader }\n}", "func (o *AssignUserToCustomerGroupUsingPATCH1Params) SetGroupID(groupID string) {\n\to.GroupID = groupID\n}", "func (b *MessagesMarkAsImportantConversationBuilder) GroupID(v int) *MessagesMarkAsImportantConversationBuilder {\n\tb.Params[\"group_id\"] = v\n\treturn b\n}", "func (m *kafkaConsumerGroupManagerImpl) stopConsumerGroup(lock *commands.CommandLock, groupId string) error {\n\tgroupLogger := m.logger.With(zap.String(\"GroupId\", groupId))\n\n\t// Lock the managedGroup before stopping it, if lock.LockBefore is true\n\tif err := m.lockBefore(lock, groupId); err != nil {\n\t\tgroupLogger.Error(\"Failed to lock consumer group prior to stopping\", zap.Error(err))\n\t\treturn err\n\t}\n\n\tgroupLogger.Info(\"Stopping Managed ConsumerGroup\")\n\n\tmanagedGrp := m.getGroup(groupId)\n\tif managedGrp == nil {\n\t\tgroupLogger.Info(\"ConsumerGroup Not Managed - Ignoring Stop Request\")\n\t\treturn fmt.Errorf(\"stop requested for consumer group not in managed list: %s\", groupId)\n\t}\n\n\tif err := managedGrp.stop(); err != nil {\n\t\tgroupLogger.Error(\"Failed to stop managed consumer group\", zap.Error(err))\n\t\treturn err\n\t}\n\n\t// Unlock the managedGroup after stopping it, if lock.UnlockAfter is true\n\tif err := m.unlockAfter(lock, groupId); err != nil {\n\t\tgroupLogger.Error(\"Failed to unlock consumer group after stopping\", zap.Error(err))\n\t\treturn err\n\t}\n\tm.notify(ManagerEvent{Event: GroupStopped, GroupId: groupId})\n\treturn nil\n}", "func (b *MessagesEditBuilder) GroupID(v int) *MessagesEditBuilder {\n\tb.Params[\"group_id\"] = v\n\treturn b\n}", "func (c *KafkaCluster) pauseConsumerGroup(groupID string, adminID string, expiry time.Time) {\n\tc.lock.Lock()\n\tdefer c.lock.Unlock()\n\n\tlog.Warningf(\"Cluster marking group %s paused with expiry: %s\", groupID, expiry.Format(time.UnixDate))\n\tc.pausedGroups[groupID] = expiry\n}", "func (luo *LessonplanUpdateOne) SetGroupIDID(id int) *LessonplanUpdateOne {\n\tluo.mutation.SetGroupIDID(id)\n\treturn luo\n}", "func (m *kafkaConsumerGroupManagerImpl) setGroup(groupId string, group managedGroup) {\n\tm.groupLock.Lock()\n\tdefer m.groupLock.Unlock()\n\tm.groups[groupId] = group\n}", "func (b *MessagesGetConversationsByIDBuilder) GroupID(v int) *MessagesGetConversationsByIDBuilder {\n\tb.Params[\"group_id\"] = v\n\treturn b\n}", "func NewConsumerGroup(addrs []string, groupID string, config *sarama.Config, sensor instana.TracerLogger) (sarama.ConsumerGroup, error) {\n\tc, err := sarama.NewConsumerGroup(addrs, groupID, config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn consumerGroup{c, sensor}, nil\n}", "func (ref *Config) SetGroup(id string) {\n\tref.GroupID = id\n}", "func (b *MessagesGetConversationsBuilder) GroupID(v int) *MessagesGetConversationsBuilder {\n\tb.Params[\"group_id\"] = v\n\treturn b\n}", "func (c *Consumer) Group() string { return c.group }", "func (o 
*GroupV2AddOptionalConversationParams) WithGroupID(groupID int64) *GroupV2AddOptionalConversationParams {\n\to.SetGroupID(groupID)\n\treturn o\n}", "func (lu *LessonplanUpdate) SetGroupIDID(id int) *LessonplanUpdate {\n\tlu.mutation.SetGroupIDID(id)\n\treturn lu\n}", "func (op *ListOp) GroupID(val string) *ListOp {\n\tif op != nil {\n\t\top.QueryOpts.Set(\"group_id\", val)\n\t}\n\treturn op\n}", "func (b *GroupsLeaveBuilder) GroupID(v int) *GroupsLeaveBuilder {\n\tb.Params[\"group_id\"] = v\n\treturn b\n}", "func (b *MessagesSendBuilder) GroupID(v int) *MessagesSendBuilder {\n\tb.Params[\"group_id\"] = v\n\treturn b\n}", "func (b *MessagesDeleteConversationBuilder) GroupID(v int) *MessagesDeleteConversationBuilder {\n\tb.Params[\"group_id\"] = v\n\treturn b\n}", "func (b *MessagesDeleteBuilder) GroupID(v int) *MessagesDeleteBuilder {\n\tb.Params[\"group_id\"] = v\n\treturn b\n}", "func (b *PhotosGetOwnerCoverPhotoUploadServerBuilder) GroupID(v int) *PhotosGetOwnerCoverPhotoUploadServerBuilder {\n\tb.Params[\"group_id\"] = v\n\treturn b\n}", "func (b *GroupsEditBuilder) EventGroupID(v int) *GroupsEditBuilder {\n\tb.Params[\"event_group_id\"] = v\n\treturn b\n}", "func (b *MessagesSetActivityBuilder) GroupID(v int) *MessagesSetActivityBuilder {\n\tb.Params[\"group_id\"] = v\n\treturn b\n}", "func (o *RemoveControlFromGroupParams) SetGroupID(groupID string) {\n\to.GroupID = groupID\n}", "func (b *MessagesGetLongPollServerBuilder) GroupID(v int) *MessagesGetLongPollServerBuilder {\n\tb.Params[\"group_id\"] = v\n\treturn b\n}", "func WithProviderID(id *uint32) OptionFunc {\n\treturn func(opts *option) error {\n\t\topts.providerID = id\n\t\treturn nil\n\t}\n}", "func (m *kafkaConsumerGroupManagerImpl) CloseConsumerGroup(groupId string) error {\n\tgroupLogger := m.logger.With(zap.String(\"GroupId\", groupId))\n\tgroupLogger.Info(\"Closing ConsumerGroup and removing from management\")\n\tmanagedGrp := m.getGroup(groupId)\n\tif managedGrp == nil {\n\t\tgroupLogger.Warn(\"CloseConsumerGroup called on unmanaged group\")\n\t\treturn fmt.Errorf(\"could not close consumer group with id '%s' - group is not present in the managed map\", groupId)\n\t}\n\tif err := managedGrp.close(); err != nil {\n\t\tgroupLogger.Error(\"Failed To Close Managed ConsumerGroup\", zap.Error(err))\n\t\treturn err\n\t}\n\n\t// Remove this groupId from the map so that manager functions may not be called on it\n\tm.removeGroup(groupId)\n\tm.notify(ManagerEvent{Event: GroupClosed, GroupId: groupId})\n\n\treturn nil\n}", "func (o *GetBackupLocationsParams) SetGroupID(groupID int64) {\n\to.GroupID = groupID\n}", "func (b *MessagesMarkAsAnsweredConversationBuilder) GroupID(v int) *MessagesMarkAsAnsweredConversationBuilder {\n\tb.Params[\"group_id\"] = v\n\treturn b\n}", "func (b *MessagesRestoreBuilder) GroupID(v int) *MessagesRestoreBuilder {\n\tb.Params[\"group_id\"] = v\n\treturn b\n}", "func (b *MessagesSearchBuilder) GroupID(v int) *MessagesSearchBuilder {\n\tb.Params[\"group_id\"] = v\n\treturn b\n}", "func (_options *ListEnterprisesOptions) SetAccountGroupID(accountGroupID string) *ListEnterprisesOptions {\n\t_options.AccountGroupID = core.StringPtr(accountGroupID)\n\treturn _options\n}", "func (b *MessagesMarkAsReadBuilder) GroupID(v int) *MessagesMarkAsReadBuilder {\n\tb.Params[\"group_id\"] = v\n\treturn b\n}", "func (b *PagesSaveAccessBuilder) GroupID(v int) *PagesSaveAccessBuilder {\n\tb.Params[\"group_id\"] = v\n\treturn b\n}", "func (b *PhotosGetWallUploadServerBuilder) GroupID(v int) *PhotosGetWallUploadServerBuilder 
{\n\tb.Params[\"group_id\"] = v\n\treturn b\n}", "func ConsumerSetAsyncNum(num int) model.Option {\n\treturn model.FuncOption(func(d *model.Dispatcher) { d.ConsumerAsyncNum = num })\n}", "func (luo *LessonplanUpdateOne) SetGroupID(s *Section) *LessonplanUpdateOne {\n\treturn luo.SetGroupIDID(s.ID)\n}", "func WithOffsetConsumerOption(offset int64) ConsumerOption {\n\treturn func(c *Consumer) {\n\t\tswitch offset {\n\t\tcase LastOffset:\n\t\t\tc.config.StartOffset = LastOffset\n\t\tcase FirstOffset:\n\t\t\tc.config.StartOffset = FirstOffset\n\t\tdefault:\n\t\t\tc.config.StartOffset = FirstOffset\n\t\t}\n\t}\n}", "func WithAutoCommitConsumerOption(flag bool) ConsumerOption {\n\treturn func(c *Consumer) { c.autocommit = flag }\n}", "func (lu *LessonplanUpdate) SetGroupID(s *Section) *LessonplanUpdate {\n\treturn lu.SetGroupIDID(s.ID)\n}", "func (b *PhotosGetUploadServerBuilder) GroupID(v int) *PhotosGetUploadServerBuilder {\n\tb.Params[\"group_id\"] = v\n\treturn b\n}", "func (b *MessagesGetByIDBuilder) GroupID(v int) *MessagesGetByIDBuilder {\n\tb.Params[\"group_id\"] = v\n\treturn b\n}", "func (b *PhotosSaveBuilder) GroupID(v int) *PhotosSaveBuilder {\n\tb.Params[\"group_id\"] = v\n\treturn b\n}", "func WithTopicConsumerOption(topic string) ConsumerOption {\n\treturn func(c *Consumer) {\n\t\tc.config.Topic = topic\n\t}\n}", "func (b *PagesParseWikiBuilder) GroupID(v int) *PagesParseWikiBuilder {\n\tb.Params[\"group_id\"] = v\n\treturn b\n}", "func (b *GroupsGetMembersBuilder) GroupID(v string) *GroupsGetMembersBuilder {\n\tb.Params[\"group_id\"] = v\n\treturn b\n}", "func WithCGroup(useCGroup bool) Option {\n\treturn optionFunc(func(opts *options) (err error) {\n\t\topts.UseCGroup = useCGroup\n\t\treturn\n\t})\n}", "func (b *GroupsIsMemberBuilder) GroupID(v string) *GroupsIsMemberBuilder {\n\tb.Params[\"group_id\"] = v\n\treturn b\n}", "func (b *PhotosSaveWallPhotoBuilder) GroupID(v int) *PhotosSaveWallPhotoBuilder {\n\tb.Params[\"group_id\"] = v\n\treturn b\n}", "func (b *MessagesGetHistoryBuilder) GroupID(v int) *MessagesGetHistoryBuilder {\n\tb.Params[\"group_id\"] = v\n\treturn b\n}", "func (b *PagesSaveBuilder) GroupID(v int) *PagesSaveBuilder {\n\tb.Params[\"group_id\"] = v\n\treturn b\n}", "func (_options *ListAccountsOptions) SetAccountGroupID(accountGroupID string) *ListAccountsOptions {\n\t_options.AccountGroupID = core.StringPtr(accountGroupID)\n\treturn _options\n}", "func (b *MessagesGetLongPollHistoryBuilder) GroupID(v int) *MessagesGetLongPollHistoryBuilder {\n\tb.Params[\"group_id\"] = v\n\treturn b\n}", "func (b *PagesGetVersionBuilder) GroupID(v int) *PagesGetVersionBuilder {\n\tb.Params[\"group_id\"] = v\n\treturn b\n}", "func (o *UpdateSingleGroupPublicV1Params) SetGroupID(groupID string) {\n\to.GroupID = groupID\n}", "func (b *GroupsJoinBuilder) GroupID(v int) *GroupsJoinBuilder {\n\tb.Params[\"group_id\"] = v\n\treturn b\n}", "func (_options *GetAccountGroupOptions) SetAccountGroupID(accountGroupID string) *GetAccountGroupOptions {\n\t_options.AccountGroupID = core.StringPtr(accountGroupID)\n\treturn _options\n}", "func (o *QueryRolesParams) SetCidGroupID(cidGroupID *string) {\n\to.CidGroupID = cidGroupID\n}", "func (b *MessagesGetConversationMembersBuilder) GroupID(v int) *MessagesGetConversationMembersBuilder {\n\tb.Params[\"group_id\"] = v\n\treturn b\n}", "func NewConsumerGroupListResultIterator(page ConsumerGroupListResultPage) ConsumerGroupListResultIterator {\n\treturn ConsumerGroupListResultIterator{page: page}\n}", "func WithEndpointConsumerOption(end 
endpoint.Endpoint) ConsumerOption {\n\treturn func(c *Consumer) { c.end = end }\n}", "func NewConsumerGroupHandler(handlers map[string]ConsumerFn) *handlerMapConsumerGroupHandler {\n\treturn &handlerMapConsumerGroupHandler{\n\t\thandlers: handlers,\n\t}\n}", "func (option *SetAttribute) SetGroup(value string) {\n\toption.Group = &value\n}", "func (o ClusterManagedPrivateEndpointOutput) GroupId() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *ClusterManagedPrivateEndpoint) pulumi.StringOutput { return v.GroupId }).(pulumi.StringOutput)\n}", "func (b *MessagesGetByConversationMessageIDBuilder) GroupID(v int) *MessagesGetByConversationMessageIDBuilder {\n\tb.Params[\"group_id\"] = v\n\treturn b\n}", "func (_options *UpdateAccountGroupOptions) SetAccountGroupID(accountGroupID string) *UpdateAccountGroupOptions {\n\t_options.AccountGroupID = core.StringPtr(accountGroupID)\n\treturn _options\n}" ]
[ "0.802058", "0.7819796", "0.61697775", "0.60025406", "0.5875389", "0.58104455", "0.57588154", "0.5694752", "0.5690244", "0.56404895", "0.5616615", "0.55349314", "0.55349314", "0.5481481", "0.5397696", "0.53932905", "0.53766704", "0.53226244", "0.5288298", "0.5288043", "0.5282635", "0.5280216", "0.5277006", "0.52691597", "0.5254388", "0.5244045", "0.52434003", "0.5231315", "0.52084374", "0.51839674", "0.51788485", "0.51630926", "0.51630926", "0.5149375", "0.51461434", "0.51427174", "0.5141998", "0.513566", "0.51277727", "0.51202667", "0.5113179", "0.5110803", "0.5106898", "0.5105498", "0.5105058", "0.5103622", "0.5101087", "0.50666124", "0.503494", "0.501976", "0.50173205", "0.5013731", "0.500682", "0.5003969", "0.49831212", "0.49705786", "0.49701142", "0.496767", "0.49670875", "0.4961443", "0.49594074", "0.49588653", "0.4953466", "0.49513343", "0.4951118", "0.49496633", "0.4923968", "0.49204585", "0.49130845", "0.4909241", "0.4907951", "0.4903613", "0.4901234", "0.4890626", "0.4886018", "0.48826885", "0.48643583", "0.48643386", "0.48613778", "0.48495296", "0.48466027", "0.4841965", "0.48352984", "0.4833425", "0.4819828", "0.48117834", "0.48068973", "0.47995898", "0.47905034", "0.47875485", "0.4781976", "0.47695422", "0.47568542", "0.4729489", "0.4723342", "0.47138214", "0.4708175", "0.47037748", "0.4698234", "0.4696479" ]
0.82104796
0
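The record above and the one that follows document two constructors built on Go's functional-options pattern: each returns a ConsumerOption closure that mutates the consumer's config. Below is a minimal, self-contained sketch of how such options compose; the readerConfig type is a hypothetical stand-in for the Kafka reader config the real Consumer embeds, while the two option bodies match the documents in the adjacent records verbatim.

package main

import "fmt"

// readerConfig is a hypothetical stand-in; only the two fields
// set by the options in these records are modeled.
type readerConfig struct {
	GroupID string
	Topic   string
}

// Consumer and ConsumerOption mirror the shapes assumed by the two records.
type Consumer struct{ config readerConfig }

type ConsumerOption func(*Consumer)

// WithGroupIDConsumerOption matches the document in the record above.
func WithGroupIDConsumerOption(groupID string) ConsumerOption {
	return func(c *Consumer) { c.config.GroupID = groupID }
}

// WithTopicConsumerOption matches the document in the record below.
func WithTopicConsumerOption(topic string) ConsumerOption {
	return func(c *Consumer) { c.config.Topic = topic }
}

func main() {
	c := &Consumer{}
	opts := []ConsumerOption{
		WithGroupIDConsumerOption("orders-service"), // illustrative group ID
		WithTopicConsumerOption("orders"),           // illustrative topic
	}
	for _, opt := range opts {
		opt(c) // each option mutates the config in turn
	}
	fmt.Printf("%+v\n", c.config) // {GroupID:orders-service Topic:orders}
}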
WithTopicConsumerOption provides an option to modify the topic on which the Consumer will listen
func WithTopicConsumerOption(topic string) ConsumerOption { return func(c *Consumer) { c.config.Topic = topic } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (c *Consumer) SetTopic(topic string) *Consumer {\n\tif topic != \"\" {\n\t\tc.mutex.Lock()\n\t\tc.bind.SetKey(topic)\n\t\tc.mutex.Unlock()\n\t\tc.SetQueueName(true, \"\")\n\t\tc.SetChannelKey(true, \"\")\n\t}\n\treturn c\n}", "func WithTopic(ctx context.Context, topic string) context.Context {\n\treturn context.WithValue(ctx, topicKey{}, topic)\n}", "func WithOffsetConsumerOption(offset int64) ConsumerOption {\n\treturn func(c *Consumer) {\n\t\tswitch offset {\n\t\tcase LastOffset:\n\t\t\tc.config.StartOffset = LastOffset\n\t\tcase FirstOffset:\n\t\t\tc.config.StartOffset = FirstOffset\n\t\tdefault:\n\t\t\tc.config.StartOffset = FirstOffset\n\t\t}\n\t}\n}", "func WithReaderConsumerOption(reader *kafgo.Reader) ConsumerOption {\n\treturn func(c *Consumer) { c.reader = reader }\n}", "func (conn *ProtoConnection) ConsumeTopic(msgClb func(messaging.ProtoMessage), topics ...string) error {\n\tconn.multiplexer.rwlock.Lock()\n\tdefer conn.multiplexer.rwlock.Unlock()\n\n\tif conn.multiplexer.started {\n\t\treturn fmt.Errorf(\"ConsumeTopic can be called only if the multiplexer has not been started yet\")\n\t}\n\n\tbyteClb := func(bm *client.ConsumerMessage) {\n\t\tpm := client.NewProtoConsumerMessage(bm, conn.serializer)\n\t\tmsgClb(pm)\n\t}\n\n\tfor _, topic := range topics {\n\t\t// check if we have already consumed the topic\n\t\tvar found bool\n\t\tvar subs *consumerSubscription\n\tLoopSubs:\n\t\tfor _, subscription := range conn.multiplexer.mapping {\n\t\t\tif subscription.manual == true {\n\t\t\t\t// do not mix dynamic and manual mode\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif subscription.topic == topic {\n\t\t\t\tfound = true\n\t\t\t\tsubs = subscription\n\t\t\t\tbreak LoopSubs\n\t\t\t}\n\t\t}\n\n\t\tif !found {\n\t\t\tsubs = &consumerSubscription{\n\t\t\t\tmanual: false, // non-manual example\n\t\t\t\ttopic: topic,\n\t\t\t\tconnectionName: conn.name,\n\t\t\t\tbyteConsMsg: byteClb,\n\t\t\t}\n\t\t\t// subscribe new topic\n\t\t\tconn.multiplexer.mapping = append(conn.multiplexer.mapping, subs)\n\t\t}\n\n\t\t// add subscription to consumerList\n\t\tsubs.byteConsMsg = byteClb\n\t}\n\n\treturn nil\n}", "func WithEndpointConsumerOption(end endpoint.Endpoint) ConsumerOption {\n\treturn func(c *Consumer) { c.end = end }\n}", "func WithUpdateTopicConfig(cfg pubsub.TopicConfigToUpdate) Option {\n\treturn func(c *Config) {\n\t\tc.TopicConfig = &cfg\n\t}\n}", "func WithTopicOptions(topicConfig *pubsub.TopicConfig) Option {\n\treturn func(b *EventBus) error {\n\t\tb.topicConfig = topicConfig\n\n\t\treturn nil\n\t}\n}", "func (c *Consumer) Topic() string { return c.topic }", "func WithTopicPrefix(topicPrefix string) Option {\n\treturn func(c *queue) {\n\t\tc.topicPrefix = topicPrefix\n\t}\n}", "func WithAutoCommitConsumerOption(flag bool) ConsumerOption {\n\treturn func(c *Consumer) { c.autocommit = flag }\n}", "func NewConsumer(topics []string, valueFactory ValueFactory, opts ...ConsumerOption) (*Consumer, error) {\n\tc := &Consumer{\n\t\tvalueFactory: valueFactory,\n\t\tavroAPI: avro.DefaultConfig,\n\t\tensureTopics: true,\n\t}\n\t// Loop through each option\n\tfor _, opt := range opts {\n\t\t// apply option\n\t\topt.applyC(c)\n\t}\n\n\tvar err error\n\n\t// if consumer not provided - make one\n\tif c.KafkaConsumer == nil {\n\t\t// if kafka config not provided - build default one\n\t\tif c.kafkaCfg == nil {\n\t\t\tvar envCfg struct {\n\t\t\t\tBroker string `env:\"KAFKA_BROKER\" envDefault:\"localhost:9092\"`\n\t\t\t\tCAFile string `env:\"KAFKA_CA_FILE\"`\n\t\t\t\tKeyFile string 
`env:\"KAFKA_KEY_FILE\"`\n\t\t\t\tCertificateFile string `env:\"KAFKA_CERTIFICATE_FILE\"`\n\t\t\t\tGroupID string `env:\"KAFKA_GROUP_ID\"`\n\t\t\t}\n\t\t\tif err := env.Parse(&envCfg); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\t// default configuration\n\t\t\tc.kafkaCfg = &kafka.ConfigMap{\n\t\t\t\t\"bootstrap.servers\": envCfg.Broker,\n\t\t\t\t\"socket.keepalive.enable\": true,\n\t\t\t\t\"enable.auto.commit\": false,\n\t\t\t\t\"enable.partition.eof\": true,\n\t\t\t\t\"session.timeout.ms\": 6000,\n\t\t\t\t\"auto.offset.reset\": \"earliest\",\n\t\t\t\t\"group.id\": envCfg.GroupID,\n\t\t\t}\n\n\t\t\tif envCfg.CAFile != \"\" {\n\t\t\t\t// configure SSL\n\t\t\t\tc.kafkaCfg.SetKey(\"security.protocol\", \"ssl\")\n\t\t\t\tc.kafkaCfg.SetKey(\"ssl.ca.location\", envCfg.CAFile)\n\t\t\t\tc.kafkaCfg.SetKey(\"ssl.key.location\", envCfg.KeyFile)\n\t\t\t\tc.kafkaCfg.SetKey(\"ssl.certificate.location\", envCfg.CertificateFile)\n\t\t\t}\n\t\t}\n\n\t\tif c.KafkaConsumer, err = kafka.NewConsumer(c.kafkaCfg); err != nil {\n\t\t\treturn nil, errors.WithMessage(err, \"cannot initialize kafka consumer\")\n\t\t}\n\t}\n\n\tif c.srClient == nil {\n\t\tif c.srURL == nil {\n\t\t\tvar envCfg struct {\n\t\t\t\tSchemaRegistry *url.URL `env:\"KAFKA_SCHEMA_REGISTRY\" envDefault:\"http://localhost:8081\"`\n\t\t\t}\n\t\t\tif err := env.Parse(&envCfg); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tc.srURL = envCfg.SchemaRegistry\n\t\t}\n\n\t\tif c.srClient, err = NewCachedSchemaRegistryClient(c.srURL.String()); err != nil {\n\t\t\treturn nil, errors.WithMessage(err, \"cannot initialize schema registry client\")\n\t\t}\n\t}\n\n\tif c.eventHandler == nil {\n\t\tc.eventHandler = func(event kafka.Event) {\n\t\t\tlog.Println(event)\n\t\t}\n\t}\n\n\tif topics != nil {\n\t\tif err := c.KafkaConsumer.SubscribeTopics(topics, nil); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif c.ensureTopics {\n\t\t\tif err = c.EnsureTopics(topics); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn c, nil\n}", "func WithCheckpoint(checkpoint Checkpoint) Option {\n\treturn func(c *Consumer) error {\n\t\tc.checkpoint = checkpoint\n\t\treturn nil\n\t}\n}", "func (conn *ProtoConnection) ConsumeTopicOnPartition(msgClb func(messaging.ProtoMessage), topic string, partition int32, offset int64) error {\n\tconn.multiplexer.rwlock.Lock()\n\tdefer conn.multiplexer.rwlock.Unlock()\n\n\tif conn.multiplexer.started {\n\t\treturn fmt.Errorf(\"ConsumeTopicOnPartition can be called only if the multiplexer has not been started yet\")\n\t}\n\n\tbyteClb := func(bm *client.ConsumerMessage) {\n\t\tpm := client.NewProtoConsumerMessage(bm, conn.serializer)\n\t\tmsgClb(pm)\n\t}\n\n\t// check if we have already consumed the topic on partition and offset\n\tvar found bool\n\tvar subs *consumerSubscription\n\n\tfor _, subscription := range conn.multiplexer.mapping {\n\t\tif subscription.manual == false {\n\t\t\t// do not mix dynamic and manual mode\n\t\t\tcontinue\n\t\t}\n\t\tif subscription.topic == topic && subscription.partition == partition && subscription.offset == offset {\n\t\t\tfound = true\n\t\t\tsubs = subscription\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif !found {\n\t\tsubs = &consumerSubscription{\n\t\t\tmanual: true, // manual example\n\t\t\ttopic: topic,\n\t\t\tpartition: partition,\n\t\t\toffset: offset,\n\t\t\tconnectionName: conn.name,\n\t\t\tbyteConsMsg: byteClb,\n\t\t}\n\t\t// subscribe new topic on partition\n\t\tconn.multiplexer.mapping = append(conn.multiplexer.mapping, subs)\n\t}\n\n\t// add subscription to 
consumerList\n\tsubs.byteConsMsg = byteClb\n\n\treturn nil\n}", "func (b *addPushNotificationsOnChannelsBuilder) Topic(topic string) *addPushNotificationsOnChannelsBuilder {\n\tb.opts.Topic = topic\n\treturn b\n}", "func WithKafkaMode(broker []string, topic string) *kafkaW {\n\treturn &kafkaW{\n\t\tbrokers: broker,\n\t\ttopic: topic,\n\t}\n}", "func (c *ConsumerManager) AddConsumer(topic, channel string, client ConsumerClient) error {\n\n}", "func WithQueueSubscriber(queue string) ConsumerOption {\n\treturn func(c *Consumer) error {\n\t\tif queue == \"\" {\n\t\t\treturn ErrInvalidQueueName\n\t\t}\n\t\tc.Subscriber = &QueueSubscriber{Queue: queue}\n\t\treturn nil\n\t}\n}", "func (tt *Tester) Consume(topic string, key string, msg interface{}, options ...EmitOption) {\n\ttt.waitStartup()\n\n\topts := new(emitOption)\n\topts.applyOptions(options...)\n\tvalue := reflect.ValueOf(msg)\n\tif msg == nil || (value.Kind() == reflect.Ptr && value.IsNil()) {\n\t\ttt.pushMessage(topic, key, nil, opts.headers)\n\t} else {\n\t\tdata, err := tt.codecForTopic(topic).Encode(msg)\n\t\tif err != nil {\n\t\t\tpanic(fmt.Errorf(\"Error encoding value %v: %v\", msg, err))\n\t\t}\n\t\ttt.pushMessage(topic, key, data, opts.headers)\n\t}\n\n\ttt.waitForClients()\n}", "func (conn *Conn) Topic(channel string, topic ...string) {\n\tt := strings.Join(topic, \" \")\n\tif t != \"\" {\n\t\tt = \" :\" + t\n\t}\n\tconn.Raw(TOPIC + \" \" + channel + t)\n}", "func (c *Connection) ConsumerWithConfig(done chan bool, config *Config, callback func(msgs <-chan amqp.Delivery)) error {\n\tmsgs, err := c.Channel.Consume(\n\t\tconfig.Queue,\n\t\tconfig.ConsumerTag,\n\t\tconfig.Options.Consume.AutoAck,\n\t\tconfig.Options.Consume.Exclusive,\n\t\tconfig.Options.Consume.NoLocal,\n\t\tconfig.Options.Consume.NoWait,\n\t\tconfig.Options.Consume.Args,\n\t)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tgo callback(msgs)\n\n\tlog.Println(\"Waiting for messages...\")\n\n\tfor {\n\t\tselect {\n\t\tcase <-done:\n\t\t\tc.Channel.Close()\n\t\t\tc.Conn.Close()\n\n\t\t\treturn nil\n\t\t}\n\t}\n}", "func (c *Client) Topic(target, topic string) error {\n\treturn c.Raw(\"TOPIC %s :%s\", target, topic)\n}", "func WithDecoderConsumerOption(fn Decoder) ConsumerOption {\n\treturn func(c *Consumer) { c.dec = fn }\n}", "func WithConsumerGroupID(groupID string) ConfigOpt {\n\treturn func(c *kafkalib.ConfigMap) {\n\t\t_ = c.SetKey(\"group.id\", groupID)\n\t}\n}", "func NewConsumer(log logrus.FieldLogger, conf Config, opts ...ConfigOpt) (Consumer, error) {\n\t// See Reference at https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md\n\tkafkaConf := conf.baseKafkaConfig()\n\t_ = kafkaConf.SetKey(\"enable.auto.offset.store\", false) // manually StoreOffset after processing a message. 
Otherwise races may happen.)\n\n\t// In case we try to assign an offset out of range (greater than log-end-offset), consumer will use start consuming from offset zero.\n\t_ = kafkaConf.SetKey(\"auto.offset.reset\", \"earliest\")\n\n\tconf.Consumer.Apply(kafkaConf)\n\tfor _, opt := range opts {\n\t\topt(kafkaConf)\n\t}\n\n\tif err := conf.configureAuth(kafkaConf); err != nil {\n\t\treturn nil, errors.Wrap(err, \"error configuring auth for the Kafka consumer\")\n\t}\n\n\tconsumer, err := kafkalib.NewConsumer(kafkaConf)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif conf.RequestTimeout == 0 {\n\t\tconf.RequestTimeout = DefaultTimeout\n\t}\n\n\tcc := &ConfluentConsumer{\n\t\tc: consumer,\n\t\tconf: conf,\n\t\tlog: log,\n\t}\n\n\tlogFields := logrus.Fields{\"kafka_topic\": cc.conf.Topic}\n\n\tif cc.conf.Consumer.Partition != nil || cc.conf.Consumer.PartitionKey != \"\" {\n\t\t// set the default partitioner algorithm\n\t\tif cc.conf.Consumer.PartitionerAlgorithm == \"\" {\n\t\t\tcc.conf.Consumer.PartitionerAlgorithm = PartitionerMurMur2\n\t\t}\n\t\t// Set the partition if a key is set to determine the partition\n\t\tif cc.conf.Consumer.PartitionKey != \"\" && cc.conf.Consumer.PartitionerAlgorithm != \"\" {\n\t\t\tcc.AssignPartitionByKey(cc.conf.Consumer.PartitionKey, cc.conf.Consumer.PartitionerAlgorithm)\n\t\t}\n\n\t\tlogFields[\"kafka_partition_key\"] = cc.conf.Consumer.PartitionKey\n\t\tlogFields[\"kafka_partition\"] = *cc.conf.Consumer.Partition\n\t}\n\n\tcc.setupRebalanceHandler()\n\tcc.log.WithFields(logFields).Debug(\"Subscribing to Kafka topic\")\n\tif serr := cc.c.Subscribe(cc.conf.Topic, cc.rebalanceHandler); serr != nil {\n\t\terr = errors.Wrap(serr, \"error subscribing to topic\")\n\t}\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn cc, nil\n}", "func (c *Client) Topic(text string) error {\n\tvar args []byte\n\n\targs = combine(args, \"TOPIC \")\n\targs = combine(args, text)\n\n\treturn c.Send(MESSAGE_CHAT_MESSAGE, args)\n}", "func (o *TopicsToResetOffset) SetTopic(v string) {\n\to.Topic = v\n}", "func (m *Server) SetTopic(topic string) {\n\tm.tree.Set(topic, packet.Subscription{Topic: topic, QOS: packet.QOSAtMostOnce})\n}", "func WithSomeName(handler ConsumerHandler) ConsumerOption {\n\treturn func(consumer Consumer) error {\n\t\treturn consumer.Consume(\"some-name\", handler)\n\t}\n}", "func Consume(ctx context.Context, topicName string) {\n\tconfig.TLSConfMutex.RLock()\n\tMessageBusConfigFilePath := config.Data.MessageBusConf.MessageBusConfigFilePath\n\tmessageBusType := config.Data.MessageBusConf.MessageBusType\n\tconfig.TLSConfMutex.RUnlock()\n\t// connecting to kafka\n\tk, err := dc.Communicator(messageBusType, MessageBusConfigFilePath, topicName)\n\tif err != nil {\n\t\tl.LogWithFields(ctx).Error(\"unable to connect to kafka\" + err.Error())\n\t\treturn\n\t}\n\t// subscribe from message bus\n\tif err := k.Accept(EventSubscriber); err != nil {\n\t\tl.Log.Error(err.Error())\n\t\treturn\n\t}\n}", "func WithMessageProvider(s store.MessageProvider) WakuNodeOption {\n\treturn func(params *WakuNodeParameters) error {\n\t\tif params.store != nil {\n\t\t\tparams.store.SetMsgProvider(s)\n\t\t} else {\n\t\t\tparams.store = store.NewWakuStore(true, s)\n\t\t}\n\t\treturn nil\n\t}\n}", "func (k *Kafka) Subscribe(topic string, h broker.Handler, opts ...broker.SubscribeOption) (broker.Subscriber, error) {\n\n\top := &broker.SubscribeOptions{\n\t\tAutoAck: true,\n\t}\n\top.Apply(opts...)\n\n\tvar err error\n\t//handler function\n\thandler := func(msg *sarama.ConsumerMessage) 
{\n\t\tm := broker.Message{}\n\t\tif err := k.encoder.Decode(msg.Value, &m); err != nil {\n\t\t\tlog.Errorf(\"kafka: subscribe: decode failed, err: %v\", err)\n\t\t\treturn\n\t\t}\n\t\th(&event{\n\t\t\top: op,\n\t\t\tt: topic,\n\t\t\tm: &m,\n\t\t})\n\t}\n\t//Consumer with no groupID\n\tif op.Queue == \"\" {\n\t\tlog.Info(\"consumer with no groupID\")\n\t\t// Create new consumer\n\t\tk.consumer, err = sarama.NewConsumer(strings.Split(k.addrs, \",\"), k.config)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tpartitionList, err := k.consumer.Partitions(topic)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tfor partition := range partitionList {\n\t\t\tconsumer, err := k.consumer.ConsumePartition(topic, int32(partition), sarama.OffsetNewest)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tgo func() {\n\t\t\t\tfor msg := range consumer.Messages() {\n\t\t\t\t\thandler(msg)\n\t\t\t\t}\n\t\t\t}()\n\t\t}\n\t\treturn &subscriber{\n\t\t\tqueue: op.Queue,\n\t\t\tt: topic,\n\t\t\ts: k.consumer,\n\t\t}, nil\n\n\t} //end no group\n\t{\n\t\t// Create new consumer group\n\t\tif k.consumerGroup, err = sarama.NewConsumerGroup([]string{k.addrs}, op.Queue, k.config); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tctx := context.Background()\n\t\tconsumer := Consumer{\n\t\t\tencoder: k.encoder,\n\t\t\ttopic: topic,\n\t\t\th: h,\n\t\t\tready: make(chan bool),\n\t\t}\n\t\tgo func() {\n\t\t\tfor {\n\n\t\t\t\tif err := k.consumerGroup.Consume(ctx, []string{topic}, &consumer); err != nil {\n\t\t\t\t\tlog.Panicf(\"Error from consumer: %v\", err)\n\t\t\t\t}\n\t\t\t\t<-ctx.Done()\n\n\t\t\t\tif ctx.Err() != nil {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\t\t<-consumer.ready // Await till the consumer has been set up\n\t\tlog.Info(\"Sarama consumer up and running!...\")\n\n\t\treturn &subscriberGroup{\n\t\t\tqueue: op.Queue,\n\t\t\tt: topic,\n\t\t\tg: k.consumerGroup,\n\t\t}, nil\n\t}\n\n}", "func WithConsumeTimeout(s string) (OptionFunc, error) {\n\ttimeout, err := time.ParseDuration(s)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn func(c *Config) error {\n\t\tc.consumeTimeout = timeout\n\t\treturn nil\n\t}, nil\n}", "func TestConsumerOptions(tb testing.TB, options ...Option) []Option {\n\ttb.Helper()\n\n\tvar defaults []Option\n\n\tconfig := ConsumerOptions(func(c *Consumer) {\n\t\tc.Kafka = kafkaconfig.TestConsumer(tb)\n\t})\n\n\tdefaults = append(defaults, config)\n\n\treturn append(defaults, options...)\n}", "func setTopicInfo(t *Topic, client connection.Client) error {\n\tconfigRequest := &sarama.DescribeConfigsRequest{\n\t\tVersion: 0,\n\t\tIncludeSynonyms: true,\n\t\tResources: []*sarama.ConfigResource{\n\t\t\t{\n\t\t\t\tType: sarama.TopicResource,\n\t\t\t\tName: string(t.Name),\n\t\t\t\tConfigNames: nil,\n\t\t\t},\n\t\t},\n\t}\n\n\tcontroller, err := client.Controller()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to get controller from client: %s\", err)\n\t}\n\n\tconfigResponse, err := controller.DescribeConfigs(configRequest)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to describe configs for topic %s: %s\", t.Name, err)\n\t}\n\n\tif len(configResponse.Resources) != 1 {\n\t\treturn fmt.Errorf(\"received an unexpected number of config resources (%d)\", len(configResponse.Resources))\n\t}\n\n\t// Collect partition information asynchronously\n\tvar wg sync.WaitGroup\n\tpartitionInChan, partitionOutChans := startPartitionPool(50, &wg, client)\n\tgo feedPartitionPool(partitionInChan, t.Name, client)\n\tpartitions := 
collectPartitions(partitionOutChans)\n\n\t// Populate topic struct fields\n\tt.Partitions = partitions\n\tt.Configs = configResponse.Resources[0].Configs\n\tt.PartitionCount = len(partitions)\n\tif len(partitions) > 0 {\n\t\tt.ReplicationFactor = len(partitions[0].Replicas)\n\t}\n\n\treturn nil\n}", "func withKubeClientProvider(kcp kube.ClientProvider) option {\n\treturn func(p *kubernetesprocessor) error {\n\t\treturn p.initKubeClient(p.logger, kcp)\n\t}\n}", "func (a *HipchatAdapter) Topic(res *Response, strings ...string) error {\n\tfor _, str := range strings {\n\t\t_ = str\n\t}\n\treturn nil\n}", "func WithGroupIDConsumerOption(groupID string) ConsumerOption {\n\treturn func(c *Consumer) {\n\t\tc.config.GroupID = groupID\n\t}\n}", "func ConsumerOmitOldMsg() model.Option {\n\treturn model.FuncOption(func(d *model.Dispatcher) { d.ConsumerOmitOldMsg = true })\n}", "func (b *Broker) AddTopic(subscriber *Subscriber, keyTopic string, priority uint, action func(msg interface{}) (err error)) {\n\n\tt := &topicSub{priority: priority, action: action, channel: make(chan interface{}, 1)}\n\n\tsubscriber.topic = t\n\n\tsubscriber.debug = keyTopic\n\t//subscriber.byPriority = append(subscriber.byPriority, t)\n\n\t// Sort topics by priority, will be useful to manage priorities after\n\n\tb.addSubscriber(keyTopic, subscriber)\n\n\t// Sort byPriority array\n\t//\tsort.SliceStable(subscriber.byPriority, func(i, j int) bool { return subscriber.byPriority[i].priority < subscriber.byPriority[j].priority })\n}", "func InitConsumer(broker, group string) {\n\tvar err error\n\tutils.ConsumerObject, err = kafka.NewConsumer(&kafka.ConfigMap{\n\t\t\"bootstrap.servers\": broker,\n\t\t\"group.id\": group,\n\t\t\"session.timeout.ms\": 6000,\n\t\t\"go.events.channel.enable\": true,\n\t\t\"go.application.rebalance.enable\": true,\n\t\t// Enable generation of PartitionEOF when the\n\t\t// end of a partition is reached.\n\t\t\"enable.partition.eof\": true,\n\t\t\"auto.offset.reset\": \"earliest\"})\n\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"Failed to create consumer: %s\\n\", err)\n\t\tos.Exit(1)\n\t}\n}", "func consumer(ctx context.Context, t *pubsub.Topic) fx.Hook {\n\tvar sub *pubsub.Subscription\n\treturn fx.Hook{\n\t\tOnStart: func(ctx context.Context) (err error) {\n\t\t\tif sub, err = t.Subscribe(); err == nil {\n\t\t\t\tgo func() {\n\t\t\t\t\tfor {\n\t\t\t\t\t\t_, err := sub.Next(context.Background())\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}()\n\t\t\t}\n\n\t\t\treturn\n\t\t},\n\t\tOnStop: func(context.Context) error {\n\t\t\tsub.Cancel()\n\t\t\treturn t.Close()\n\t\t},\n\t}\n}", "func (o RepositoryPubsubConfigOutput) Topic() pulumi.StringOutput {\n\treturn o.ApplyT(func(v RepositoryPubsubConfig) string { return v.Topic }).(pulumi.StringOutput)\n}", "func Configure(kafkaBrokerUrls []string, clientID string, topic string) (w *kafka.Writer, err error) {\n\n\tdialer := &kafka.Dialer{\n\t\tTimeout: 10 * time.Second,\n\t\tClientID: clientID,\n\t}\n\n\tconfig := kafka.WriterConfig{\n\t\tBrokers: kafkaBrokerUrls,\n\t\tTopic: topic,\n\t\tBalancer: &kafka.LeastBytes{},\n\t\tDialer: dialer,\n\t\tWriteTimeout: 10 * time.Second,\n\t\tReadTimeout: 10 * time.Second,\n\t\t//CompressionCodec: snappy.NewCompressionCodec(),\n\t}\n\n\tw = kafka.NewWriter(config)\n\t//writer = w\n\n\treturn w, nil\n}", "func WithCounter(counter Counter) Option {\n\treturn func(c *Consumer) error {\n\t\tc.counter = counter\n\t\treturn nil\n\t}\n}", "func (client *Client) 
CreateTopicWithCallback(request *CreateTopicRequest, callback func(response *CreateTopicResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *CreateTopicResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.CreateTopic(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}", "func WithAutoCommitTimeConsumerOption(dur time.Duration) ConsumerOption {\n\treturn func(c *Consumer) { c.config.CommitInterval = dur }\n}", "func InitSetKafkaConfig(config KafkaConfig) model.Option {\n\treturn model.FuncOption(func(d *model.Dispatcher) { d.KafkaConfig = core.KafkaConfig(config) })\n}", "func TestNewConsumer(tb testing.TB, defaults bool, options ...Option) Consumer {\n\tc, err := NewConsumer()\n\trequire.NoError(tb, err)\n\n\tif !defaults {\n\t\tc.Inmem = inmemconfig.Consumer{Store: nil}\n\t\tc.Kafka = kafkaconfig.Consumer{}\n\t\tc.Pubsub = pubsubconfig.Consumer{}\n\t\tc.Standardstream = standardstreamconfig.Consumer{}\n\t\tc.Logger = nil\n\t\tc.HandleInterrupt = false\n\t\tc.HandleErrors = false\n\t\tc.Name = \"\"\n\t\tc.AllowEnvironmentBasedConfiguration = false\n\t}\n\n\tfor _, option := range options {\n\t\toption.apply(&c, nil)\n\t}\n\n\terr = envconfig.Process(c.Name, &c)\n\trequire.NoError(tb, err)\n\n\treturn c\n}", "func SetKafka(bk *KafkaBroker) OptionFunc {\n\treturn func(bi *brokerInstance) {\n\t\tbi.kafka = bk\n\t}\n}", "func (zj *ZapJournal) Topic(topic string) Writer {\n\treturn &ZapWriter{\n\t\tlogger: zj.logger.Sugar().Named(topic),\n\t\ttopic: topic,\n\t}\n}", "func WithEventSettings(cfg ConsumerConfig) OptionEvent {\n\treturn func(c *Consumer) {\n\t\tif cfg.MaxInFlight > 0 {\n\t\t\tc.cfg.MaxInFlight = cfg.MaxInFlight\n\t\t}\n\t\tif cfg.MaxAttempts > 0 {\n\t\t\tc.cfg.MaxAttempts = cfg.MaxAttempts\n\t\t}\n\t\tif cfg.Timeout > 0 {\n\t\t\tc.cfg.Timeout = cfg.Timeout\n\t\t}\n\t\tif cfg.RequeueInterval > 0 {\n\t\t\tc.cfg.RequeueInterval = cfg.RequeueInterval\n\t\t}\n\t\tif cfg.NumOfConsumers > 0 {\n\t\t\tc.cfg.NumOfConsumers = cfg.NumOfConsumers\n\t\t}\n\t}\n}", "func (a *DefaultApiService) UpdateTopic(ctx _context.Context, topicName string) ApiUpdateTopicRequest {\n\treturn ApiUpdateTopicRequest{\n\t\tApiService: a,\n\t\tctx: ctx,\n\t\ttopicName: topicName,\n\t}\n}", "func (o TopicRuleKafkaOutput) Topic() pulumi.StringOutput {\n\treturn o.ApplyT(func(v TopicRuleKafka) string { return v.Topic }).(pulumi.StringOutput)\n}", "func startConsumer(topic, channel, logPrefix string, config *nsq.Config, f nsq.HandlerFunc, nsqLookupds []string) *nsq.Consumer {\n\n\t// Create new consumer\n\tconsumer, _ := nsq.NewConsumer(topic, channel, config)\n\tconsumer.SetLogger(log.New(os.Stderr, logPrefix, log.Ltime), nsq.LogLevelError)\n\tconsumer.AddHandler(f)\n\n\t// Open connection to NSQ\n\tif err := consumer.ConnectToNSQLookupds(nsqLookupds); err != nil {\n\t\tlog.Println(\"[WARN] Can't connect to NSQ\", err)\n\t}\n\n\treturn consumer\n}", "func ConsumerID(id string) ConsumerOption {\n\treturn func(o *ConsumerOptions) error {\n\t\tif id == \"\" {\n\t\t\treturn errors.New(\"invalid consumer id\")\n\t\t}\n\t\to.ConsumerID = id\n\t\treturn nil\n\t}\n}", "func (c *Collection) Topic(p types.Path) types.Path {\n\treturn types.PathFromString(c.Spec.Metadata.Name).Join(p)\n}", "func ConsumerSetAsyncNum(num int) model.Option {\n\treturn model.FuncOption(func(d *model.Dispatcher) { d.ConsumerAsyncNum = 
num })\n}", "func TestConsumerAPIs(t *testing.T) {\n\n\tc, err := NewConsumer(&ConfigMap{})\n\tif err == nil {\n\t\tt.Fatalf(\"Expected NewConsumer() to fail without group.id\")\n\t}\n\n\tc, err = NewConsumer(&ConfigMap{\n\t\t\"group.id\": \"gotest\",\n\t\t\"socket.timeout.ms\": 10,\n\t\t\"session.timeout.ms\": 10})\n\tif err != nil {\n\t\tt.Fatalf(\"%s\", err)\n\t}\n\n\tt.Logf(\"Consumer %s\", c)\n\n\terr = c.Subscribe(\"gotest\", nil)\n\tif err != nil {\n\t\tt.Errorf(\"Subscribe failed: %s\", err)\n\t}\n\n\terr = c.SubscribeTopics([]string{\"gotest1\", \"gotest2\"},\n\t\tfunc(my_c *Consumer, ev Event) error {\n\t\t\tt.Logf(\"%s\", ev)\n\t\t\treturn nil\n\t\t})\n\tif err != nil {\n\t\tt.Errorf(\"SubscribeTopics failed: %s\", err)\n\t}\n\n\t_, err = c.Commit()\n\tif err != nil && err.(Error).Code() != ErrNoOffset {\n\t\tt.Errorf(\"Commit() failed: %s\", err)\n\t}\n\n\terr = c.Unsubscribe()\n\tif err != nil {\n\t\tt.Errorf(\"Unsubscribe failed: %s\", err)\n\t}\n\n\ttopic1 := \"gotest1\"\n\ttopic2 := \"gotest2\"\n\terr = c.Assign([]TopicPartition{{Topic: &topic1, Partition: 2},\n\t\t{Topic: &topic2, Partition: 1}})\n\tif err != nil {\n\t\tt.Errorf(\"Assign failed: %s\", err)\n\t}\n\n\terr = c.Unassign()\n\tif err != nil {\n\t\tt.Errorf(\"Unassign failed: %s\", err)\n\t}\n\n\terr = c.Close()\n\tif err != nil {\n\t\tt.Errorf(\"Close failed: %s\", err)\n\t}\n}", "func WithMetricsProvider(provider metrics.MetricsProvider) Option {\n\treturn func(config *queueInformerConfig) {\n\t\tconfig.provider = provider\n\t}\n}", "func WithAfterFuncsConsumerOption(fns ...AfterFunc) ConsumerOption {\n\treturn func(c *Consumer) { c.afters = append(c.afters, fns...) }\n}", "func newKafkaConsumer() sarama.Consumer {\n\n\tkafkaBroker := os.Getenv(\"KAFKA_BROKER\")\n\n\tif runtime.GOOS == \"darwin\" {\n\t\tbrokers = []string{\"localhost:9092\"}\n\t} else {\n\t\tif kafkaBroker == \"\" {\n\t\t\tfmt.Printf(\"$KAFKA_BROKER must be set\")\n\t\t\tos.Exit(-1)\n\t\t}\n\t\tbrokers = []string{kafkaBroker}\n\t}\n\n\tconsumer, err := sarama.NewConsumer(brokers, newKafkaConfiguration())\n\n\tfmt.Print(\"Creating new Kafka Consumer \\n\")\n\n\tif err != nil {\n\t\tfmt.Printf(\"Kafka error: %s\\n\", err)\n\t\tos.Exit(-1)\n\t}\n\n\treturn consumer\n}", "func (msg *Subscribe) AddTopic(topic *Topic) error {\n\tmsg.topics = append(msg.topics, topic)\n\treturn nil\n}", "func NewConsumer(ctx context.Context) (*Consumer, error) {\n\t// TODO support filter in downstream sink\n\ttz, err := util.GetTimezone(timezone)\n\tif err != nil {\n\t\treturn nil, errors.Annotate(err, \"can not load timezone\")\n\t}\n\tctx = util.PutTimezoneInCtx(ctx, tz)\n\tfilter, err := cdcfilter.NewFilter(config.GetDefaultReplicaConfig())\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\tc := new(Consumer)\n\tc.fakeTableIDGenerator = &fakeTableIDGenerator{\n\t\ttableIDs: make(map[string]int64),\n\t}\n\tc.sinks = make([]*struct {\n\t\tsink.Sink\n\t\tresolvedTs uint64\n\t}, kafkaPartitionNum)\n\tctx, cancel := context.WithCancel(ctx)\n\terrCh := make(chan error, 1)\n\topts := map[string]string{}\n\tfor i := 0; i < int(kafkaPartitionNum); i++ {\n\t\ts, err := sink.NewSink(ctx, \"kafka-consumer\", downstreamURIStr, filter, config.GetDefaultReplicaConfig(), opts, errCh)\n\t\tif err != nil {\n\t\t\tcancel()\n\t\t\treturn nil, errors.Trace(err)\n\t\t}\n\t\tc.sinks[i] = &struct {\n\t\t\tsink.Sink\n\t\t\tresolvedTs uint64\n\t\t}{Sink: s}\n\t}\n\tsink, err := sink.NewSink(ctx, \"kafka-consumer\", downstreamURIStr, filter, config.GetDefaultReplicaConfig(), opts, 
errCh)\n\tif err != nil {\n\t\tcancel()\n\t\treturn nil, errors.Trace(err)\n\t}\n\tgo func() {\n\t\terr := <-errCh\n\t\tif errors.Cause(err) != context.Canceled {\n\t\t\tlog.Error(\"error on running consumer\", zap.Error(err))\n\t\t} else {\n\t\t\tlog.Info(\"consumer exited\")\n\t\t}\n\t\tcancel()\n\t}()\n\tc.ddlSink = sink\n\tc.ready = make(chan bool)\n\treturn c, nil\n}", "func NewConsumer(\n\tbrokers []string,\n\tlogger log.Logger,\n\toptions ...ConsumerOption,\n) (*Consumer, error) {\n\t// default values\n\tcfg := kafgo.ReaderConfig{\n\t\tBrokers: brokers,\n\t\tGroupID: defaultConsumerGroupID,\n\t\tTopic: defaultTopic,\n\t\tLogger: kafka.LoggerFunc(logger.Debugf),\n\t}\n\n\tcs := &Consumer{\n\t\treader: nil,\n\t\tconfig: &cfg,\n\t}\n\n\tfor _, o := range options {\n\t\to(cs)\n\t}\n\n\tif cs.end == nil {\n\t\treturn nil, errors.Wrap(\n\t\t\tErrCreatingConsumer, \"missing endpoint\",\n\t\t)\n\t}\n\n\tif cs.dec == nil {\n\t\treturn nil, errors.Wrap(\n\t\t\tErrCreatingConsumer, \"missing decoder\",\n\t\t)\n\t}\n\n\tif cs.errFn == nil {\n\t\tcs.errFn = defaultErrorFunc\n\t}\n\n\tif cs.errHandler == nil {\n\t\tcs.errHandler = transport.NewLogErrorHandler(logger)\n\t}\n\treturn cs, nil\n}", "func NewConsumer(cfg *ConsumerConfig, handler MessageHanlder) (*Consumer, error) {\n\tclusterConfig := cluster.NewConfig()\n\tclusterConfig.Metadata.RefreshFrequency = 1 * time.Minute\n\tclusterConfig.Group.Mode = cluster.ConsumerModePartitions\n\tclusterConfig.Group.Return.Notifications = true\n\tclusterConfig.Consumer.Offsets.Initial = sarama.OffsetNewest\n\tclusterConfig.Consumer.Return.Errors = true\n\tclientName := generateClientID(cfg.GroupID)\n\tclusterConfig.ClientID = clientName\n\n\tc, err := cluster.NewConsumer(cfg.Brokers, cfg.GroupID, cfg.Topic, clusterConfig)\n\tif err != nil {\n\t\tlog.Printf(\"Kafka Consumer: [%s] init fail, %v\", clientName, err)\n\t\treturn nil, err\n\t}\n\n\tvalidConfigValue(cfg)\n\tconsumer := &Consumer{\n\t\tclientName: clientName,\n\t\tcfg: cfg,\n\t\tconsumer: c,\n\t\tmsgHanlder: handler,\n\t\tpartitionWorkers: make([]*partitionConsumerWorker, 0),\n\t}\n\tlog.Printf(\"Kafka Consumer: [%s] init success\", clientName)\n\n\treturn consumer, nil\n}", "func (db *Database) SetChannelTopic(name string, topic string) error {\n\t_, err := db.db.Exec(`\n\t\tUPDATE melodious.channels SET topic=$2 WHERE name=$1;\n\t`, name, topic)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (kz *Kazoo) Topic(topic string) *Topic {\n\treturn &Topic{Name: topic, kz: kz}\n}", "func ConsumerSetGroupID(groupID string) model.Option {\n\treturn model.FuncOption(func(d *model.Dispatcher) { d.ConsumerGroupID = groupID })\n}", "func (eventNotifications *EventNotificationsV1) ReplaceTopicWithContext(ctx context.Context, replaceTopicOptions *ReplaceTopicOptions) (result *Topic, response *core.DetailedResponse, err error) {\n\terr = core.ValidateNotNil(replaceTopicOptions, \"replaceTopicOptions cannot be nil\")\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.ValidateStruct(replaceTopicOptions, \"replaceTopicOptions\")\n\tif err != nil {\n\t\treturn\n\t}\n\n\tpathParamsMap := map[string]string{\n\t\t\"instance_id\": *replaceTopicOptions.InstanceID,\n\t\t\"id\": *replaceTopicOptions.ID,\n\t}\n\n\tbuilder := core.NewRequestBuilder(core.PUT)\n\tbuilder = builder.WithContext(ctx)\n\tbuilder.EnableGzipCompression = eventNotifications.GetEnableGzipCompression()\n\t_, err = builder.ResolveRequestURL(eventNotifications.Service.Options.URL, `/v1/instances/{instance_id}/topics/{id}`, 
pathParamsMap)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tfor headerName, headerValue := range replaceTopicOptions.Headers {\n\t\tbuilder.AddHeader(headerName, headerValue)\n\t}\n\n\tsdkHeaders := common.GetSdkHeaders(\"event_notifications\", \"V1\", \"ReplaceTopic\")\n\tfor headerName, headerValue := range sdkHeaders {\n\t\tbuilder.AddHeader(headerName, headerValue)\n\t}\n\tbuilder.AddHeader(\"Accept\", \"application/json\")\n\tbuilder.AddHeader(\"Content-Type\", \"application/json\")\n\n\tbody := make(map[string]interface{})\n\tif replaceTopicOptions.Name != nil {\n\t\tbody[\"name\"] = replaceTopicOptions.Name\n\t}\n\tif replaceTopicOptions.Description != nil {\n\t\tbody[\"description\"] = replaceTopicOptions.Description\n\t}\n\tif replaceTopicOptions.Sources != nil {\n\t\tbody[\"sources\"] = replaceTopicOptions.Sources\n\t}\n\t_, err = builder.SetBodyContentJSON(body)\n\tif err != nil {\n\t\treturn\n\t}\n\n\trequest, err := builder.Build()\n\tif err != nil {\n\t\treturn\n\t}\n\n\tvar rawResponse map[string]json.RawMessage\n\tresponse, err = eventNotifications.Service.Request(request, &rawResponse)\n\tif err != nil {\n\t\treturn\n\t}\n\tif rawResponse != nil {\n\t\terr = core.UnmarshalModel(rawResponse, \"\", &result, UnmarshalTopic)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\tresponse.Result = result\n\t}\n\n\treturn\n}", "func (o SubscriptionOutput) Topic() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *Subscription) pulumi.StringOutput { return v.Topic }).(pulumi.StringOutput)\n}", "func MainConsumer(handler KafkaEventHandler, consumeTopic string, offset int64, updateOffset func(int64)) {\n\tfmt.Print(\"Initializing main Consumer \\n\")\n\tkafka := newKafkaConsumer()\n\n\tpartitionList, err := kafka.Partitions(consumeTopic)\n\tif err != nil {\n\t\tfmt.Printf(\"Error retrieving partitionList %s\\n\", err)\n\t}\n\n\tevents := make(chan *sarama.ConsumerMessage, simultaneousEvents)\n\n\tfor _, partition := range partitionList {\n\t\tconsumer, err := kafka.ConsumePartition(consumeTopic, partition, offset)\n\t\tif err != nil {\n\t\t\tfmt.Print(\"Unable to consume topic! 
\\n\")\n\t\t\tfmt.Printf(\"Kafka error: %s\\n\", err)\n\t\t\tos.Exit(-1)\n\t\t}\n\n\t\tgo func(consumer sarama.PartitionConsumer) {\n\t\t\tfor thisEvent := range consumer.Messages() {\n\t\t\t\tevents <- thisEvent\n\t\t\t}\n\t\t}(consumer)\n\t}\n\n\t// Eternal consuming loop\n\tconsumeEvents(events, handler, updateOffset)\n}", "func (q *Qlient) SubOn(topic string) (chan []byte, error) {\n\tsub, ok := q.subs[topic]\n\tif !ok {\n\t\tvar err error\n\t\tsub, err = q.newConsumer(topic)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn sub, nil\n}", "func NewConsumer() (*cluster.Consumer, error) {\n\n\tconfig := cluster.NewConfig()\n\tconfig.Consumer.Return.Errors = true\n\tconfig.Group.Return.Notifications = true\n\tconfig.Config.Net.TLS.Enable = true\n\tconfig.Config.Net.SASL.Enable = true\n\tconfig.Config.Net.SASL.User = viper.GetString(\"kafka.user\")\n\tconfig.Config.Net.SASL.Password = viper.GetString(\"kafka.password\")\n\tconfig.ClientID = \"poke.ssl-checker\"\n\tconfig.Consumer.Offsets.Initial = sarama.OffsetOldest\n\tconfig.Consumer.Offsets.CommitInterval = 10 * time.Second\n\n\tconsumerGroup := config.Config.Net.SASL.User + \".\" + viper.GetString(\"host\")\n\tbrokers := viper.GetStringSlice(\"kafka.brokers\")\n\ttopics := viper.GetStringSlice(\"kafka.topics\")\n\n\tconsumer, err := cluster.NewConsumer(brokers, consumerGroup, topics, config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn consumer, nil\n}", "func (c *MqClient) DeclareTopic(topic string, mask *int32) (*proto.TopicInfo, error) {\n\treq := NewMessage()\n\treq.SetCmd(proto.Declare)\n\treq.SetTopic(topic)\n\tif mask != nil {\n\t\treq.SetTopicMask(*mask)\n\t}\n\n\tinfo := &proto.TopicInfo{}\n\terr := c.invokeCmd(req, info)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn info, err\n}", "func WithLogger(logger *log.Logger) Option {\n\treturn func(c *Consumer) error {\n\t\tc.logger = logger\n\t\treturn nil\n\t}\n}", "func OverwriteConsumer(overwrite runtime.Consumer) Option {\n\treturn OverwriteConsumerForStatus(overwrite, ForAllStatusCodes)\n}", "func WithBeforeFuncsConsumerOption(fns ...BeforeFunc) ConsumerOption {\n\treturn func(c *Consumer) { c.befores = append(c.befores, fns...) 
}\n}", "func StartSimpleTopicConsumer(exchange, topic, queueName string, conn *amqp.Connection, work func([]byte)) (func(), error) {\n\tif conn == nil {\n\t\treturn nil, errors.New(\"No rabbit connection supplied\")\n\t}\n\n\tch, err := conn.Channel()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err = ch.ExchangeDeclare(exchange, \"topic\", true, false, false, false, nil); err != nil {\n\t\treturn nil, err\n\t}\n\n\tq, err := ch.QueueDeclare(queueName, true, false, false, false, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err = ch.QueueBind(q.Name, topic, exchange, false, nil); err != nil {\n\t\treturn nil, err\n\t}\n\n\tmsgs, err := ch.Consume(q.Name, \"\", true, false, false, false, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Create the cancel context\n\tctx, cancel := context.WithCancel(context.Background())\n\n\tgo func(ctx context.Context) {\n\t\tfor d := range msgs {\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\tlog.Print(`event=\"Canceling consumer\"`)\n\t\t\t\tch.Close()\n\t\t\tdefault:\n\t\t\t\twork(d.Body)\n\t\t\t}\n\t\t}\n\t}(ctx)\n\tlog.Print(`event=\"Started consumer\"`)\n\n\treturn cancel, nil\n}", "func (o KafkaAclOutput) Topic() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *KafkaAcl) pulumi.StringOutput { return v.Topic }).(pulumi.StringOutput)\n}", "func KafkaCommonCobraInit(cmd *cobra.Command, kconf *KafkaCommonConf) {\n\tdefBrokerList := strings.Split(os.Getenv(\"KAFKA_BROKERS\"), \",\")\n\tif len(defBrokerList) == 1 && defBrokerList[0] == \"\" {\n\t\tdefBrokerList = []string{}\n\t}\n\tdefTLSenabled, _ := strconv.ParseBool(os.Getenv(\"KAFKA_TLS_ENABLED\"))\n\tdefTLSinsecure, _ := strconv.ParseBool(os.Getenv(\"KAFKA_TLS_INSECURE\"))\n\tcmd.Flags().StringArrayVarP(&kconf.Brokers, \"brokers\", \"b\", defBrokerList, \"Comma-separated list of bootstrap brokers\")\n\tcmd.Flags().StringVarP(&kconf.ClientID, \"clientid\", \"i\", os.Getenv(\"KAFKA_CLIENT_ID\"), \"Client ID (or generated UUID)\")\n\tcmd.Flags().StringVarP(&kconf.ConsumerGroup, \"consumer-group\", \"g\", os.Getenv(\"KAFKA_CONSUMER_GROUP\"), \"Client ID (or generated UUID)\")\n\tcmd.Flags().StringVarP(&kconf.TopicIn, \"topic-in\", \"t\", os.Getenv(\"KAFKA_TOPIC_IN\"), \"Topic to listen to\")\n\tcmd.Flags().StringVarP(&kconf.TopicOut, \"topic-out\", \"T\", os.Getenv(\"KAFKA_TOPIC_OUT\"), \"Topic to send events to\")\n\tcmd.Flags().StringVarP(&kconf.TLS.ClientCertsFile, \"tls-clientcerts\", \"c\", os.Getenv(\"KAFKA_TLS_CLIENT_CERT\"), \"A client certificate file, for mutual TLS auth\")\n\tcmd.Flags().StringVarP(&kconf.TLS.ClientKeyFile, \"tls-clientkey\", \"k\", os.Getenv(\"KAFKA_TLS_CLIENT_KEY\"), \"A client private key file, for mutual TLS auth\")\n\tcmd.Flags().StringVarP(&kconf.TLS.CACertsFile, \"tls-cacerts\", \"C\", os.Getenv(\"KAFKA_TLS_CA_CERTS\"), \"CA certificates file (or host CAs will be used)\")\n\tcmd.Flags().BoolVarP(&kconf.TLS.Enabled, \"tls-enabled\", \"e\", defTLSenabled, \"Encrypt network connection with TLS (SSL)\")\n\tcmd.Flags().BoolVarP(&kconf.TLS.InsecureSkipVerify, \"tls-insecure\", \"z\", defTLSinsecure, \"Disable verification of TLS certificate chain\")\n\tcmd.Flags().StringVarP(&kconf.SASL.Username, \"sasl-username\", \"u\", os.Getenv(\"KAFKA_SASL_USERNAME\"), \"Username for SASL authentication\")\n\tcmd.Flags().StringVarP(&kconf.SASL.Password, \"sasl-password\", \"p\", os.Getenv(\"KAFKA_SASL_PASSWORD\"), \"Password for SASL authentication\")\n\treturn\n}", "func OverwriteConsumerForStatus(overwrite runtime.Consumer, forStatusCode int) Option 
{\n\treturn func(rt *runtime.ClientOperation) {\n\t\trt.Reader = &overwriteConsumerReader{\n\t\t\trequestReader: rt.Reader,\n\t\t\tconsumer: overwrite,\n\t\t\tforStatusCode: forStatusCode,\n\t\t}\n\t}\n}", "func (client *Client) MetastoreCreateKafkaTopicWithCallback(request *MetastoreCreateKafkaTopicRequest, callback func(response *MetastoreCreateKafkaTopicResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *MetastoreCreateKafkaTopicResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.MetastoreCreateKafkaTopic(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}", "func NewConsumer(addrs, zookeepers []string, group, topic string, config *Config) (*Consumer, error) {\n\tif config == nil {\n\t\tconfig = new(Config)\n\t}\n\n\tclient, err := sarama.NewClient(addrs, config.Config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tc, err := NewConsumerFromClient(client, zookeepers, group, topic, config)\n\tif err != nil {\n\t\tclient.Close()\n\t\treturn nil, err\n\t}\n\tc.ownClient = true\n\treturn c, nil\n}", "func (_Energyconsumption *EnergyconsumptionTransactor) SetConsumer(opts *bind.TransactOpts, _owner string, _deviceType string, _peakPowerPos uint32, _peakPowerNeg uint32, _latitude uint32, _longitude uint32, _voltageLevel uint32, _location string, _installDate string) (*types.Transaction, error) {\n\treturn _Energyconsumption.contract.Transact(opts, \"setConsumer\", _owner, _deviceType, _peakPowerPos, _peakPowerNeg, _latitude, _longitude, _voltageLevel, _location, _installDate)\n}", "func (this *UnsubscribeMessage) AddTopic(topic []byte) {\n\tif this.TopicExists(topic) {\n\t\treturn\n\t}\n\n\tthis.topics = append(this.topics, topic)\n\tthis.dirty = true\n}", "func (o LiteSubscriptionOutput) Topic() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *LiteSubscription) pulumi.StringOutput { return v.Topic }).(pulumi.StringOutput)\n}", "func New(c *kafka.ConfigMap, topic string, clientID string) (*Consumer, error) {\n\tif err := c.SetKey(\"group.id\", clientID); err != nil {\n\t\treturn nil, err\n\t}\n\tconsumer, err := kafka.NewConsumer(c)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &Consumer{topic, consumer}, nil\n}", "func WithMaxMinByteConsumerOption(min, max int) ConsumerOption {\n\treturn func(c *Consumer) {\n\t\tc.config.MinBytes = min\n\t\tc.config.MaxBytes = max\n\t}\n}", "func NewConsumer(c *aws.Config, stream string, shard string, optionFns ...ConsumerOptionsFn) (*Consumer, error) {\n\tconsumer := &Consumer{consumerOptions: defaultConsumerOptions()}\n\tfor _, optionFn := range optionFns {\n\t\toptionFn(consumer)\n\t}\n\n\tif consumer.reader == nil {\n\t\tr, err := NewKinesisReader(c, stream, shard)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tconsumer.reader = r\n\t}\n\n\tconsumer.LogHelper = &LogHelper{\n\t\tLogLevel: consumer.logLevel,\n\t\tLogger: c.Logger,\n\t}\n\n\treturn consumer, nil\n}", "func New(opts ...Option) (*Kafka, error) {\n\tk := Kafka{\n\t\twriteSig: make(chan struct{}),\n\t\tstream: make(chan *sarama.ConsumerMessage),\n\t\tquitCh: make(chan struct{}),\n\t}\n\n\tfor _, opt := range opts {\n\t\tif err := opt.Apply(&k); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tgo k.timer()\n\n\treturn &k, nil\n}", "func openTopic(brokers []string, config *sarama.Config, topicName string, opts *TopicOptions) (*topic, error) {\n\tif opts 
== nil {\n\t\topts = &TopicOptions{}\n\t}\n\tproducer, err := sarama.NewSyncProducer(brokers, config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &topic{producer: producer, topicName: topicName, opts: *opts}, nil\n}", "func (ms *MqttSocket) Listen(topic string) error {\n\tif token := ms.client.Subscribe(topic, 2, nil); token.Wait() && token.Error() != nil {\n\t\treturn token.Error()\n\t}\n\treturn nil\n}", "func (b *Kafka) Consume(ctx context.Context, topic string, offset int64, imm bool, max int64) ([]string, error) {\n\n\tb.lockForTopic(topic)\n\n\tdefer b.unlockForTopic(topic)\n\t// Fetch offsets\n\tnewOff, err := b.Client.GetOffset(topic, 0, sarama.OffsetNewest)\n\n\tif err != nil {\n\t\tlog.Error(err.Error())\n\t}\n\n\toldOff, err := b.Client.GetOffset(topic, 0, sarama.OffsetOldest)\n\tif err != nil {\n\t\tlog.Error(err.Error())\n\t}\n\n\tlog.Infof(\"Consuming topic: %v, min_offset: %v, max_offset: %v, current_offset: %v\", topic, oldOff, newOff, offset)\n\n\t// If tracked offset is equal or bigger than topic offset means no new messages\n\tif offset >= newOff {\n\t\treturn []string{}, nil\n\t}\n\n\t// If tracked offset is left behind increment it to topic's min. offset\n\tif offset < oldOff {\n\t\tlog.Infof(\"Tracked offset is off for topic: %v, broker_offset %v, tracked_offset: %v\", topic, offset, oldOff)\n\t\treturn []string{}, errors.New(\"offset is off\")\n\t}\n\n\tpartitionConsumer, err := b.Consumer.ConsumePartition(topic, 0, offset)\n\n\tif err != nil {\n\t\tlog.Errorf(\"Unable to consume topic %v, %v, min_offset: %v, max_offset: %v, current_offset: %v\", topic, err.Error(), newOff, oldOff, offset)\n\t\treturn []string{}, err\n\n\t}\n\n\tdefer func() {\n\t\tif err := partitionConsumer.Close(); err != nil {\n\t\t\tlog.Error(err)\n\t\t}\n\t}()\n\n\tmessages := make([]string, 0)\n\tvar consumed int64\n\ttimeout := time.After(300 * time.Second)\n\n\tif imm {\n\t\ttimeout = time.After(100 * time.Millisecond)\n\t}\n\nConsumerLoop:\n\tfor {\n\t\tselect {\n\t\t// If the http client cancels the http request break consume loop\n\t\tcase <-ctx.Done():\n\t\t\t{\n\t\t\t\tbreak ConsumerLoop\n\t\t\t}\n\t\tcase <-timeout:\n\t\t\t{\n\t\t\t\tbreak ConsumerLoop\n\t\t\t}\n\t\tcase msg := <-partitionConsumer.Messages():\n\n\t\t\tmessages = append(messages, string(msg.Value[:]))\n\n\t\t\tconsumed++\n\n\t\t\tlog.Infof(\"Consumed: %v, Max: %v, Latest Message: %v\", consumed, max, string(msg.Value[:]))\n\n\t\t\t// if we pass over the available messages and still want more\n\t\t\tif consumed >= max {\n\t\t\t\tbreak ConsumerLoop\n\t\t\t}\n\n\t\t\tif offset+consumed > newOff-1 {\n\t\t\t\t// if returnImmediately is set don't wait for more\n\t\t\t\tif imm {\n\t\t\t\t\tbreak ConsumerLoop\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t}\n\t}\n\n\treturn messages, nil\n}", "func (d *distEventBus) setupTopicSub(topicName, subscriberName string) error {\n\tctx := context.TODO()\n\n\t// Create the topic if it doesn't exist yet.\n\td.topic = d.client.Topic(topicName)\n\tif exists, err := d.topic.Exists(ctx); err != nil {\n\t\treturn skerr.Fmt(\"Error checking whether topic exits: %s\", err)\n\t} else if !exists {\n\t\tif d.topic, err = d.client.CreateTopic(ctx, topicName); err != nil {\n\t\t\treturn skerr.Fmt(\"Error creating pubsub topic '%s': %s\", topicName, err)\n\t\t}\n\t}\n\n\t// Create the subscription if it doesn't exist.\n\tsubName := fmt.Sprintf(\"%s+%s\", subscriberName, topicName)\n\td.sub = d.client.Subscription(subName)\n\tif exists, err := d.sub.Exists(ctx); err != nil {\n\t\treturn skerr.Fmt(\"Error 
checking existence of pubsub subscription '%s': %s\", subName, err)\n\t} else if !exists {\n\t\td.sub, err = d.client.CreateSubscription(ctx, subName, pubsub.SubscriptionConfig{\n\t\t\tTopic: d.topic,\n\t\t})\n\t\tif err != nil {\n\t\t\treturn skerr.Fmt(\"Error creating pubsub subscription '%s': %s\", subName, err)\n\t\t}\n\t}\n\td.sub.ReceiveSettings.MaxOutstandingMessages = MaximumConcurrentPublishesPerTopic\n\t// Make the subscription also the id of this client.\n\td.clientID = subName\n\treturn nil\n}", "func (k *KafkaBroker) Subscribe(topic string, output chan<- Message, offset int64) (err error) {\n\tconsumer, err := sarama.NewConsumerFromClient(k.Client)\n\n\tif err != nil {\n\t\tpanic(\"Got an error while trying to create a consumer: \" + err.Error())\n\t}\n\n\tconn, err := consumer.ConsumePartition(\n\t\ttopic,\n\t\t0,\n\t\toffset, // Start from the next unread message\n\t)\n\n\tif err != nil {\n\t\tpanic(\"Got an error while trying to consume a partition: \" + err.Error())\n\t}\n\n\tgo func() {\n\t\tfor msg := range conn.Messages() {\n\t\t\toutput <- Message{\n\t\t\t\tKey: msg.Key,\n\t\t\t\tValue: msg.Value,\n\t\t\t\tOffset: msg.Offset,\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn err\n}", "func (e *Emitter) Off(topic string, ch <-chan interface{}) {\n\te.mu.Lock()\n\tdefer e.mu.Unlock()\n\n\tif e.topicListeners == nil {\n\t\treturn\n\t}\n\tlns, ok := e.topicListeners[topic]\n\tif !ok {\n\t\treturn\n\t}\n\tfor i, lnch := range lns {\n\t\tif lnch == ch {\n\t\t\te.topicListeners[topic] = append(lns[:i], lns[i+1:]...)\n\t\t\tclose(lnch)\n\t\t}\n\t}\n}", "func (_options *CreateSubscriptionOptions) SetTopicID(topicID string) *CreateSubscriptionOptions {\n\t_options.TopicID = core.StringPtr(topicID)\n\treturn _options\n}", "func (ctx *TemplateContext) Topic(index int) (string, error) {\n\tif index >= len(ctx.Parts) {\n\t\treturn \"\", errors.New(\"Topic index out of range\")\n\t}\n\n\treturn ctx.Parts[index], nil\n}", "func WithPublishSettings(f func(*pubsub.PublishSettings)) Option {\n\treturn func(c *Config) {\n\t\tc.PublishSettingsFunc = f\n\t}\n}" ]
[ "0.6210682", "0.6181558", "0.5921562", "0.57679456", "0.57265645", "0.5627791", "0.56116796", "0.56046695", "0.5534199", "0.5490605", "0.5454656", "0.54239225", "0.5391212", "0.5385388", "0.53655887", "0.5318569", "0.53060013", "0.52522916", "0.5226082", "0.5217438", "0.51836675", "0.5163402", "0.5160722", "0.51599324", "0.51169115", "0.5111125", "0.50800896", "0.5059972", "0.50502306", "0.5043582", "0.5036317", "0.4978769", "0.49768576", "0.4958254", "0.495567", "0.49436608", "0.49183583", "0.48883966", "0.48652238", "0.48567393", "0.48538595", "0.48445693", "0.48396534", "0.48205784", "0.4793719", "0.4784847", "0.47777796", "0.47677526", "0.47400305", "0.47363672", "0.46925855", "0.46880612", "0.46721664", "0.46599346", "0.46574917", "0.46484822", "0.46292463", "0.4622635", "0.4620243", "0.46171013", "0.45942447", "0.4583241", "0.4574267", "0.45725077", "0.45689616", "0.45590964", "0.45572937", "0.45557138", "0.45530745", "0.45524484", "0.45503196", "0.45440203", "0.4541039", "0.45333898", "0.45326564", "0.45309374", "0.45264181", "0.45248815", "0.45210445", "0.4511363", "0.44798508", "0.4472683", "0.44680575", "0.4453454", "0.4453429", "0.44480407", "0.44386274", "0.44373176", "0.443577", "0.44357035", "0.44327888", "0.44321492", "0.4431936", "0.44291544", "0.44290057", "0.44191667", "0.44181713", "0.4408332", "0.43931213", "0.43870872" ]
0.79187584
0
WithMaxMinByteConsumerOption provides an option to modify the min/max number of bytes that can be read from Kafka in a single fetch
func WithMaxMinByteConsumerOption(min, max int) ConsumerOption { return func(c *Consumer) { c.config.MinBytes = min c.config.MaxBytes = max } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func WithMaxBytes(maxBytes int) ProducerOption {\n\treturn func(p *ProducerConfiguration) {\n\t\t_ = p.KafkaConfig.SetKey(\"message.max.bytes\", maxBytes)\n\t}\n}", "func MaxRequestMaxBytes(max int) ConsumerOption {\n\treturn func(o *api.ConsumerConfig) error {\n\t\to.MaxRequestMaxBytes = max\n\t\treturn nil\n\t}\n}", "func WithMaxInflightBytes(n int) WriterOption {\n\treturn func(ms *ManagedStream) {\n\t\tms.streamSettings.MaxInflightBytes = n\n\t}\n}", "func MaxBytes(m int64) optionSetter {\n\treturn func(o *options) error {\n\t\to.maxBytes = m\n\t\treturn nil\n\t}\n}", "func WithMaxInFlight(maxInFlight int) ProducerOption {\n\treturn func(p *ProducerConfiguration) {\n\t\t_ = p.KafkaConfig.SetKey(\"max.in.flight\", maxInFlight)\n\t}\n}", "func MaxRequestBatch(max uint) ConsumerOption {\n\treturn func(o *api.ConsumerConfig) error {\n\t\to.MaxRequestBatch = int(max)\n\t\treturn nil\n\t}\n}", "func WithMaxBatchSizeBytes(n int64) WriteHandlerOption {\n\treturn func(w *WriteHandler) {\n\t\tw.maxBatchSizeBytes = n\n\t}\n}", "func WithMaxInboxSize(size int) ConvergerOpt {\n\treturn func(converger *converger) {\n\t\tconverger.inbox = make(chan convergeRequest, size)\n\t}\n}", "func MaxMessageSize(size int64) Option {\n\tif size < 0 {\n\t\tpanic(\"size must be non-negative\")\n\t}\n\treturn func(ws *websocket) {\n\t\tws.options.maxMessageSize = size\n\t}\n}", "func EntryByteLimit(n int) LoggerOption { return entryByteLimit(n) }", "func (m *MailTips) SetMaxMessageSize(value *int32)() {\n err := m.GetBackingStore().Set(\"maxMessageSize\", value)\n if err != nil {\n panic(err)\n }\n}", "func WithMaxBytes(maxBytes int) LimitedWriterOption {\n\tbytesWritten := 0\n\treturn func(w io.WriteCloser) io.WriteCloser {\n\t\tpreCheck := NewPreWriteCallbacks(w, func(p []byte) error {\n\t\t\tif bytesWritten+len(p) > maxBytes {\n\t\t\t\tif err := w.Close(); err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"failed to close WriteCloser writing maxBytes; Close error was: %w\", err)\n\t\t\t\t}\n\t\t\t\treturn ErrTooLargeWrite\n\t\t\t}\n\t\t\treturn nil\n\t\t})\n\n\t\treturn NewPostWriteCallbacks(preCheck, func(p []byte, n int, err error) {\n\t\t\tbytesWritten += n\n\t\t})\n\t}\n}", "func MaxMsgSize(n int) Option {\n\treturn func(o *Options) {\n\t\to.MaxMsgSize = n\n\t}\n}", "func BufferedByteLimit(n int) LoggerOption { return bufferedByteLimit(n) }", "func StreamQueryFewerConsumersThan(c uint) StreamQueryOpt {\n\treturn func(q *streamQuery) error {\n\t\ti := int(c)\n\t\tq.consumersLimit = &i\n\t\treturn nil\n\t}\n}", "func MaxHeaderBytes(v int) Option {\n\treturn optionSetter(func(opt *Options) {\n\t\topt.MaxHeaderBytes = v\n\t})\n}", "func (builder *Builder) MaxSizeInKb(maxSizeInKb uint64) *Builder {\n\tbuilder.maxSizeInKb = maxSizeInKb\n\treturn builder\n}", "func MaxAckPending(pending uint) ConsumerOption {\n\treturn func(o *api.ConsumerConfig) error {\n\t\to.MaxAckPending = int(pending)\n\t\treturn nil\n\t}\n}", "func WithMaxInflightRequests(n int) WriterOption {\n\treturn func(ms *ManagedStream) {\n\t\tms.streamSettings.MaxInflightRequests = n\n\t}\n}", "func (cl *Client) maxRecordBatchBytesForTopic(topic string) int32 {\n\t// At a minimum, we will have a produce request containing this one\n\t// topic with one partition and its record batch.\n\t//\n\t// The maximum topic length is 249, which has a 2 byte prefix for\n\t// flexible or non-flexible.\n\t//\n\t// Non-flexible versions will have a 4 byte length topic array prefix\n\t// and a 4 byte length partition array prefix.\n\t//\n\t// Flexible versions would have 
a 1 byte length topic array prefix and\n\t// a 1 byte length partition array prefix, and would also have 3 empty\n\t// tag sections resulting in 3 extra bytes.\n\t//\n\t// Non-flexible versions would have a 4 byte length record bytes\n\t// prefix. Flexible versions could have up to 5.\n\t//\n\t// For the message header itself, with flexible versions, we have one\n\t// extra byte for an empty tag section.\n\t//\n\t// Thus in the worst case, the flexible encoding would still be two\n\t// bytes short of the non-flexible encoding. We will use the\n\t// non-flexible encoding for our max size calculations.\n\tminOnePartitionBatchLength := cl.baseProduceRequestLength() +\n\t\t2 + // topic string length prefix length\n\t\tint32(len(topic)) +\n\t\t4 + // partitions array length\n\t\t4 + // partition int32 encoding length\n\t\t4 // record bytes array length\n\n\twireLengthLimit := cl.cfg.maxBrokerWriteBytes\n\n\trecordBatchLimit := wireLengthLimit - minOnePartitionBatchLength\n\tif cfgLimit := cl.cfg.maxRecordBatchBytes; cfgLimit < recordBatchLimit {\n\t\trecordBatchLimit = cfgLimit\n\t}\n\treturn recordBatchLimit\n}", "func (conn *Conn) SetMaxTopics(max int) {\n\tif max < 1 {\n\t\tmax = 50\n\t}\n\tconn.length = max\n}", "func MaxKeys(value int) Option {\n\treturn addParam(\"maxkeys\", strconv.Itoa(value))\n}", "func MaxSendMsgSize(s int) client.Option {\n\treturn func(o *client.Options) {\n\t\tif o.Context == nil {\n\t\t\to.Context = context.Background()\n\t\t}\n\t\to.Context = context.WithValue(o.Context, maxSendMsgSizeKey{}, s)\n\t}\n}", "func BoostedTreesCreateQuantileStreamResourceMaxElements(value int64) BoostedTreesCreateQuantileStreamResourceAttr {\n\treturn func(m optionalAttr) {\n\t\tm[\"max_elements\"] = value\n\t}\n}", "func defaultMaxInflightBytes(n int) option.ClientOption {\n\treturn &defaultInflightBytesSetting{maxBytes: n}\n}", "func MaxWaiting(pulls uint) ConsumerOption {\n\treturn func(o *api.ConsumerConfig) error {\n\t\to.MaxWaiting = int(pulls)\n\t\treturn nil\n\t}\n}", "func WithMaxVersion(version Version) func(*segmentEncoder) {\n\treturn func(s *segmentEncoder) {\n\t\ts.minVersion = version\n\t}\n}", "func (p *Policy) setMaxBlockSize(ic *interop.Context, args []stackitem.Item) stackitem.Item {\n\tvalue := uint32(toBigInt(args[0]).Int64())\n\tif value > payload.MaxSize {\n\t\tpanic(fmt.Errorf(\"MaxBlockSize cannot be more than the maximum payload size = %d\", payload.MaxSize))\n\t}\n\tok, err := p.checkValidators(ic)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tif !ok {\n\t\treturn stackitem.NewBool(false)\n\t}\n\tp.lock.Lock()\n\tdefer p.lock.Unlock()\n\terr = p.setUint32WithKey(ic.DAO, maxBlockSizeKey, value)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tp.isValid = false\n\treturn stackitem.NewBool(true)\n}", "func PoolMaxStreams(n int) client.Option {\n\treturn func(o *client.Options) {\n\t\tif o.Context == nil {\n\t\t\to.Context = context.Background()\n\t\t}\n\t\to.Context = context.WithValue(o.Context, poolMaxStreams{}, n)\n\t}\n}", "func MaxMsgSize(s int) server.Option {\n\treturn server.SetOption(maxMsgSizeKey{}, s)\n}", "func MaxRecvMsgSize(s int) client.Option {\n\treturn func(o *client.Options) {\n\t\tif o.Context == nil {\n\t\t\to.Context = context.Background()\n\t\t}\n\t\to.Context = context.WithValue(o.Context, maxRecvMsgSizeKey{}, s)\n\t}\n}", "func WithMaxSortedBuffer(maxBufferSize int) func(*redisDAL) {\n\treturn func(r *redisDAL) {\n\t\tr.maxSetSize = maxBufferSize\n\t}\n}", "func Buffer(max int) option {\n\treturn func(o *scanner) {\n\t\to.buffer_size = 
max\n\t}\n}", "func EntryByteThreshold(n int) LoggerOption { return entryByteThreshold(n) }", "func WithOffsetConsumerOption(offset int64) ConsumerOption {\n\treturn func(c *Consumer) {\n\t\tswitch offset {\n\t\tcase LastOffset:\n\t\t\tc.config.StartOffset = LastOffset\n\t\tcase FirstOffset:\n\t\t\tc.config.StartOffset = FirstOffset\n\t\tdefault:\n\t\t\tc.config.StartOffset = FirstOffset\n\t\t}\n\t}\n}", "func (c *Config) MaxSize(stream string) (uint, error) {\n\tkey, err := keyName(stream, \"maxsize\")\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\treturn c.v.GetSizeInBytes(key), nil\n}", "func TestMaxGRPCMessageSize(t *testing.T) {\n\tvar maxmax int\n\n\tfor _, s := range splitter.SupportedAlgorithms() {\n\t\tif max := splitter.GetFactory(s)().MaxSegmentSize(); max > maxmax {\n\t\t\tmaxmax = max\n\t\t}\n\t}\n\n\tif got, want := maxmax, repo.MaxGRPCMessageSize-maxGRPCMessageOverhead; got > want {\n\t\tt.Fatalf(\"invalid constant MaxGRPCMessageSize: %v, want >=%v\", got, want)\n\t}\n}", "func WithMaxSize(s uint) LogBufferOption {\n\treturn func(o *logBufferOptions) {\n\t\to.maxSize = s\n\t}\n}", "func MaxKeys(max int) Option {\n\treturn func(lc *memoryCache) error {\n\t\tlc.maxKeys = max\n\t\treturn nil\n\t}\n}", "func MaxBufferSize(size int) Options {\n\treturn func(c *config) {\n\t\tc.maxBufferSize = size\n\t}\n}", "func MaxValSize(max int) Option {\n\treturn func(lc *memoryCache) error {\n\t\tlc.maxValueSize = max\n\t\treturn nil\n\t}\n}", "func WithMaxSize(maxMB int) Option {\n\treturn WithDialOpts(grpc.WithDefaultCallOptions(\n\t\tgrpc.MaxCallRecvMsgSize(maxMB*MB),\n\t\tgrpc.MaxCallSendMsgSize(maxMB*MB),\n\t))\n}", "func (s *settings) SetMaxReadSize(size uint) {\n\ts.rMaxSize = size\n}", "func WithMaxLen(maxLen int) Option {\n\treturn func(o *options) {\n\t\to.maxLength = maxLen\n\t}\n}", "func OptMaxLen(maxLen int) Option {\n\treturn func(afb *Buffer) {\n\t\tafb.MaxLen = maxLen\n\t}\n}", "func WithMax(m uint) Option {\n\treturn func(opt *options) {\n\t\topt.max = m\n\t}\n}", "func defaultConsumerOptions() *consumerOptions {\n\treturn &consumerOptions{\n\t\tqueueDepth: 10000,\n\t\tconcurrency: 10,\n\t\tStats: &NilConsumerStatsCollector{},\n\t}\n}", "func (b *Kafka) Consume(ctx context.Context, topic string, offset int64, imm bool, max int64) ([]string, error) {\n\n\tb.lockForTopic(topic)\n\n\tdefer b.unlockForTopic(topic)\n\t// Fetch offsets\n\tnewOff, err := b.Client.GetOffset(topic, 0, sarama.OffsetNewest)\n\n\tif err != nil {\n\t\tlog.Error(err.Error())\n\t}\n\n\toldOff, err := b.Client.GetOffset(topic, 0, sarama.OffsetOldest)\n\tif err != nil {\n\t\tlog.Error(err.Error())\n\t}\n\n\tlog.Infof(\"Consuming topic: %v, min_offset: %v, max_offset: %v, current_offset: %v\", topic, oldOff, newOff, offset)\n\n\t// If tracked offset is equal or bigger than topic offset means no new messages\n\tif offset >= newOff {\n\t\treturn []string{}, nil\n\t}\n\n\t// If tracked offset is left behind increment it to topic's min. 
offset\n\tif offset < oldOff {\n\t\tlog.Infof(\"Tracked offset is off for topic: %v, broker_offset %v, tracked_offset: %v\", topic, offset, oldOff)\n\t\treturn []string{}, errors.New(\"offset is off\")\n\t}\n\n\tpartitionConsumer, err := b.Consumer.ConsumePartition(topic, 0, offset)\n\n\tif err != nil {\n\t\tlog.Errorf(\"Unable to consume topic %v, %v, min_offset: %v, max_offset: %v, current_offset: %v\", topic, err.Error(), newOff, oldOff, offset)\n\t\treturn []string{}, err\n\n\t}\n\n\tdefer func() {\n\t\tif err := partitionConsumer.Close(); err != nil {\n\t\t\tlog.Error(err)\n\t\t}\n\t}()\n\n\tmessages := make([]string, 0)\n\tvar consumed int64\n\ttimeout := time.After(300 * time.Second)\n\n\tif imm {\n\t\ttimeout = time.After(100 * time.Millisecond)\n\t}\n\nConsumerLoop:\n\tfor {\n\t\tselect {\n\t\t// If the http client cancels the http request break consume loop\n\t\tcase <-ctx.Done():\n\t\t\t{\n\t\t\t\tbreak ConsumerLoop\n\t\t\t}\n\t\tcase <-timeout:\n\t\t\t{\n\t\t\t\tbreak ConsumerLoop\n\t\t\t}\n\t\tcase msg := <-partitionConsumer.Messages():\n\n\t\t\tmessages = append(messages, string(msg.Value[:]))\n\n\t\t\tconsumed++\n\n\t\t\tlog.Infof(\"Consumed: %v, Max: %v, Latest Message: %v\", consumed, max, string(msg.Value[:]))\n\n\t\t\t// if we pass over the available messages and still want more\n\t\t\tif consumed >= max {\n\t\t\t\tbreak ConsumerLoop\n\t\t\t}\n\n\t\t\tif offset+consumed > newOff-1 {\n\t\t\t\t// if returnImmediately is set don't wait for more\n\t\t\t\tif imm {\n\t\t\t\t\tbreak ConsumerLoop\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t}\n\t}\n\n\treturn messages, nil\n}", "func MaxValSize(max int) Option {\n\treturn func(lc *loadingCache) error {\n\t\tlc.maxValueSize = max\n\t\treturn nil\n\t}\n}", "func (this *FeedableBuffer) Maximize() {\n\tthis.ExpandTo(this.maxByteCount)\n}", "func ConsumerSetAsyncNum(num int) model.Option {\n\treturn model.FuncOption(func(d *model.Dispatcher) { d.ConsumerAsyncNum = num })\n}", "func (a *NamespacesApiService) SetMaxConsumersPerTopic(ctx _context.Context, tenant string, namespace string) (*_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodPost\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/namespaces/{tenant}/{namespace}/maxConsumersPerTopic\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"tenant\"+\"}\", _neturl.QueryEscape(fmt.Sprintf(\"%v\", tenant)), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"namespace\"+\"}\", _neturl.QueryEscape(fmt.Sprintf(\"%v\", namespace)), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, 
localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\treturn localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarHTTPResponse, nil\n}", "func MaxAllowedHeaderSize(max uint64) Option {\n\treturn func(o *Options) {\n\t\to.MaxAllowedHeaderSize = max\n\t}\n}", "func WithLargeThreshold(t int) ClientOption {\n\treturn func(c *Client) {\n\t\tif t > 250 {\n\t\t\tt = 250\n\t\t}\n\t\tif t < 0 {\n\t\t\tt = 0\n\t\t}\n\t\tc.largeThreshold = t\n\t}\n}", "func (h *InputHost) SetExtMsgsLimitPerSecond(connLimit int32) {\n\th.logger.WithField(`val`, connLimit).Info(`SetExtMsgsLimitPerSecond`)\n\tatomic.StoreInt32(&h.extMsgsLimitPerSecond, connLimit)\n\th.updateExtTokenBucket(int32(connLimit))\n}", "func MaxPageSize(n int) Option {\n\treturn func() {\n\t\tmaxPageSize = n\n\t}\n}", "func MaxKeys(max int) Option {\n\treturn func(lc *loadingCache) error {\n\t\tlc.maxKeys = max\n\t\treturn nil\n\t}\n}", "func BufferSizeOption(indicator int) Option {\n\treturn func(o *options) {\n\t\to.bufferSize = indicator\n\t}\n}", "func TestCmdClearRangeBytesThreshold(t *testing.T) {\n\tdefer leaktest.AfterTest(t)()\n\tdefer log.Scope(t).Close(t)\n\n\tstartKey := roachpb.Key(\"0000\")\n\tendKey := roachpb.Key(\"9999\")\n\tdesc := roachpb.RangeDescriptor{\n\t\tRangeID: 99,\n\t\tStartKey: roachpb.RKey(startKey),\n\t\tEndKey: roachpb.RKey(endKey),\n\t}\n\tvalueStr := strings.Repeat(\"0123456789\", 1024)\n\tvar value roachpb.Value\n\tvalue.SetString(valueStr) // 10KiB\n\thalfFull := ClearRangeBytesThreshold / (2 * len(valueStr))\n\toverFull := ClearRangeBytesThreshold/len(valueStr) + 1\n\ttests := []struct {\n\t\tkeyCount int\n\t\texpClearIterCount int\n\t\texpClearRangeCount int\n\t}{\n\t\t{\n\t\t\tkeyCount: 1,\n\t\t\texpClearIterCount: 1,\n\t\t\texpClearRangeCount: 0,\n\t\t},\n\t\t// More than a single key, but not enough to use ClearRange.\n\t\t{\n\t\t\tkeyCount: halfFull,\n\t\t\texpClearIterCount: 1,\n\t\t\texpClearRangeCount: 0,\n\t\t},\n\t\t// With key sizes requiring additional space, this will overshoot.\n\t\t{\n\t\t\tkeyCount: overFull,\n\t\t\texpClearIterCount: 0,\n\t\t\texpClearRangeCount: 1,\n\t\t},\n\t}\n\n\tfor _, test := range tests {\n\t\tt.Run(\"\", func(t *testing.T) {\n\t\t\tctx := context.Background()\n\t\t\teng := storage.NewDefaultInMemForTesting()\n\t\t\tdefer eng.Close()\n\n\t\t\tvar stats enginepb.MVCCStats\n\t\t\tfor i := 0; i < test.keyCount; i++ {\n\t\t\t\tkey := roachpb.Key(fmt.Sprintf(\"%04d\", i))\n\t\t\t\tif err := storage.MVCCPut(ctx, eng, &stats, key, hlc.Timestamp{WallTime: int64(i % 2)}, value, nil); err != nil {\n\t\t\t\t\tt.Fatal(err)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tbatch := &wrappedBatch{Batch: eng.NewBatch()}\n\t\t\tdefer batch.Close()\n\n\t\t\tvar h roachpb.Header\n\t\t\th.RangeID = desc.RangeID\n\n\t\t\tcArgs := CommandArgs{Header: h}\n\t\t\tcArgs.EvalCtx = (&MockEvalCtx{Desc: &desc, Clock: hlc.NewClock(hlc.UnixNano, time.Nanosecond), Stats: stats}).EvalContext()\n\t\t\tcArgs.Args = 
&roachpb.ClearRangeRequest{\n\t\t\t\tRequestHeader: roachpb.RequestHeader{\n\t\t\t\t\tKey: startKey,\n\t\t\t\t\tEndKey: endKey,\n\t\t\t\t},\n\t\t\t}\n\t\t\tcArgs.Stats = &enginepb.MVCCStats{}\n\n\t\t\tif _, err := ClearRange(ctx, batch, cArgs, &roachpb.ClearRangeResponse{}); err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\n\t\t\t// Verify cArgs.Stats is equal to the stats we wrote.\n\t\t\tnewStats := stats\n\t\t\tnewStats.SysBytes, newStats.SysCount, newStats.AbortSpanBytes = 0, 0, 0 // ignore these values\n\t\t\tcArgs.Stats.SysBytes, cArgs.Stats.SysCount, cArgs.Stats.AbortSpanBytes = 0, 0, 0 // these too, as GC threshold is updated\n\t\t\tnewStats.Add(*cArgs.Stats)\n\t\t\tnewStats.AgeTo(0) // pin at LastUpdateNanos==0\n\t\t\tif !newStats.Equal(enginepb.MVCCStats{}) {\n\t\t\t\tt.Errorf(\"expected stats on original writes to be negated on clear range: %+v vs %+v\", stats, *cArgs.Stats)\n\t\t\t}\n\n\t\t\t// Verify we see the correct counts for Clear and ClearRange.\n\t\t\tif a, e := batch.clearIterCount, test.expClearIterCount; a != e {\n\t\t\t\tt.Errorf(\"expected %d iter range clears; got %d\", e, a)\n\t\t\t}\n\t\t\tif a, e := batch.clearRangeCount, test.expClearRangeCount; a != e {\n\t\t\t\tt.Errorf(\"expected %d clear ranges; got %d\", e, a)\n\t\t\t}\n\n\t\t\t// Now ensure that the data is gone, whether it was a ClearRange or individual calls to clear.\n\t\t\tif err := batch.Commit(true /* commit */); err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t\tif err := eng.MVCCIterate(startKey, endKey, storage.MVCCKeyAndIntentsIterKind, func(kv storage.MVCCKeyValue) error {\n\t\t\t\treturn errors.New(\"expected no data in underlying engine\")\n\t\t\t}); err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t})\n\t}\n}", "func OptMaxFlushes(maxFlushes int) Option {\n\treturn func(afb *Buffer) {\n\t\tafb.MaxFlushes = maxFlushes\n\t}\n}", "func (st *Settings) SetMaxConcurrentStreams(streams uint32) {\n\tst.maxStreams = streams\n}", "func (st *Settings) SetMaxConcurrentStreams(streams uint32) {\n\tst.maxStreams = streams\n}", "func (mb *MetricsBuilder) RecordRedisClientsMaxOutputBufferDataPoint(ts pcommon.Timestamp, val int64) {\n\tmb.metricRedisClientsMaxOutputBuffer.recordDataPoint(mb.startTime, ts, val)\n}", "func (m *DeviceEnrollmentWindowsHelloForBusinessConfiguration) SetPinMaximumLength(value *int32)() {\n err := m.GetBackingStore().Set(\"pinMaximumLength\", value)\n if err != nil {\n panic(err)\n }\n}", "func MaxMemoryConsumptionPerIterator(i uint64) QueryOption {\n\treturn func(q *queryOptions) error {\n\t\tq.requestProperties.Options[MaxMemoryConsumptionPerIteratorValue] = i\n\t\treturn nil\n\t}\n}", "func WithMaxPoints(n int) Option {\n\treturn func(c *config) {\n\t\tc.MaxPoints = n\n\t}\n}", "func WithMaxPoints(n int) Option {\n\treturn func(c *config) {\n\t\tc.MaxPoints = n\n\t}\n}", "func WithDecoderConsumerOption(fn Decoder) ConsumerOption {\n\treturn func(c *Consumer) { c.dec = fn }\n}", "func (params *KeyParameters) MaxMsgBytes() int {\n\treturn (params.P.BitLen() / 8) - 4\n}", "func (conn *extHost) SetMsgsLimitPerSecond(connLimit int32) {\n\tatomic.StoreInt32(&conn.extentMsgsLimitPerSecond, connLimit)\n\tconn.SetExtTokenBucketValue(int32(connLimit))\n}", "func WithPublishAsyncMaxPending(max int) JetStreamOpt {\n\treturn func(opts *jsOpts) error {\n\t\tif max < 1 {\n\t\t\treturn fmt.Errorf(\"%w: max ack pending should be >= 1\", ErrInvalidOption)\n\t\t}\n\t\topts.publisherOpts.maxpa = max\n\t\treturn nil\n\t}\n}", "func (w *Whisper) MaxMessageSize() uint32 {\n\tval, _ := 
w.settings.Load(maxMsgSizeIdx)\n\treturn val.(uint32)\n}", "func MaxPageSize(m int) func(*ParquetWriter) error {\n\treturn func(p *ParquetWriter) error {\n\t\tp.max = m\n\t\treturn nil\n\t}\n}", "func (s *PartitionCsmSuite) TestInitialOffsetTooLarge(c *C) {\n\toldestOffsets := s.kh.GetOldestOffsets(topic)\n\tnewestOffsets := s.kh.GetNewestOffsets(topic)\n\tlog.Infof(\"*** test.1 offsets: oldest=%v, newest=%v\", oldestOffsets, newestOffsets)\n\ts.kh.SetOffsets(group, topic, []offsetmgr.Offset{{Val: newestOffsets[partition] + 3}})\n\tpc := Spawn(s.ns, group, topic, partition, s.cfg, s.groupMember, s.msgFetcherF, s.offsetMgrF)\n\tdefer pc.Stop()\n\t// Wait for the partition consumer to initialize.\n\tinitialOffset := <-s.initOffsetCh\n\n\t// When\n\tmessages := s.kh.PutMessages(\"pc\", topic, map[string]int{\"\": 4})\n\tvar msg consumer.Message\n\tselect {\n\tcase msg = <-pc.Messages():\n\tcase <-time.After(time.Second):\n\t\tc.Errorf(\"Message is not consumed\")\n\t}\n\t// Then\n\tc.Assert(msg.Offset, Equals, messages[\"\"][3].Offset)\n\tc.Assert(msg.Offset, Equals, initialOffset.Val)\n}", "func WithReaderConsumerOption(reader *kafgo.Reader) ConsumerOption {\n\treturn func(c *Consumer) { c.reader = reader }\n}", "func (w *Whisper) SetMaxMessageSize(size uint32) error {\n\tif size > MaxMessageSize {\n\t\treturn fmt.Errorf(\"message size too large [%d>%d]\", size, MaxMessageSize)\n\t}\n\tw.settings.Store(maxMsgSizeIdx, size)\n\treturn nil\n}", "func MaxCallRecvMsgSize(v int) Configer {\n\treturn func(c *clientv3.Config) {\n\t\tc.MaxCallRecvMsgSize = v\n\t}\n}", "func MessageBatcherSetBufferSize(size int) MessageBatcherConfiguration {\n\treturn func(batcher *MessageBatcher) {\n\t\tbatcher.maxBuffer = size\n\t}\n}", "func WithRequestTimeoutMs(requestTimeoutMs int) ProducerOption {\n\treturn func(p *ProducerConfiguration) {\n\t\t_ = p.KafkaConfig.SetKey(\"request.timeout.ms\", requestTimeoutMs)\n\t}\n}", "func (s *PartitionCsmSuite) TestInitialOffsetTooLarge(c *C) {\n\toldestOffsets := s.kh.GetOldestOffsets(topic)\n\tnewestOffsets := s.kh.GetNewestOffsets(topic)\n\tlog.Infof(\"*** test.1 offsets: oldest=%v, newest=%v\", oldestOffsets, newestOffsets)\n\ts.kh.SetOffsets(group, topic, []offsetmgr.Offset{{newestOffsets[partition] + 100, \"\"}})\n\tpc := Spawn(s.ns, group, topic, partition, s.cfg, s.groupMember, s.msgIStreamF, s.offsetMgrF)\n\tdefer pc.Stop()\n\t// Wait for the partition consumer to initialize.\n\tinitialOffset := <-s.initOffsetCh\n\n\t// When\n\tmessages := s.kh.PutMessages(\"pc\", topic, map[string]int{\"\": 1})\n\tmsg := <-pc.Messages()\n\n\t// Then\n\tc.Assert(msg.Offset, Equals, messages[\"\"][0].Offset)\n\tc.Assert(msg.Offset, Equals, initialOffset.Val)\n}", "func (c ConsumerGroup) Consume(partitionIds []uint8, offset uint, n uint) (msgs map[uint8][][]byte, err error) {\n\n\tresult := make(map[uint8][][]byte)\n\tnumConsumers := c.NumConsumers\n\tpartitionLength := len(partitionIds)\n\n\tvar brokerAddr string\n\terr = c.ServerConn.Call(\"Server.GetBrokerAddr\", true, &brokerAddr)\n\tcheckError(err)\n\tif err != nil {\n\t\treturn result, err\n\t}\n\n\t// connect to broker\n\tbrokerConn, err := rpc.Dial(\"tcp\", brokerAddr)\n\tcheckError(err)\n\tdefer brokerConn.Close()\n\n\t// generate manifest for each consumer\n\tmanifests := make([][]uint8, numConsumers)\n\tacc := 0\n\tfor i := 0; i < numConsumers; i++ {\n\t\tif acc >= partitionLength {\n\t\t\tbreak\n\t\t}\n\t\tvar temp []uint8\n\t\tfor j := 0; j < int(math.Ceil(float64(partitionLength)/float64(numConsumers))); j++ {\n\t\t\tif 
acc >= partitionLength {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\ttemp = append(temp, partitionIds[acc])\n\t\t\tacc++\n\t\t}\n\t\tmanifests = append(manifests, temp)\n\t}\n\n\t// set max processes (threads)\n\toldMaxProcs := runtime.GOMAXPROCS(numConsumers)\n\n\t// start consumer goroutines\n\tfor i := 0; i < numConsumers; i++ {\n\t\tgo startConsumer(result, manifests[i], offset, n, brokerConn, i)\n\t}\n\n\t// revert maxprocs to old value\n\truntime.GOMAXPROCS(oldMaxProcs)\n\n\treturn result, nil\n}", "func (nsc *NilConsumerStatsCollector) UpdateBatchSize(int) {}", "func (_options *ListTopicsOptions) SetLimit(limit int64) *ListTopicsOptions {\n\t_options.Limit = core.Int64Ptr(limit)\n\treturn _options\n}", "func MaxCallSendMsgSize(v int) Configer {\n\treturn func(c *clientv3.Config) {\n\t\tc.MaxCallSendMsgSize = v\n\t}\n}", "func (arg1 *UConverter) GetMaxCharSize() int", "func MaxKeys(max int) Option {\n\treturn func(lc *cacheImpl) error {\n\t\tlc.maxKeys = max\n\t\treturn nil\n\t}\n}", "func ModifyMaxMessageCount(config *common.Config) (uint32, error) {\n\n\t// Modify Config\n\tbatchSizeBytes := config.ChannelGroup.Groups[\"Orderer\"].Values[\"BatchSize\"].Value\n\tbatchSize := &orderer.BatchSize{}\n\tif err := proto.Unmarshal(batchSizeBytes, batchSize); err != nil {\n\t\treturn 0, err\n\t}\n\tbatchSize.MaxMessageCount = batchSize.MaxMessageCount + 1\n\tnewMatchSizeBytes, err := proto.Marshal(batchSize)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tconfig.ChannelGroup.Groups[\"Orderer\"].Values[\"BatchSize\"].Value = newMatchSizeBytes\n\n\treturn batchSize.MaxMessageCount, nil\n}", "func WithBatchNumMessages(batchNumMessages int) ProducerOption {\n\treturn func(p *ProducerConfiguration) {\n\t\t_ = p.KafkaConfig.SetKey(\"batch.num.messages\", batchNumMessages)\n\t}\n}", "func TestWithMaxScans(t *testing.T) {\n\tgot := &config{maxScans: 0}\n\texpect := &config{maxScans: 1024}\n\n\tWithMaxScans(1024).applyTo(got)\n\tif !isConfigEquals(got, expect) {\n\t\tt.Errorf(\"got %+v != expect %+v\", got, expect)\n\t}\n}", "func WithSortBufferMaxLen(i int) Option {\n\treturn func(o *options) error {\n\t\tif i <= 0 {\n\t\t\treturn fmt.Errorf(\"value for SortBufferMaxLen must be greater than 0: %d\", i)\n\t\t}\n\t\to.sortBufferMaxLen = i\n\t\treturn nil\n\t}\n}", "func NonMaxSuppressionV5PadToMaxOutputSize(value bool) NonMaxSuppressionV5Attr {\n\treturn func(m optionalAttr) {\n\t\tm[\"pad_to_max_output_size\"] = value\n\t}\n}", "func WithMaxFileSZ(sz int) Option {\n\treturn func(opts *options) {\n\t\topts.maxFileSZ = sz\n\t}\n}", "func (b *MessagesGetLongPollHistoryBuilder) MsgsLimit(v int) *MessagesGetLongPollHistoryBuilder {\n\tb.Params[\"msgs_limit\"] = v\n\treturn b\n}", "func TestV3Curl_MaxStreams_BelowLimit_NoTLS_Small(t *testing.T) {\n\ttestV3CurlMaxStream(t, false, withCfg(*e2e.NewConfigNoTLS()), withMaxConcurrentStreams(3))\n}", "func MaxCallRecvMsgSize(s int) CallOption {\n\treturn beforeCall(func(o *callInfo) error {\n\t\to.maxReceiveMessageSize = &s\n\t\treturn nil\n\t})\n}", "func (s *settings) SetMaxWriteSize(size uint) {\n\ts.wMaxSize = size\n}", "func MaxOutstandingMessages(n int) func(e *Endpoint) {\n\treturn func(e *Endpoint) { e.maxOutstandingMessages = n }\n}", "func MinMaxUint8(x, min, max uint8) uint8 { return x }", "func MaxReadahead(n uint32) MountOption {\n\treturn func(conf *mountConfig) error {\n\t\tconf.maxReadahead = n\n\t\treturn nil\n\t}\n}", "func CongestionThreshold(n uint16) MountOption {\n\t// TODO to test this, we'd have to figure out our connection id\n\t// and read 
/sys\n\treturn func(conf *mountConfig) error {\n\t\tconf.congestionThreshold = n\n\t\treturn nil\n\t}\n}" ]
[ "0.6460316", "0.6222933", "0.57933277", "0.5723617", "0.5691092", "0.55269736", "0.5415435", "0.52666736", "0.5261587", "0.5226586", "0.52105325", "0.5204843", "0.51837397", "0.5139786", "0.5101226", "0.5093622", "0.5091504", "0.50828505", "0.50730497", "0.50669616", "0.5047451", "0.5028769", "0.5023742", "0.50199896", "0.50156367", "0.4993855", "0.49901527", "0.49669912", "0.49250895", "0.4924908", "0.48847547", "0.48675102", "0.4860942", "0.4851183", "0.48376057", "0.4834756", "0.48345324", "0.48335108", "0.48014772", "0.47980303", "0.47810727", "0.47606418", "0.4758587", "0.4747579", "0.47453445", "0.4734788", "0.4732476", "0.47305623", "0.4719252", "0.4718179", "0.47135794", "0.4707647", "0.46972883", "0.4686837", "0.4678065", "0.46499524", "0.4647053", "0.46411762", "0.46357226", "0.4619681", "0.46181893", "0.46181893", "0.46064553", "0.4588424", "0.45835003", "0.45826873", "0.45826873", "0.4576844", "0.45747584", "0.45644993", "0.45555297", "0.45500213", "0.45471972", "0.45455447", "0.45404837", "0.45267892", "0.45248204", "0.45182395", "0.45172957", "0.4511567", "0.45110208", "0.4507421", "0.4504594", "0.44992536", "0.44904885", "0.4487458", "0.4485104", "0.44816583", "0.44780126", "0.44705954", "0.44687653", "0.44589797", "0.44439572", "0.4432784", "0.44218236", "0.44163358", "0.4404771", "0.44011158", "0.43977287", "0.4397279" ]
0.811482
0
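The WithMaxMinByteConsumerOption row above and the WithAutoCommitConsumerOption row that follows come from the same functional-options consumer package: each With...ConsumerOption helper returns a closure that mutates the *Consumer before it is opened. A minimal wiring sketch follows. It assumes the NewConsumer(brokers, logger, options...) constructor and the option helpers that appear verbatim among this dump's snippets; the broker address, topic name, byte limits, and the decode/handle bodies are illustrative placeholders, and the sketch is presumed to live in the same package, so log, kafgo, and context refer to that package's own imports.

// Sketch only -- not from the source repository.
func runConsumerSketch(logger log.Logger) error {
	// Assumed Decoder shape, matching how Open() invokes c.dec(ctx, msg).
	decode := func(ctx context.Context, msg kafgo.Message) (interface{}, error) {
		return string(msg.Value), nil // hand the raw payload to the endpoint
	}
	// go-kit style endpoint; here it simply accepts the decoded request.
	handle := func(ctx context.Context, request interface{}) (interface{}, error) {
		return nil, nil
	}
	c, err := NewConsumer(
		[]string{"localhost:9092"}, // placeholder broker
		logger,
		WithTopicConsumerOption("events"),
		WithMaxMinByteConsumerOption(1024, 1<<20), // fetch between 1 KiB and 1 MiB per request
		WithAutoCommitConsumerOption(true),        // commit offsets as messages are read
		WithDecoderConsumerOption(decode),
		WithEndpointConsumerOption(handle),
	)
	if err != nil {
		return err
	}
	return c.Open() // blocks: read -> decode -> endpoint, honoring the autocommit flag
}

Passing options as variadic closures keeps the constructor's signature stable as new knobs such as these byte and commit settings are added.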
WithAutoCommitConsumerOption sets the autocommit property of the consumer
func WithAutoCommitConsumerOption(flag bool) ConsumerOption { return func(c *Consumer) { c.autocommit = flag } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func WithAutoCommitTimeConsumerOption(dur time.Duration) ConsumerOption {\n\treturn func(c *Consumer) { c.config.CommitInterval = dur }\n}", "func (m *Eth) AutoCommit(toggle bool) {\n\tif toggle {\n\t\tm.StartMining()\n\t} else {\n\t\tm.StopMining()\n\t}\n}", "func CommitSync() OptionFunc {\n\treturn func(c *Component) error {\n\t\tif c.saramaConfig != nil && c.saramaConfig.Consumer.Offsets.AutoCommit.Enable {\n\t\t\t// redundant commits warning\n\t\t\tlog.Warn(\"consumer is set to commit offsets after processing each batch and auto-commit is enabled\")\n\t\t}\n\t\tc.commitSync = true\n\t\treturn nil\n\t}\n}", "func consumerTestWithCommits(t *testing.T, testname string, msgcnt int, useChannel bool, consumeFunc func(c *Consumer, mt *msgtracker, expCnt int), rebalanceCb func(c *Consumer, event Event) error) {\n\tconsumerTest(t, testname+\" auto commit\",\n\t\tmsgcnt, consumerCtrl{useChannel: useChannel, autoCommit: true}, consumeFunc, rebalanceCb)\n\n\tconsumerTest(t, testname+\" using CommitMessage() API\",\n\t\tmsgcnt, consumerCtrl{useChannel: useChannel, commitMode: ViaCommitMessageAPI}, consumeFunc, rebalanceCb)\n\n\tconsumerTest(t, testname+\" using CommitOffsets() API\",\n\t\tmsgcnt, consumerCtrl{useChannel: useChannel, commitMode: ViaCommitOffsetsAPI}, consumeFunc, rebalanceCb)\n\n\tconsumerTest(t, testname+\" using Commit() API\",\n\t\tmsgcnt, consumerCtrl{useChannel: useChannel, commitMode: ViaCommitAPI}, consumeFunc, rebalanceCb)\n\n}", "func WithOffsetConsumerOption(offset int64) ConsumerOption {\n\treturn func(c *Consumer) {\n\t\tswitch offset {\n\t\tcase LastOffset:\n\t\t\tc.config.StartOffset = LastOffset\n\t\tcase FirstOffset:\n\t\t\tc.config.StartOffset = FirstOffset\n\t\tdefault:\n\t\t\tc.config.StartOffset = FirstOffset\n\t\t}\n\t}\n}", "func AutoCheckpoint(interval time.Duration) ConsumerOption {\n\treturn func(o *ConsumerOptions) error {\n\t\to.AutoCheckpointInterval = interval\n\t\treturn nil\n\t}\n}", "func AutoOffsetNone() ConsumerOption {\n\treturn func(o *ConsumerOptions) error {\n\t\to.AutoOffset = autoOffsetNone\n\t\treturn nil\n\t}\n}", "func AutoOffsetLatest() ConsumerOption {\n\treturn func(o *ConsumerOptions) error {\n\t\to.AutoOffset = autoOffsetLatest\n\t\treturn nil\n\t}\n}", "func (c *Consumer) Commit() error {\n\tsnap := c.resetAcked()\n\tif len(snap) < 1 {\n\t\treturn nil\n\t}\n\n\tfor partitionID, offset := range snap {\n\t\t// fmt.Printf(\"$,%s,%d,%d\\n\", c.id, partitionID, offset+1)\n\t\tif err := c.zoo.Commit(c.group, c.topic, partitionID, offset+1); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func AutoAck(opt bool) Option {\n\treturn func(o *Options) {\n\t\to.AutoAck = opt\n\t}\n}", "func (tc *consumer) Commit(topic string, partition int32, offset int64) error {\n\treturn nil\n}", "func (c CommitterProbe) SetUseAsyncCommit() {\n\tc.useAsyncCommit = 1\n}", "func AckWait(t time.Duration) ConsumerOption {\n\treturn func(o *api.ConsumerConfig) error {\n\t\to.AckWait = t\n\t\treturn nil\n\t}\n}", "func (c *Consumer) CommitOffset(msg *sarama.ConsumerMessage) {\n\tc.consumer.MarkOffset(msg, \"\")\n}", "func (g *groupConsumer) commitTxn(\n\tctx context.Context,\n\tuncommitted map[string]map[int32]EpochOffset,\n\tonDone func(*kmsg.TxnOffsetCommitRequest, *kmsg.TxnOffsetCommitResponse, error),\n) {\n\tif onDone == nil { // note we must always call onDone\n\t\tonDone = func(_ *kmsg.TxnOffsetCommitRequest, _ *kmsg.TxnOffsetCommitResponse, _ error) {}\n\t}\n\tif len(uncommitted) == 0 { // only empty if called thru autocommit / default 
revoke\n\t\tonDone(new(kmsg.TxnOffsetCommitRequest), new(kmsg.TxnOffsetCommitResponse), nil)\n\t\treturn\n\t}\n\n\tif g.commitCancel != nil {\n\t\tg.commitCancel() // cancel any prior commit\n\t}\n\tpriorCancel := g.commitCancel\n\tpriorDone := g.commitDone\n\n\tcommitCtx, commitCancel := context.WithCancel(g.ctx) // enable ours to be canceled and waited for\n\tcommitDone := make(chan struct{})\n\n\tg.commitCancel = commitCancel\n\tg.commitDone = commitDone\n\n\t// We issue this request even if the producer ID is failed; the request\n\t// will fail if it is.\n\t//\n\t// The id must have been set at least once by this point because of\n\t// addOffsetsToTxn.\n\tid, epoch, _ := g.cl.producerID()\n\tmemberID := g.memberID\n\treq := &kmsg.TxnOffsetCommitRequest{\n\t\tTransactionalID: *g.cl.cfg.txnID,\n\t\tGroup: g.id,\n\t\tProducerID: id,\n\t\tProducerEpoch: epoch,\n\t\tGeneration: g.generation,\n\t\tMemberID: memberID,\n\t\tInstanceID: g.instanceID,\n\t}\n\n\tif ctx.Done() != nil {\n\t\tgo func() {\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\tcommitCancel()\n\t\t\tcase <-commitCtx.Done():\n\t\t\t}\n\t\t}()\n\t}\n\n\tgo func() {\n\t\tdefer close(commitDone) // allow future commits to continue when we are done\n\t\tdefer commitCancel()\n\t\tif priorDone != nil {\n\t\t\tselect {\n\t\t\tcase <-priorDone:\n\t\t\tdefault:\n\t\t\t\tg.cl.cfg.logger.Log(LogLevelDebug, \"canceling prior txn offset commit to issue another\")\n\t\t\t\tpriorCancel()\n\t\t\t\t<-priorDone // wait for any prior request to finish\n\t\t\t}\n\t\t}\n\t\tg.cl.cfg.logger.Log(LogLevelDebug, \"issuing txn offset commit\", \"uncommitted\", uncommitted)\n\n\t\tfor topic, partitions := range uncommitted {\n\t\t\treq.Topics = append(req.Topics, kmsg.TxnOffsetCommitRequestTopic{\n\t\t\t\tTopic: topic,\n\t\t\t})\n\t\t\treqTopic := &req.Topics[len(req.Topics)-1]\n\t\t\tfor partition, eo := range partitions {\n\t\t\t\treqTopic.Partitions = append(reqTopic.Partitions, kmsg.TxnOffsetCommitRequestTopicPartition{\n\t\t\t\t\tPartition: partition,\n\t\t\t\t\tOffset: eo.Offset,\n\t\t\t\t\tLeaderEpoch: eo.Epoch,\n\t\t\t\t\tMetadata: &memberID,\n\t\t\t\t})\n\t\t\t}\n\t\t}\n\n\t\tvar resp *kmsg.TxnOffsetCommitResponse\n\t\tvar err error\n\t\tif len(req.Topics) > 0 {\n\t\t\tresp, err = req.RequestWith(commitCtx, g.cl)\n\t\t}\n\t\tif err != nil {\n\t\t\tonDone(req, nil, err)\n\t\t\treturn\n\t\t}\n\t\tonDone(req, resp, nil)\n\t}()\n}", "func WithReaderConsumerOption(reader *kafgo.Reader) ConsumerOption {\n\treturn func(c *Consumer) { c.reader = reader }\n}", "func (c *offsetCoordinator) commit(\n\ttopic string, partition int32, offset int64, metadata string) (resErr error) {\n\t// Eliminate the scenario where Kafka erroneously returns -1 as the offset\n\t// which then gets made permanent via an immediate flush.\n\t//\n\t// Technically this disallows a valid use case of rewinding a consumer\n\t// group to the beginning, but 1) this isn't possible through any API we\n\t// currently expose since you cannot have a message numbered -1 in hand;\n\t// 2) this restriction only applies to partitions with a non-expired\n\t// message at offset 0.\n\tif offset < 0 {\n\t\treturn fmt.Errorf(\"Cannot commit negative offset %d for [%s:%d].\",\n\t\t\toffset, topic, partition)\n\t}\n\n\tretry := &backoff.Backoff{Min: c.conf.RetryErrWait, Jitter: true}\n\tfor try := 0; try < c.conf.RetryErrLimit; try++ {\n\t\tif try != 0 {\n\t\t\ttime.Sleep(retry.Duration())\n\t\t}\n\n\t\t// get a copy of our connection with the lock, this might establish a new\n\t\t// connection so 
can take a bit\n\t\tconn, err := c.broker.coordinatorConnection(c.conf.ConsumerGroup)\n\t\tif conn == nil {\n\t\t\tresErr = err\n\t\t\tcontinue\n\t\t}\n\t\tdefer func(lconn *connection) { go c.broker.conns.Idle(lconn) }(conn)\n\n\t\tresp, err := conn.OffsetCommit(&proto.OffsetCommitReq{\n\t\t\tClientID: c.broker.conf.ClientID,\n\t\t\tConsumerGroup: c.conf.ConsumerGroup,\n\t\t\tTopics: []proto.OffsetCommitReqTopic{\n\t\t\t\t{\n\t\t\t\t\tName: topic,\n\t\t\t\t\tPartitions: []proto.OffsetCommitReqPartition{\n\t\t\t\t\t\t{ID: partition, Offset: offset, Metadata: metadata},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tresErr = err\n\n\t\tif _, ok := err.(*net.OpError); ok || err == io.EOF || err == syscall.EPIPE {\n\t\t\tlog.Debugf(\"connection died while committing on %s:%d for %s: %s\",\n\t\t\t\ttopic, partition, c.conf.ConsumerGroup, err)\n\t\t\t_ = conn.Close()\n\n\t\t} else if err == nil {\n\t\t\t// Should be a single response in the payload.\n\t\t\tfor _, t := range resp.Topics {\n\t\t\t\tfor _, p := range t.Partitions {\n\t\t\t\t\tif t.Name != topic || p.ID != partition {\n\t\t\t\t\t\tlog.Warningf(\"commit response with unexpected data for %s:%d\",\n\t\t\t\t\t\t\tt.Name, p.ID)\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\treturn p.Err\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn errors.New(\"response does not contain commit information\")\n\t\t}\n\t}\n\treturn resErr\n}", "func WithCommitHandler(handler CommitHandlerFactory) Option {\n\treturn func(gw *Gateway) error {\n\t\tgw.options.CommitHandler = handler\n\t\treturn nil\n\t}\n}", "func (operator *AccessOperator) CreateCommit(cxt context.Context, option *CreateCommitOption) (string, error) {\n\tif option == nil {\n\t\treturn \"\", fmt.Errorf(\"Lost create Commit info\")\n\t}\n\tgrpcOptions := []grpc.CallOption{\n\t\tgrpc.WaitForReady(true),\n\t}\n\t//query business and app first\n\tbusiness, app, err := getBusinessAndApp(operator, operator.Business, option.AppName)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\t//query ConfigSet information for specified ID\n\tcfgSet, err := operator.innergetConfigSet(cxt, business.Bid, app.Appid, option.ConfigSetName)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif cfgSet == nil {\n\t\treturn \"\", fmt.Errorf(\"Found no ConfigSet info\")\n\t}\n\n\trequest := &accessserver.CreateCommitReq{\n\t\tSeq: pkgcommon.Sequence(),\n\t\tBid: business.Bid,\n\t\tAppid: app.Appid,\n\t\tCfgsetid: cfgSet.Cfgsetid,\n\t\tOp: 0,\n\t\tOperator: operator.User,\n\t\tTemplateid: \"\",\n\t\tTemplate: \"\",\n\t\tConfigs: option.Content,\n\t\tChanges: \"\",\n\t}\n\tresponse, err := operator.Client.CreateCommit(cxt, request, grpcOptions...)\n\tif err != nil {\n\t\tlogger.V(3).Infof(\"CreateCommit: post new Commit for ConfigSet [%s] failed, %s\", option.ConfigSetName, err.Error())\n\t\treturn \"\", err\n\t}\n\tif response.ErrCode != common.ErrCode_E_OK {\n\t\tlogger.V(3).Infof(\n\t\t\t\"CreateCommit: post new new Commit for ConfigSet [%s] successfully, but response failed: %s\",\n\t\t\toption.ConfigSetName, response.ErrMsg,\n\t\t)\n\t\treturn \"\", fmt.Errorf(\"%s\", response.ErrMsg)\n\t}\n\tif len(response.Commitid) == 0 {\n\t\tlogger.V(3).Infof(\"CreateConfigSet: BSCP system error, No CommitID response\")\n\t\treturn \"\", fmt.Errorf(\"Lost CommitID from configuraiotn platform\")\n\t}\n\treturn response.Commitid, nil\n}", "func WithCheckpoint(checkpoint Checkpoint) Option {\n\treturn func(c *Consumer) error {\n\t\tc.checkpoint = checkpoint\n\t\treturn nil\n\t}\n}", "func (w *mongoAutoCommitWorker) commit() (err error) 
{\n\tw.docMu.Lock()\n\tdefer w.docMu.Unlock()\n\tif w.capacity() > 0 {\n\t\tif w.p.verbose {\n\t\t\tlog.Info(fmt.Sprintf(\"AutoCommitter-%s(%s)-%d commiting size: %d\", w.p.name, w.p.coll, w.i, len(w.docInsert)))\n\t\t}\n\t\tsession := w.p.client.Session.Copy()\n\t\tdefer session.Close()\n\t\tdb := w.p.client.Db\n\t\tcollName := w.p.coll\n\t\tcoll := session.DB(db).C(collName)\n\t\tbulk := coll.Bulk()\n\t\tbulk.Unordered()\n\n\t\tif len(w.docInsert) > 0 {\n\t\t\tbulk.Insert(w.docInsert...)\n\t\t}\n\n\t\tif len(w.docUpdate) > 0 {\n\t\t\tbulk.Update(w.docUpdate...)\n\t\t}\n\n\t\tif len(w.docUpsert) > 0 {\n\t\t\tbulk.Upsert(w.docUpsert...)\n\t\t}\n\n\t\tw.docInsert = []interface{}{}\n\t\tw.docUpdate = []interface{}{}\n\t\tw.docUpsert = []interface{}{}\n\n\t\tif _, err = bulk.Run(); err != nil {\n\t\t\tlog.Info(fmt.Sprintf(\"AutoCommitter-%s(%s)-%d >>ERROR<<: %v\", w.p.name, w.p.coll, w.i, err.Error()))\n\t\t}\n\t\tif w.p.verbose {\n\t\t\tlog.Info(fmt.Sprintf(\"AutoCommitter-%s(%s)-%d committed size: %d\", w.p.name, w.p.coll, w.i, len(w.docInsert)))\n\t\t}\n\t} else {\n\t\tif w.p.verbose {\n\t\t\tlog.Info(fmt.Sprintf(\"AutoCommitter-%s(%s)-%d committed nothing\", w.p.name, w.p.coll, w.i))\n\t\t}\n\t}\n\treturn\n}", "func (c *Consumer) Open() error {\n\tif c.reader == nil {\n\t\tc.reader = kafgo.NewReader(*c.config)\n\t}\n\n\tfor {\n\t\t// start a new context\n\t\tvar (\n\t\t\tctx = context.Background()\n\t\t\tmsg kafgo.Message\n\t\t\terr error\n\t\t)\n\n\t\tif c.autocommit {\n\t\t\tmsg, err = c.reader.ReadMessage(ctx)\n\t\t} else {\n\t\t\tmsg, err = c.reader.FetchMessage(ctx)\n\t\t}\n\n\t\tif err != nil {\n\t\t\tc.errFn(ctx, msg, errors.Wrap(\n\t\t\t\terr, \"read message from kafka failed\",\n\t\t\t))\n\t\t\tc.errHandler.Handle(ctx, err)\n\t\t\tcontinue\n\t\t}\n\n\t\t// before endpoint\n\t\tfor _, fn := range c.befores {\n\t\t\tctx = fn(ctx, msg)\n\t\t}\n\n\t\trq, err := c.dec(ctx, msg)\n\t\tif err != nil {\n\t\t\tc.errFn(ctx, msg, err)\n\t\t\tc.errHandler.Handle(ctx, err)\n\t\t\tcontinue\n\t\t}\n\n\t\t// execute endpoint\n\t\trs, err := c.end(ctx, rq)\n\t\tif err != nil {\n\t\t\tc.errFn(ctx, msg, err)\n\t\t\tc.errHandler.Handle(ctx, err)\n\t\t\tcontinue\n\t\t}\n\n\t\tfor _, fn := range c.afters {\n\t\t\tctx = fn(ctx, msg, rs)\n\t\t}\n\n\t\tif !c.autocommit {\n\t\t\terr = c.reader.CommitMessages(ctx, msg)\n\t\t\tif err != nil {\n\t\t\t\tc.errFn(ctx, msg, err)\n\t\t\t\tc.errHandler.Handle(ctx, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t}\n}", "func WithEndpointConsumerOption(end endpoint.Endpoint) ConsumerOption {\n\treturn func(c *Consumer) { c.end = end }\n}", "func AutoOffsetEarliest() ConsumerOption {\n\treturn func(o *ConsumerOptions) error {\n\t\to.AutoOffset = autoOffsetEarliest\n\t\treturn nil\n\t}\n}", "func (oc *OrdererConfig) Commit() {\n\toc.ordererGroup.OrdererConfig = oc\n}", "func WithTopicConsumerOption(topic string) ConsumerOption {\n\treturn func(c *Consumer) {\n\t\tc.config.Topic = topic\n\t}\n}", "func (c *Conn) GetAutocommit() bool {\n\treturn C.sqlite3_get_autocommit(c.db) != 0\n}", "func AcknowledgeNone() ConsumerOption {\n\treturn func(o *api.ConsumerConfig) error {\n\t\to.AckPolicy = api.AckNone\n\t\treturn nil\n\t}\n}", "func (c *Cyclone) commit(msg *erebos.Transport) {\n\tmsg.Commit <- &erebos.Commit{\n\t\tTopic: msg.Topic,\n\t\tPartition: msg.Partition,\n\t\tOffset: msg.Offset,\n\t}\n}", "func maybeCommit(c *kafka.Consumer, topicPartition kafka.TopicPartition) error {\n\t// Commit the already-stored offsets to Kafka whenever the offset is divisible\n\t// by 10, 
otherwise return early.\n\t// This logic is completely arbitrary. We can use any other internal or\n\t// external variables to decide when we commit the already-stored offsets.\n\tif topicPartition.Offset%10 != 0 {\n\t\treturn nil\n\t}\n\n\tcommitedOffsets, err := c.Commit()\n\n\t// ErrNoOffset occurs when there are no stored offsets to commit. This\n\t// can happen if we haven't stored anything since the last commit.\n\t// While this will never happen for this example since we call this method\n\t// per-message, and thus, always have something to commit, the error\n\t// handling is illustrative of how to handle it in cases we call Commit()\n\t// in another way, for example, every N seconds.\n\tif err != nil && err.(kafka.Error).Code() != kafka.ErrNoOffset {\n\t\treturn err\n\t}\n\n\tfmt.Printf(\"%% Commited offsets to Kafka: %v\\n\", commitedOffsets)\n\treturn nil\n}", "func (s *Service) onOffsetCommit(brokerId int32, duration time.Duration) {\n\n\t// todo:\n\t// if the commit took too long, don't count it in 'commits' but add it to the histogram?\n\t// and how do we want to handle cases where we get an error??\n\t// should we have another metric that tells us about failed commits? or a label on the counter?\n\tbrokerIdStr := fmt.Sprintf(\"%v\", brokerId)\n\ts.endToEndCommitLatency.WithLabelValues(brokerIdStr).Observe(duration.Seconds())\n\n\tif duration > s.config.Consumer.CommitSla {\n\t\treturn\n\t}\n\n\ts.endToEndCommits.Inc()\n}", "func OverwriteConsumer(overwrite runtime.Consumer) Option {\n\treturn OverwriteConsumerForStatus(overwrite, ForAllStatusCodes)\n}", "func (orm *Xorm) SetAutoTransaction(b bool) {\n\torm.autoTx = b\n}", "func WithDecoderConsumerOption(fn Decoder) ConsumerOption {\n\treturn func(c *Consumer) { c.dec = fn }\n}", "func (a v3ioAppender) Commit() error { return nil }", "func WithAfterFuncsConsumerOption(fns ...AfterFunc) ConsumerOption {\n\treturn func(c *Consumer) { c.afters = append(c.afters, fns...) }\n}", "func OverwriteConsumerForStatus(overwrite runtime.Consumer, forStatusCode int) Option {\n\treturn func(rt *runtime.ClientOperation) {\n\t\trt.Reader = &overwriteConsumerReader{\n\t\t\trequestReader: rt.Reader,\n\t\t\tconsumer: overwrite,\n\t\t\tforStatusCode: forStatusCode,\n\t\t}\n\t}\n}", "func (p *AutoCommitter) Close() error {\n\tp.startedMu.Lock()\n\tdefer p.startedMu.Unlock()\n\n\t// Already stopped? 
Do nothing.\n\tif !p.started {\n\t\treturn nil\n\t}\n\n\t// Stop flusher (if enabled)\n\tif p.flusherStopC != nil {\n\t\tp.flusherStopC <- struct{}{}\n\t\t<-p.flusherStopC\n\t\tclose(p.flusherStopC)\n\t\tp.flusherStopC = nil\n\t}\n\n\t// Stop all workers.\n\tclose(p.docsInsert)\n\tclose(p.docsUpdate)\n\tclose(p.docsUpsert)\n\tp.workerWg.Wait()\n\n\tp.started = false\n\n\treturn nil\n}", "func consumerTest(t *testing.T, testname string, msgcnt int, cc consumerCtrl, consumeFunc func(c *Consumer, mt *msgtracker, expCnt int), rebalanceCb func(c *Consumer, event Event) error) {\n\n\tif msgcnt == 0 {\n\t\tcreateTestMessages()\n\t\tproducerTest(t, \"Priming producer\", p0TestMsgs, producerCtrl{},\n\t\t\tfunc(p *Producer, m *Message, drChan chan Event) {\n\t\t\t\tp.ProduceChannel() <- m\n\t\t\t})\n\t\tmsgcnt = len(p0TestMsgs)\n\t}\n\n\tconf := ConfigMap{\"bootstrap.servers\": testconf.Brokers,\n\t\t\"go.events.channel.enable\": cc.useChannel,\n\t\t\"group.id\": testconf.GroupID,\n\t\t\"session.timeout.ms\": 6000,\n\t\t\"api.version.request\": \"true\",\n\t\t\"enable.auto.commit\": cc.autoCommit,\n\t\t\"debug\": \",\",\n\t\t\"auto.offset.reset\": \"earliest\"}\n\n\tconf.updateFromTestconf()\n\n\tc, err := NewConsumer(&conf)\n\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer c.Close()\n\n\texpCnt := msgcnt\n\tmt := msgtrackerStart(t, expCnt)\n\n\tt.Logf(\"%s, expecting %d messages\", testname, expCnt)\n\tc.Subscribe(testconf.Topic, rebalanceCb)\n\n\tconsumeFunc(c, &mt, expCnt)\n\n\t//test commits\n\tswitch cc.commitMode {\n\tcase ViaCommitMessageAPI:\n\t\t// verify CommitMessage() API\n\t\tfor _, message := range mt.msgs {\n\t\t\t_, commitErr := c.CommitMessage(message)\n\t\t\tif commitErr != nil {\n\t\t\t\tt.Errorf(\"Cannot commit message. Error: %s\\n\", commitErr)\n\t\t\t}\n\t\t}\n\tcase ViaCommitOffsetsAPI:\n\t\t// verify CommitOffset\n\t\tpartitions := make([]TopicPartition, len(mt.msgs))\n\t\tfor index, message := range mt.msgs {\n\t\t\tpartitions[index] = message.TopicPartition\n\t\t}\n\t\t_, commitErr := c.CommitOffsets(partitions)\n\t\tif commitErr != nil {\n\t\t\tt.Errorf(\"Failed to commit using CommitOffsets. Error: %s\\n\", commitErr)\n\t\t}\n\tcase ViaCommitAPI:\n\t\t// verify Commit() API\n\t\t_, commitErr := c.Commit()\n\t\tif commitErr != nil {\n\t\t\tt.Errorf(\"Failed to commit. 
Error: %s\", commitErr)\n\t\t}\n\n\t}\n\n\t// Trigger RevokePartitions\n\tc.Unsubscribe()\n\n\t// Handle RevokePartitions\n\tc.Poll(500)\n\n}", "func initConsumer() sarama.Consumer {\n\tsarama.Logger = log.New(os.Stdout, \"\", log.Ltime)\n\n\tconfig := sarama.NewConfig()\n\tconfig.ClientID = CLIENTID\n\tconfig.Consumer.Return.Errors = true\n\n\tbrokers := []string{BROKERS}\n\n\tmaster, err := sarama.NewConsumer(brokers, config)\n\tif err != nil {\n\t\tfmt.Println(\"error create master consumer: \")\n\t\tpanic(err)\n\t}\n\n\treturn master\n}", "func (c *Coordinator) CommitOffset(topic string, partition int32, offset int64) error {\n\tb, err := c.client.Coordinator(c.cfg.GroupID)\n\tif err != nil {\n\t\treturn err\n\t}\n\t// OffsetCommitRequest retention time should be -1 to signify to use the broker default.\n\tvar rt int64 = -1\n\tif c.cfg.RetentionTime.Nanoseconds() != 0 {\n\t\trt = c.cfg.RetentionTime.Nanoseconds() / int64(time.Millisecond)\n\t}\n\treq := &sarama.OffsetCommitRequest{\n\t\tConsumerGroup: c.cfg.GroupID,\n\t\tConsumerGroupGeneration: c.gid,\n\t\tConsumerID: c.mid,\n\t\tRetentionTime: rt,\n\t\tVersion: offsetCommitRequestVersion,\n\t}\n\treq.AddBlock(topic, partition, offset, 0, \"\")\n\tresp, err := b.CommitOffset(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\t// return first error we happen to iterate into (if any).\n\tfor _, topicErrs := range resp.Errors {\n\t\tfor _, partitionErr := range topicErrs {\n\t\t\tif partitionErr == sarama.ErrNoError {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\treturn partitionErr\n\t\t}\n\t}\n\treturn nil\n}", "func (c *Consumer) commitLoop() error {\n\tfor {\n\t\tselect {\n\t\tcase <-c.closer.Dying():\n\t\t\treturn nil\n\t\tcase <-time.After(c.config.CommitEvery):\n\t\t}\n\n\t\tif err := c.Commit(); err != nil {\n\t\t\tc.config.Notifier.CommitError(c, err)\n\t\t}\n\t}\n}", "func AcknowledgeExplicit() ConsumerOption {\n\treturn func(o *api.ConsumerConfig) error {\n\t\to.AckPolicy = api.AckExplicit\n\t\treturn nil\n\t}\n}", "func InitAfterTxCommitCallback(ctx context.Context) context.Context {\n\tafterCommitFunc := []AfterCommitCallback{}\n\treturn context.WithValue(ctx, CtxKeyAfterCommitCallback, &afterCommitFunc)\n}", "func (_Energyconsumption *EnergyconsumptionTransactor) SetConsumer(opts *bind.TransactOpts, _owner string, _deviceType string, _peakPowerPos uint32, _peakPowerNeg uint32, _latitude uint32, _longitude uint32, _voltageLevel uint32, _location string, _installDate string) (*types.Transaction, error) {\n\treturn _Energyconsumption.contract.Transact(opts, \"setConsumer\", _owner, _deviceType, _peakPowerPos, _peakPowerNeg, _latitude, _longitude, _voltageLevel, _location, _installDate)\n}", "func (ep *eventsProvider) Commit() error {\n\treturn nil\n}", "func WithGroupIDConsumerOption(groupID string) ConsumerOption {\n\treturn func(c *Consumer) {\n\t\tc.config.GroupID = groupID\n\t}\n}", "func TestLeaderAcknowledgeCommit(t *testing.T) {\n\ttests := []struct {\n\t\tsize int\n\t\tacceptors map[uint64]bool\n\t\twack bool\n\t}{\n\t\t{1, nil, true},\n\t\t{3, nil, false},\n\t\t{3, map[uint64]bool{2: true}, true},\n\t\t{3, map[uint64]bool{2: true, 3: true}, true},\n\t\t{5, nil, false},\n\t\t{5, map[uint64]bool{2: true}, false},\n\t\t{5, map[uint64]bool{2: true, 3: true}, true},\n\t\t{5, map[uint64]bool{2: true, 3: true, 4: true}, true},\n\t\t{5, map[uint64]bool{2: true, 3: true, 4: true, 5: true}, true},\n\t}\n\tfor i, tt := range tests {\n\t\ts := NewMemoryStorage()\n\t\tr := newTestRaft(1, idsBySize(tt.size), 10, 1, s)\n\t\tdefer 
closeAndFreeRaft(r)\n\t\tr.becomeCandidate()\n\t\tr.becomeLeader()\n\t\tcommitNoopEntry(r, s)\n\t\tli := r.raftLog.lastIndex()\n\t\tr.Step(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte(\"some data\")}}})\n\n\t\tfor _, m := range r.readMessages() {\n\t\t\tif tt.acceptors[m.To] {\n\t\t\t\tr.Step(acceptAndReply(m))\n\t\t\t}\n\t\t}\n\n\t\tif g := r.raftLog.committed > li; g != tt.wack {\n\t\t\tt.Errorf(\"#%d: ack commit = %v, want %v\", i, g, tt.wack)\n\t\t}\n\t}\n}", "func (cl *Client) commitTransactionOffsets(\n\tctx context.Context,\n\tuncommitted map[string]map[int32]EpochOffset,\n\tonDone func(*kmsg.TxnOffsetCommitRequest, *kmsg.TxnOffsetCommitResponse, error),\n) {\n\tcl.cfg.logger.Log(LogLevelDebug, \"in commitTransactionOffsets\", \"with\", uncommitted)\n\tdefer cl.cfg.logger.Log(LogLevelDebug, \"left commitTransactionOffsets\")\n\n\tif cl.cfg.txnID == nil {\n\t\tonDone(nil, nil, ErrNotTransactional)\n\t\treturn\n\t}\n\n\t// Before committing, ensure we are at least in a transaction. We\n\t// unlock the producer txnMu before committing to allow EndTransaction\n\t// to go through, even though that could cut off our commit.\n\tcl.producer.txnMu.Lock()\n\tif !cl.producer.inTxn {\n\t\tonDone(nil, nil, ErrNotInTransaction)\n\t\tcl.producer.txnMu.Unlock()\n\t\treturn\n\t}\n\tcl.consumer.mu.Lock()\n\tcl.producer.txnMu.Unlock()\n\n\tdefer cl.consumer.mu.Unlock()\n\tif cl.consumer.typ != consumerTypeGroup {\n\t\tonDone(new(kmsg.TxnOffsetCommitRequest), new(kmsg.TxnOffsetCommitResponse), ErrNotGroup)\n\t\treturn\n\t}\n\tif len(uncommitted) == 0 {\n\t\tonDone(new(kmsg.TxnOffsetCommitRequest), new(kmsg.TxnOffsetCommitResponse), nil)\n\t\treturn\n\t}\n\n\tg := cl.consumer.group\n\tg.mu.Lock()\n\tdefer g.mu.Unlock()\n\n\tif !g.offsetsAddedToTxn {\n\t\tif err := cl.addOffsetsToTxn(g.ctx, g.id); err != nil {\n\t\t\tif onDone != nil {\n\t\t\t\tonDone(nil, nil, err)\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t}\n\n\tg.commitTxn(ctx, uncommitted, onDone)\n}", "func NewConsumer(topics []string, valueFactory ValueFactory, opts ...ConsumerOption) (*Consumer, error) {\n\tc := &Consumer{\n\t\tvalueFactory: valueFactory,\n\t\tavroAPI: avro.DefaultConfig,\n\t\tensureTopics: true,\n\t}\n\t// Loop through each option\n\tfor _, opt := range opts {\n\t\t// apply option\n\t\topt.applyC(c)\n\t}\n\n\tvar err error\n\n\t// if consumer not provided - make one\n\tif c.KafkaConsumer == nil {\n\t\t// if kafka config not provided - build default one\n\t\tif c.kafkaCfg == nil {\n\t\t\tvar envCfg struct {\n\t\t\t\tBroker string `env:\"KAFKA_BROKER\" envDefault:\"localhost:9092\"`\n\t\t\t\tCAFile string `env:\"KAFKA_CA_FILE\"`\n\t\t\t\tKeyFile string `env:\"KAFKA_KEY_FILE\"`\n\t\t\t\tCertificateFile string `env:\"KAFKA_CERTIFICATE_FILE\"`\n\t\t\t\tGroupID string `env:\"KAFKA_GROUP_ID\"`\n\t\t\t}\n\t\t\tif err := env.Parse(&envCfg); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\t// default configuration\n\t\t\tc.kafkaCfg = &kafka.ConfigMap{\n\t\t\t\t\"bootstrap.servers\": envCfg.Broker,\n\t\t\t\t\"socket.keepalive.enable\": true,\n\t\t\t\t\"enable.auto.commit\": false,\n\t\t\t\t\"enable.partition.eof\": true,\n\t\t\t\t\"session.timeout.ms\": 6000,\n\t\t\t\t\"auto.offset.reset\": \"earliest\",\n\t\t\t\t\"group.id\": envCfg.GroupID,\n\t\t\t}\n\n\t\t\tif envCfg.CAFile != \"\" {\n\t\t\t\t// configure SSL\n\t\t\t\tc.kafkaCfg.SetKey(\"security.protocol\", \"ssl\")\n\t\t\t\tc.kafkaCfg.SetKey(\"ssl.ca.location\", envCfg.CAFile)\n\t\t\t\tc.kafkaCfg.SetKey(\"ssl.key.location\", 
envCfg.KeyFile)\n\t\t\t\tc.kafkaCfg.SetKey(\"ssl.certificate.location\", envCfg.CertificateFile)\n\t\t\t}\n\t\t}\n\n\t\tif c.KafkaConsumer, err = kafka.NewConsumer(c.kafkaCfg); err != nil {\n\t\t\treturn nil, errors.WithMessage(err, \"cannot initialize kafka consumer\")\n\t\t}\n\t}\n\n\tif c.srClient == nil {\n\t\tif c.srURL == nil {\n\t\t\tvar envCfg struct {\n\t\t\t\tSchemaRegistry *url.URL `env:\"KAFKA_SCHEMA_REGISTRY\" envDefault:\"http://localhost:8081\"`\n\t\t\t}\n\t\t\tif err := env.Parse(&envCfg); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tc.srURL = envCfg.SchemaRegistry\n\t\t}\n\n\t\tif c.srClient, err = NewCachedSchemaRegistryClient(c.srURL.String()); err != nil {\n\t\t\treturn nil, errors.WithMessage(err, \"cannot initialize schema registry client\")\n\t\t}\n\t}\n\n\tif c.eventHandler == nil {\n\t\tc.eventHandler = func(event kafka.Event) {\n\t\t\tlog.Println(event)\n\t\t}\n\t}\n\n\tif topics != nil {\n\t\tif err := c.KafkaConsumer.SubscribeTopics(topics, nil); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif c.ensureTopics {\n\t\t\tif err = c.EnsureTopics(topics); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn c, nil\n}", "func NewConsumer(log logrus.FieldLogger, conf Config, opts ...ConfigOpt) (Consumer, error) {\n\t// See Reference at https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md\n\tkafkaConf := conf.baseKafkaConfig()\n\t_ = kafkaConf.SetKey(\"enable.auto.offset.store\", false) // manually StoreOffset after processing a message. Otherwise races may happen.)\n\n\t// In case we try to assign an offset out of range (greater than log-end-offset), consumer will use start consuming from offset zero.\n\t_ = kafkaConf.SetKey(\"auto.offset.reset\", \"earliest\")\n\n\tconf.Consumer.Apply(kafkaConf)\n\tfor _, opt := range opts {\n\t\topt(kafkaConf)\n\t}\n\n\tif err := conf.configureAuth(kafkaConf); err != nil {\n\t\treturn nil, errors.Wrap(err, \"error configuring auth for the Kafka consumer\")\n\t}\n\n\tconsumer, err := kafkalib.NewConsumer(kafkaConf)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif conf.RequestTimeout == 0 {\n\t\tconf.RequestTimeout = DefaultTimeout\n\t}\n\n\tcc := &ConfluentConsumer{\n\t\tc: consumer,\n\t\tconf: conf,\n\t\tlog: log,\n\t}\n\n\tlogFields := logrus.Fields{\"kafka_topic\": cc.conf.Topic}\n\n\tif cc.conf.Consumer.Partition != nil || cc.conf.Consumer.PartitionKey != \"\" {\n\t\t// set the default partitioner algorithm\n\t\tif cc.conf.Consumer.PartitionerAlgorithm == \"\" {\n\t\t\tcc.conf.Consumer.PartitionerAlgorithm = PartitionerMurMur2\n\t\t}\n\t\t// Set the partition if a key is set to determine the partition\n\t\tif cc.conf.Consumer.PartitionKey != \"\" && cc.conf.Consumer.PartitionerAlgorithm != \"\" {\n\t\t\tcc.AssignPartitionByKey(cc.conf.Consumer.PartitionKey, cc.conf.Consumer.PartitionerAlgorithm)\n\t\t}\n\n\t\tlogFields[\"kafka_partition_key\"] = cc.conf.Consumer.PartitionKey\n\t\tlogFields[\"kafka_partition\"] = *cc.conf.Consumer.Partition\n\t}\n\n\tcc.setupRebalanceHandler()\n\tcc.log.WithFields(logFields).Debug(\"Subscribing to Kafka topic\")\n\tif serr := cc.c.Subscribe(cc.conf.Topic, cc.rebalanceHandler); serr != nil {\n\t\terr = errors.Wrap(serr, \"error subscribing to topic\")\n\t}\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn cc, nil\n}", "func (c *offsetCoordinator) Commit(topic string, partition int32, offset int64) error {\n\treturn c.commit(topic, partition, offset, \"\")\n}", "func WithURL(url string) GitCommitOptionsFunc {\n\treturn func(c *CreateCommit) 
error {\n\t\tif url == \"\" {\n\t\t\treturn errors.New(\"url is required\")\n\t\t}\n\t\tc.URL = url\n\t\treturn nil\n\t}\n}", "func (c *consumer) Ack() error {\n\tif err := c.store.Ack(c.topic, c.ackOffset); err != nil {\n\t\treturn fmt.Errorf(\"acking topic %s with offset %d: %v\", c.topic, c.ackOffset, err)\n\t}\n\n\tc.outstanding = false\n\n\treturn nil\n}", "func (d *dispatcher) dispatchBlockCommit(msg proto.Message, done chan bool) {\n\tif atomic.LoadInt32(&d.shutdown) != 0 {\n\t\tif done != nil {\n\t\t\tclose(done)\n\t\t}\n\t\treturn\n\t}\n\td.newsChan <- &blockMsg{(msg).(*pb.BlockPb), pb.MsgBlockProtoMsgType, done}\n}", "func NewAutoCommitTxRunMessage(query string, params map[string]interface{}, timeout time.Duration, txConfig map[string]interface{}, dbName string, mode bolt_mode.AccessMode) RunWithMetadataMessage {\n\treturn newRunMessageWithMetadata(query, params, BuildTxMetadataWithDatabase(&timeout, txConfig, dbName, mode, nil))\n}", "func (m *ManagedConsumer) Ack(ctx context.Context, msg Message) error {\n\tfor {\n\t\tm.mu.RLock()\n\t\tconsumer := m.consumer\n\t\twait := m.waitc\n\t\tm.mu.RUnlock()\n\n\t\tif consumer == nil {\n\t\t\tselect {\n\t\t\tcase <-wait:\n\t\t\t\t// a new consumer was established.\n\t\t\t\t// Re-enter read-lock to obtain it.\n\t\t\t\tcontinue\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn ctx.Err()\n\t\t\t}\n\t\t}\n\n\t\treturn consumer.Ack(msg)\n\t}\n}", "func ConsumerSetAsyncNum(num int) model.Option {\n\treturn model.FuncOption(func(d *model.Dispatcher) { d.ConsumerAsyncNum = num })\n}", "func (server *Server) LeaderCommitOp(op *rpc.Operation, idx string) *common.Future {\n\treq := &rpc.CommitRequest{\n\t\tIdx: idx,\n\t\tOp: op,\n\t}\n\n\t// Async RPC to followers\n\tcommitNum := 0\n\tvar commitLock sync.Mutex\n\tcommitCv := sync.NewCond(&commitLock)\n\tfor _, addr := range server.FollowerAddrList {\n\t\tgo func(addr string) {\n\t\t\tserver.SendCommitRequest(addr, req)\n\n\t\t\tcommitLock.Lock()\n\t\t\tcommitNum++\n\t\t\tcommitLock.Unlock()\n\t\t\tcommitCv.Signal()\n\t\t}(addr)\n\t}\n\n\t// Async local commit\n\tgo func() {\n\t\tserver.CommitOp(op, idx).GetValue()\n\t\tcommitLock.Lock()\n\t\tcommitNum++\n\t\tcommitLock.Unlock()\n\t\tcommitCv.Signal()\n\t}()\n\n\tdone := common.NewFuture()\n\n\tgo func() {\n\t\tcommitLock.Lock()\n\t\tfor commitNum < server.MajorityNum {\n\t\t\tcommitCv.Wait()\n\t\t}\n\t\tcommitLock.Unlock()\n\t\tdone.SetValue(true)\n\t}()\n\n\treturn done\n}", "func ConsumerConcurrency(count int) ConsumerOptionsFn {\n\treturn func(o *Consumer) error {\n\t\tif count > 0 {\n\t\t\to.concurrency = count\n\t\t\treturn nil\n\t\t}\n\t\treturn ErrInvalidConcurrency\n\t}\n}", "func WithCloseDelay(delay time.Duration) ClientOption {\n\treturn func(cc *committeeClient) {\n\t\tcc.closeDelay = delay\n\t}\n}", "func WithBeforeFuncsConsumerOption(fns ...BeforeFunc) ConsumerOption {\n\treturn func(c *Consumer) { c.befores = append(c.befores, fns...) 
}\n}", "func (o AppSharedCredentialsOutput) AutoSubmitToolbar() pulumi.BoolPtrOutput {\n\treturn o.ApplyT(func(v *AppSharedCredentials) pulumi.BoolPtrOutput { return v.AutoSubmitToolbar }).(pulumi.BoolPtrOutput)\n}", "func (w *writer) Commit() error {\n\tif w.closed {\n\t\treturn fmt.Errorf(\"already closed\")\n\t} else if w.committed {\n\t\treturn fmt.Errorf(\"already committed\")\n\t} else if w.cancelled {\n\t\treturn fmt.Errorf(\"already cancelled\")\n\t}\n\tfmt.Println(\"commit:\",\"block size=\",len(w.blocks))\n\n\treturn w.flushBlock()\n}", "func createConsumer(c *cli.Context) error {\n\tusername := c.String(\"username\")\n\tcustomID := c.String(\"custom_id\")\n\n\tif username == \"\" && customID == \"\" {\n\t\treturn fmt.Errorf(\"username: %s or custom id: %s invalid\", username, customID)\n\t}\n\n\tcfg := &ConsumerConfig{\n\t\tUsername: username,\n\t\tCustomID: customID,\n\t}\n\n\tctx, cannel := context.WithTimeout(context.Background(), 30*time.Second)\n\tdefer cannel()\n\n\tserverResponse, err := client.GatewayClient.Post(ctx, CONSUMER_RESOURCE_OBJECT, nil, cfg, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tbody, err := ioutil.ReadAll(serverResponse.Body)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttools.IndentFromBody(body)\n\n\treturn nil\n}", "func StartWithLastReceived() ConsumerOption {\n\treturn func(o *api.ConsumerConfig) error {\n\t\tresetDeliverPolicy(o)\n\t\to.DeliverPolicy = api.DeliverLast\n\t\treturn nil\n\t}\n}", "func TriggerLatestOffset() *feature.Feature {\n\tf := feature.NewFeatureNamed(\"Consumer latest auto.offset.reset config\")\n\n\tbrokerName := feature.MakeRandomK8sName(\"broker\")\n\tcmName := feature.MakeRandomK8sName(\"cm-config\")\n\n\tsource1 := feature.MakeRandomK8sName(\"source1\")\n\tsource2 := feature.MakeRandomK8sName(\"source2\")\n\n\ttrigger1Name := feature.MakeRandomK8sName(\"trigger1\")\n\tsink1 := feature.MakeRandomK8sName(\"sink1\")\n\n\ttrigger2Name := feature.MakeRandomK8sName(\"trigger2\")\n\tsink2 := feature.MakeRandomK8sName(\"sink2\")\n\n\tevent1 := test.FullEvent()\n\teventID1 := uuid.New().String()\n\tevent1.SetID(eventID1)\n\n\tevent2 := test.FullEvent()\n\teventID2 := uuid.New().String()\n\tevent2.SetID(eventID2)\n\n\tf.Setup(\"install config\", configmap.Copy(types.NamespacedName{Namespace: system.Namespace(), Name: \"kafka-broker-config\"}, cmName))\n\tf.Setup(\"install broker\", broker.Install(brokerName, append(broker.WithEnvConfig(), broker.WithConfig(cmName))...))\n\tf.Setup(\"broker is ready\", broker.IsReady(brokerName))\n\n\tf.Setup(\"install sink1\", eventshub.Install(sink1, eventshub.StartReceiver))\n\tf.Setup(\"install sink2\", eventshub.Install(sink2, eventshub.StartReceiver))\n\tf.Setup(\"install trigger 1\", trigger.Install(trigger1Name, brokerName, trigger.WithSubscriber(service.AsKReference(sink1), \"\")))\n\tf.Setup(\"trigger 1 is ready\", trigger.IsReady(trigger1Name))\n\n\tf.Requirement(\"send event 1\", eventshub.Install(source1, eventshub.InputEvent(event1), eventshub.StartSenderToResource(broker.GVR(), brokerName)))\n\tf.Requirement(\"event 1 received\", assert.OnStore(sink1).MatchEvent(test.HasId(eventID1)).Exact(1))\n\n\tf.Assert(\"install trigger 2\", trigger.Install(trigger2Name, brokerName, trigger.WithSubscriber(service.AsKReference(sink2), \"\")))\n\tf.Assert(\"trigger 2 is ready\", trigger.IsReady(trigger2Name))\n\n\tf.Assert(\"send event 2\", func(ctx context.Context, t feature.T) {\n\t\ttrigger.IsReady(trigger2Name)(ctx, t) // Wait for trigger ready\n\t\teventshub.Install(source2, 
eventshub.InputEvent(event2), eventshub.StartSenderToResource(broker.GVR(), brokerName))(ctx, t)\n\t})\n\n\t// Both triggers receive event 1 and 2.\n\tf.Assert(\"event 2 is received by sink 1\", assert.OnStore(sink1).MatchEvent(test.HasId(eventID2)).Exact(1))\n\tf.Assert(\"event 2 is received by sink 2\", assert.OnStore(sink2).MatchEvent(test.HasId(eventID2)).Exact(1))\n\n\t// Trigger 2 doesn't receive event 1 (sent before it was ready)\n\tf.Assert(\"event 1 is not received by sink 2\", func(ctx context.Context, t feature.T) {\n\t\ttrigger.IsReady(trigger2Name)(ctx, t) // Wait for trigger ready\n\t\ttime.Sleep(20 * time.Second) // eventually\n\t\tassert.OnStore(sink2).MatchEvent(test.HasId(eventID1)).Not()(ctx, t)\n\t})\n\n\treturn f\n}", "func (dbi *DB) Commit() {\r\n\tif dbi.status {\r\n\t\tdbi.createOperation(\"DB_COMMIT\")\r\n\t\tdbi.data.commPrepare()\r\n\t\tdbi.data.comm()\r\n\t\tdbi.data.commParse()\r\n\t\tif Show {\r\n\t\t\tmylog.Println(\"[\" + dbi.Sid + \"]SQL=COMMIT;\")\r\n\t\t}\r\n\t}\r\n}", "func (_Editable *EditableTransactor) Commit(opts *bind.TransactOpts, _objectHash string) (*types.Transaction, error) {\n\treturn _Editable.contract.Transact(opts, \"commit\", _objectHash)\n}", "func (*txDriver) Commit() error { return nil }", "func (*txDriver) Commit() error { return nil }", "func (f *Factory) Create() (async.Consumer, error) {\n\tcc := kafka.ConsumerConfig{\n\t\tBrokers: f.brokers,\n\t\tSaramaConfig: f.cfg,\n\t\tBuffer: f.cfg.ChannelBufferSize,\n\t}\n\n\tc := &consumer{\n\t\ttopics: f.topics,\n\t\tgroup: f.group,\n\t\ttraceTag: opentracing.Tag{Key: \"group\", Value: f.group},\n\t\tconfig: cc,\n\t}\n\n\tvar err error\n\tfor _, o := range f.oo {\n\t\terr = o(&c.config)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"could not apply OptionFunc to consumer : %w\", err)\n\t\t}\n\t}\n\n\treturn c, nil\n}", "func commitTransaction(partitionPosition []kafka.TopicPartition,\n\tp *kafka.Producer,\n\tc *kafka.Consumer,\n\ttransactionCommitNum int,\n\thwmarks, hwmarksLastCommitted map[string]uint64,\n\tpartitionToMsgIDMap, partitionToMsgIDMapLastCommitted map[uint64]uint64) (commitNum int, err error) {\n\tsoaktest.InfoLogger.Printf(\"=== Committing transaction===\\n\")\n\n\tcgmd, err := c.GetConsumerGroupMetadata()\n\tif err != nil {\n\t\tsoaktest.ErrorLogger.Printf(\"Failed to get Consumer Group \"+\n\t\t\t\"Metadata %v\\n\", err)\n\t\treturn transactionCommitNum, err\n\t}\n\n\t// If SendOffsetsToTransaction succeed, will continue to commit\n\t// or abort transaction\n\t// If SendOffsetsToTransaction failed with err.(kafka.Error).IsRetriable(),\n\t// sleep 3 seconds then retry\n\t// If SendOffsetsToTransaction failed with err.(kafka.Error).TxnRequiresAbort(),\n\t// AbortTransaction and return (transactionCommitNum, err)\n\t// If failed with other errors, return transactionCommitNum and the\n\t// fatal error\n\tfor i := 0; i < retryNum; i++ {\n\t\terr = p.SendOffsetsToTransaction(nil, partitionPosition, cgmd)\n\t\tif err != nil {\n\t\t\tsoaktest.ErrorLogger.Printf(\"SendOffsetsToTransaction() \"+\n\t\t\t\t\"failed: %s\\n\", err)\n\t\t\tif err.(kafka.Error).IsRetriable() {\n\t\t\t\tif i == retryNum-1 {\n\t\t\t\t\tsoaktest.ErrorLogger.Printf(\"SendOffsetsToTransaction() \"+\n\t\t\t\t\t\t\"failed with max retry %d times: %s\\n\", retryNum, err)\n\t\t\t\t\treturn transactionCommitNum, err\n\t\t\t\t}\n\t\t\t\ttime.Sleep(3 * time.Second)\n\t\t\t\tcontinue\n\t\t\t} else if err.(kafka.Error).TxnRequiresAbort() {\n\t\t\t\terr = p.AbortTransaction(nil)\n\t\t\t\tif err != nil 
{\n\t\t\t\t\tsoaktest.ErrorLogger.Printf(\"AbortTransaction() \"+\n\t\t\t\t\t\t\"failed: %s\\n\", err)\n\t\t\t\t\treturn transactionCommitNum, err\n\t\t\t\t}\n\t\t\t\trewindConsumerPosition(c)\n\t\t\t\treturn transactionCommitNum, nil\n\t\t\t} else {\n\t\t\t\treturn transactionCommitNum, err\n\t\t\t}\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif transactionCommitNum%100 != 0 {\n\t\t// If CommitTransaction succeed, transactionCommitNum + 1 and return\n\t\t// If CommitTransaction failed with err.(kafka.Error).IsRetriable(),\n\t\t// sleep 3 seconds then retry\n\t\t// If CommitTransaction failed with\n\t\t// err.(kafka.Error).TxnRequiresAbort(),\n\t\t// AbortTransaction and return (transactionCommitNum, err)\n\t\t// If failed with other errors, return transactionCommitNum and the\n\t\t// fatal error\n\t\tfor i := 0; i < retryNum; i++ {\n\t\t\terr = p.CommitTransaction(nil)\n\t\t\tif err != nil {\n\t\t\t\tif i == 0 {\n\t\t\t\t\tsoaktest.DatadogIncrement(\n\t\t\t\t\t\tproducerTransactionCommitFailed, 1, nil)\n\t\t\t\t}\n\t\t\t\tsoaktest.ErrorLogger.Printf(\"CommitTransaction() failed: %s\\n\",\n\t\t\t\t\terr)\n\t\t\t\tif err.(kafka.Error).IsRetriable() {\n\t\t\t\t\tif i == retryNum-1 {\n\t\t\t\t\t\tsoaktest.ErrorLogger.Printf(\"CommitTransaction() \"+\n\t\t\t\t\t\t\t\"failed with max retry %d times: %s\\n\",\n\t\t\t\t\t\t\tretryNum, err)\n\t\t\t\t\t\treturn transactionCommitNum, err\n\t\t\t\t\t}\n\t\t\t\t\ttime.Sleep(3 * time.Second)\n\t\t\t\t\tcontinue\n\t\t\t\t} else if err.(kafka.Error).TxnRequiresAbort() {\n\t\t\t\t\terr = p.AbortTransaction(nil)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tsoaktest.ErrorLogger.Printf(\"AbortTransaction() \"+\n\t\t\t\t\t\t\t\"failed: %s\\n\", err)\n\t\t\t\t\t\treturn transactionCommitNum, err\n\t\t\t\t\t}\n\t\t\t\t\terr = rewindConsumerPosition(c)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tsoaktest.ErrorLogger.Printf(\"rewindConsumerPosition()\"+\n\t\t\t\t\t\t\t\" failed: %s\\n\", err)\n\t\t\t\t\t}\n\t\t\t\t\treturn transactionCommitNum, nil\n\t\t\t\t} else {\n\t\t\t\t\treturn transactionCommitNum, err\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tfor k, v := range hwmarks {\n\t\t\t\t\thwmarksLastCommitted[k] = v\n\t\t\t\t}\n\t\t\t\tfor k, v := range partitionToMsgIDMap {\n\t\t\t\t\tpartitionToMsgIDMapLastCommitted[k] = v\n\t\t\t\t}\n\t\t\t\tsoaktest.DatadogIncrement(\n\t\t\t\t\tproducerTransactionCommitSucceed, 1, nil)\n\t\t\t\ttransactionCommitNum++\n\t\t\t\treturn transactionCommitNum, nil\n\t\t\t}\n\t\t}\n\t} else {\n\t\t// If AbortTransaction succeed, transactionCommitNum = 1 and return\n\t\t// If AbortTransaction failed with err.(kafka.Error).IsRetriable(),\n\t\t// sleep 3 seconds then retry\n\t\t// If failed with other errors, return transactionCommitNum and the\n\t\t// fatal error\n\t\tfor i := 0; i < retryNum; i++ {\n\t\t\terr = p.AbortTransaction(nil)\n\t\t\tif err != nil {\n\t\t\t\tif i == 0 {\n\t\t\t\t\tsoaktest.DatadogIncrement(\n\t\t\t\t\t\tproducerTransactionAbortFailed, 1, nil)\n\t\t\t\t}\n\t\t\t\tsoaktest.ErrorLogger.Printf(\"AbortTransaction() failed: %s\\n\",\n\t\t\t\t\terr)\n\t\t\t\tif err.(kafka.Error).IsRetriable() {\n\t\t\t\t\tif i == retryNum-1 {\n\t\t\t\t\t\tsoaktest.ErrorLogger.Printf(\"AbortTransaction() \"+\n\t\t\t\t\t\t\t\"failed with max retry %d times: %s\\n\", retryNum, err)\n\t\t\t\t\t\treturn transactionCommitNum, err\n\t\t\t\t\t}\n\t\t\t\t\ttime.Sleep(3 * time.Second)\n\t\t\t\t\tcontinue\n\t\t\t\t} else {\n\t\t\t\t\tsoaktest.ErrorLogger.Printf(\"AbortTransaction() \"+\n\t\t\t\t\t\t\"failed: %s\\n\", err)\n\t\t\t\t\treturn 
transactionCommitNum, err\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tsoaktest.DatadogIncrement(\n\t\t\t\t\tproducerTransactionAbortSucceed,\n\t\t\t\t\t1,\n\t\t\t\t\tnil)\n\t\t\t\ttransactionCommitNum = 1\n\t\t\t\terr = rewindConsumerPosition(c)\n\t\t\t\tif err != nil {\n\t\t\t\t\tsoaktest.ErrorLogger.Printf(\"rewindConsumerPosition() \"+\n\t\t\t\t\t\t\"failed: %s\\n\", err)\n\t\t\t\t}\n\t\t\t\t// If AbortTransaction() happens, rewind the msgid to the\n\t\t\t\t// last committed msid per partition\n\t\t\t\tfor k, v := range hwmarksLastCommitted {\n\t\t\t\t\thwmarks[k] = v\n\t\t\t\t}\n\t\t\t\tfor k, v := range partitionToMsgIDMapLastCommitted {\n\t\t\t\t\tpartitionToMsgIDMap[k] = v\n\t\t\t\t}\n\t\t\t\treturn transactionCommitNum, err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn transactionCommitNum, err\n}", "func (p *AutoCommitter) Flush() error {\n\tif p.verbose {\n\t\tlog.Info(fmt.Sprintf(\"AutoCommitter-%s(%s) a new flush is comming\", p.name, p.coll))\n\t}\n\tfor _, w := range p.workers {\n\t\tw.flushC <- struct{}{}\n\t\t<-w.flushAckC // wait for completion\n\t}\n\tif p.verbose {\n\t\tlog.Info(fmt.Sprintf(\"AutoCommitter-%s(%s) a new flush is finished\", p.name, p.coll))\n\t}\n\treturn nil\n}", "func (cfg *ConsensusConfiguration) Commit(t time.Time) time.Time {\n\treturn t.Add(cfg.TimeoutCommit)\n}", "func (s *scheduler) onCommitBlkDone(e sched.Event) {\n\tif err := e.GetError(); err != nil {\n\t\ts.opts.EventListener.BackgroundErrorCB(err)\n\t\treturn\n\t}\n\tevent := e.(*commitBlkEvent)\n\tif !event.Ctx.HasDataScope() {\n\t\treturn\n\t}\n\tnewMeta := event.Meta\n\tmctx := &Context{Opts: s.opts}\n\ttableData, err := s.tables.StrongRefTable(newMeta.Segment.Table.Id)\n\tif err != nil {\n\t\ts.opts.EventListener.BackgroundErrorCB(err)\n\t\treturn\n\t}\n\tlogutil.Infof(\" %s | Block %d | UpgradeBlkEvent | Started\", sched.EventPrefix, newMeta.Id)\n\tnewevent := NewUpgradeBlkEvent(mctx, newMeta, tableData)\n\ts.Schedule(newevent)\n}", "func (psm *ProtocolStateMachine) updateCommit(newCommit uint64) {\n\tif psm.debug && psm.l() {\n\t\tpsm.logger.Debug(\n\t\t\t\"updating commit\",\n\t\t\tzap.Uint64(\"oldCommit\", psm.state.Commit), zap.Uint64(\"newCommit\", newCommit))\n\t}\n\tpsm.state.Commit = newCommit\n\tpsm.commitChan <- newCommit\n\n\tcanAckProp := psm.state.Proposal.pending &&\n\t\tpsm.state.Proposal.Index <= psm.state.Commit &&\n\t\tpsm.state.Proposal.Term <= psm.state.LogTerm\n\tif canAckProp {\n\t\tpsm.endPendingProposal()\n\t}\n}", "func AcknowledgeAll() ConsumerOption {\n\treturn func(o *api.ConsumerConfig) error {\n\t\to.AckPolicy = api.AckAll\n\t\treturn nil\n\t}\n}", "func (m *ManagedConsumer) set(c *Consumer) {\n\tm.mu.Lock()\n\n\tm.consumer = c\n\n\tif m.waitc != nil {\n\t\tclose(m.waitc)\n\t\tm.waitc = nil\n\t}\n\n\tm.mu.Unlock()\n}", "func WithSegmentFactory(segmentFactory StorageFactory) ConcurrentOption {\n\treturn func(options *ConcurrentConfig) {\n\t\toptions.segmentFactory = segmentFactory\n\t}\n}", "func AllowEmptyMessage(c *commitConfig) { c.allowEmptyMessage = true }", "func (_Container *ContainerTransactor) Commit(opts *bind.TransactOpts, _objectHash string) (*types.Transaction, error) {\n\treturn _Container.contract.Transact(opts, \"commit\", _objectHash)\n}", "func WithCommitter(committer *CommitAuthor) GitCommitOptionsFunc {\n\treturn func(c *CreateCommit) error {\n\t\tif committer == nil {\n\t\t\treturn errors.New(\"Committer is required\")\n\t\t}\n\t\tc.Committer = committer\n\t\treturn nil\n\t}\n}", "func InitConsumer(broker, group string) {\n\tvar err 
error\n\tutils.ConsumerObject, err = kafka.NewConsumer(&kafka.ConfigMap{\n\t\t\"bootstrap.servers\": broker,\n\t\t\"group.id\": group,\n\t\t\"session.timeout.ms\": 6000,\n\t\t\"go.events.channel.enable\": true,\n\t\t\"go.application.rebalance.enable\": true,\n\t\t// Enable generation of PartitionEOF when the\n\t\t// end of a partition is reached.\n\t\t\"enable.partition.eof\": true,\n\t\t\"auto.offset.reset\": \"earliest\"})\n\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"Failed to create consumer: %s\\n\", err)\n\t\tos.Exit(1)\n\t}\n}", "func (cons *ConsumerObject) Init(broker string, group string) {\n\tvar err error\n\tC, err = kafka.NewConsumer(&kafka.ConfigMap{\n\t\t\"bootstrap.servers\": broker,\n\t\t\"broker.address.family\": \"v4\",\n\t\t\"group.id\": group,\n\t\t\"session.timeout.ms\": 6000,\n\t\t\"enable.auto.commit\": false,\n\t\t\"auto.offset.reset\": \"latest\"})\n\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"Failed to create consumer: %s\\n\", err)\n\t\tos.Exit(1)\n\t}\n\n\tfmt.Printf(\"Created Consumer %v\\n\", C)\n}", "func (w *Writer) Commit() (err error) {\n\t//var writeOpts opt.WriteOptions\n\treturn w.db.Write(&w.batch, nil)\n}", "func MainConsumer(handler KafkaEventHandler, consumeTopic string, offset int64, updateOffset func(int64)) {\n\tfmt.Print(\"Initializing main Consumer \\n\")\n\tkafka := newKafkaConsumer()\n\n\tpartitionList, err := kafka.Partitions(consumeTopic)\n\tif err != nil {\n\t\tfmt.Printf(\"Error retrieving partitionList %s\\n\", err)\n\t}\n\n\tevents := make(chan *sarama.ConsumerMessage, simultaneousEvents)\n\n\tfor _, partition := range partitionList {\n\t\tconsumer, err := kafka.ConsumePartition(consumeTopic, partition, offset)\n\t\tif err != nil {\n\t\t\tfmt.Print(\"Unable to consume topic! \\n\")\n\t\t\tfmt.Printf(\"Kafka error: %s\\n\", err)\n\t\t\tos.Exit(-1)\n\t\t}\n\n\t\tgo func(consumer sarama.PartitionConsumer) {\n\t\t\tfor thisEvent := range consumer.Messages() {\n\t\t\t\tevents <- thisEvent\n\t\t\t}\n\t\t}(consumer)\n\t}\n\n\t// Eternal consuming loop\n\tconsumeEvents(events, handler, updateOffset)\n}", "func (o *CommitOptions) WithFormat(value string) *CommitOptions {\n\to.Format = &value\n\treturn o\n}", "func (_Energyconsumption *EnergyconsumptionTransactorSession) SetConsumer(_owner string, _deviceType string, _peakPowerPos uint32, _peakPowerNeg uint32, _latitude uint32, _longitude uint32, _voltageLevel uint32, _location string, _installDate string) (*types.Transaction, error) {\n\treturn _Energyconsumption.Contract.SetConsumer(&_Energyconsumption.TransactOpts, _owner, _deviceType, _peakPowerPos, _peakPowerNeg, _latitude, _longitude, _voltageLevel, _location, _installDate)\n}", "func WithAcks(acks int) ProducerOption {\n\treturn func(p *ProducerConfiguration) {\n\t\t_ = p.KafkaConfig.SetKey(\"acks\", acks)\n\t}\n}", "func (c *KafkaClient) CommitOffset(group string, topic string, partition int32, offset int64) error {\n\tfor i := 0; i <= c.config.CommitOffsetRetries; i++ {\n\t\terr := c.tryCommitOffset(group, topic, partition, offset)\n\t\tif err == nil {\n\t\t\treturn nil\n\t\t}\n\n\t\tlog.Debugf(\"Failed to commit offset %d for group %s, topic %s, partition %d after %d try: %s\", offset, group, topic, partition, i, err)\n\t\ttime.Sleep(c.config.CommitOffsetBackoff)\n\t}\n\n\treturn fmt.Errorf(\"Could not get commit offset %d for group %s, topic %s, partition %d after %d retries\", offset, group, topic, partition, c.config.CommitOffsetRetries)\n}", "func defaultConsumerOptions() *consumerOptions {\n\treturn 
&consumerOptions{\n\t\tqueueDepth: 10000,\n\t\tconcurrency: 10,\n\t\tStats: &NilConsumerStatsCollector{},\n\t}\n}", "func PushFlowControl() ConsumerOption {\n\treturn func(o *api.ConsumerConfig) error {\n\t\to.FlowControl = true\n\t\treturn nil\n\t}\n}", "func (orchCol *OrchestratorCollection) Commit() error {\n\n\tfor _, orch := range orchCol.Orchestrators {\n\t\tif err := orch.Commit(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func (p *AutoCommitter) flusher(interval time.Duration) {\n\tticker := time.NewTicker(interval)\n\tdefer ticker.Stop()\n\n\tfor {\n\t\tselect {\n\t\tcase <-ticker.C: // Periodic flush\n\t\t\tp.Flush()\n\t\tcase <-p.flusherStopC:\n\t\t\tp.flusherStopC <- struct{}{}\n\t\t\treturn\n\t\t}\n\t}\n}", "func TestNewConsumer(tb testing.TB, defaults bool, options ...Option) Consumer {\n\tc, err := NewConsumer()\n\trequire.NoError(tb, err)\n\n\tif !defaults {\n\t\tc.Inmem = inmemconfig.Consumer{Store: nil}\n\t\tc.Kafka = kafkaconfig.Consumer{}\n\t\tc.Pubsub = pubsubconfig.Consumer{}\n\t\tc.Standardstream = standardstreamconfig.Consumer{}\n\t\tc.Logger = nil\n\t\tc.HandleInterrupt = false\n\t\tc.HandleErrors = false\n\t\tc.Name = \"\"\n\t\tc.AllowEnvironmentBasedConfiguration = false\n\t}\n\n\tfor _, option := range options {\n\t\toption.apply(&c, nil)\n\t}\n\n\terr = envconfig.Process(c.Name, &c)\n\trequire.NoError(tb, err)\n\n\treturn c\n}", "func CommitCaller(){\n\tfor{\n\t commitlog :=<-CommitCh\n\t\tfor i:=r.CommitIndex+1;i<=commitlog.CommitIndex && i<=commitlog.LogIndex;i++ {\n\t\t\t\tr.Log[i].IsCommitted=true\n\t\t\t\tInput_ch <- Log_Conn{r.Log[i], nil}\n\t\t\t\tr.CommitIndex=r.Log[i].SequenceNumber\n\t\t\t//r.File.WriteString(\"From Commit Caller \"+strconv.Itoa(r.CommitIndex)+\" Leader Commit \" +strconv.Itoa(commitlog.CommitIndex)+\" Log index \"+strconv.Itoa(commitlog.LogIndex))\n\t\t\t\tr.File.WriteString(strconv.Itoa(r.Log[i].Term)+\" \"+strconv.Itoa(r.Log[i].SequenceNumber)+\" \"+strings.TrimSpace(strings.Replace(string(r.Log[i].Command),\"\\n\",\" \",-1))+\" \"+\" \"+strconv.FormatBool(r.Log[i].IsCommitted))\n\t\t\t\tr.File.WriteString(\"\\t\\r\\n\");\n\t\t\t\n\t\t}\n\t}\n}", "func (c *ConsumerManager) AddConsumer(topic, channel string, client ConsumerClient) error {\n\n}", "func TestLegacyExecuteFailOnAutocommit(t *testing.T) {\n\tctx := utils.LeakCheckContext(t)\n\n\tcreateSandbox(\"TestExecuteFailOnAutocommit\")\n\thc := discovery.NewFakeHealthCheck(nil)\n\tsc := newTestScatterConn(ctx, hc, newSandboxForCells(ctx, []string{\"aa\"}), \"aa\")\n\tsbc0 := hc.AddTestTablet(\"aa\", \"0\", 1, \"TestExecuteFailOnAutocommit\", \"0\", topodatapb.TabletType_PRIMARY, true, 1, nil)\n\tsbc1 := hc.AddTestTablet(\"aa\", \"1\", 1, \"TestExecuteFailOnAutocommit\", \"1\", topodatapb.TabletType_PRIMARY, true, 1, nil)\n\n\trss := []*srvtopo.ResolvedShard{\n\t\t{\n\t\t\tTarget: &querypb.Target{\n\t\t\t\tKeyspace: \"TestExecuteFailOnAutocommit\",\n\t\t\t\tShard: \"0\",\n\t\t\t\tTabletType: topodatapb.TabletType_PRIMARY,\n\t\t\t},\n\t\t\tGateway: sbc0,\n\t\t},\n\t\t{\n\t\t\tTarget: &querypb.Target{\n\t\t\t\tKeyspace: \"TestExecuteFailOnAutocommit\",\n\t\t\t\tShard: \"1\",\n\t\t\t\tTabletType: topodatapb.TabletType_PRIMARY,\n\t\t\t},\n\t\t\tGateway: sbc1,\n\t\t},\n\t}\n\tqueries := []*querypb.BoundQuery{\n\t\t{\n\t\t\t// This will fail to go to shard. 
It will be rejected at vtgate.\n\t\t\tSql: \"query1\",\n\t\t\tBindVariables: map[string]*querypb.BindVariable{\n\t\t\t\t\"bv0\": sqltypes.Int64BindVariable(0),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\t// This will go to shard.\n\t\t\tSql: \"query2\",\n\t\t\tBindVariables: map[string]*querypb.BindVariable{\n\t\t\t\t\"bv1\": sqltypes.Int64BindVariable(1),\n\t\t\t},\n\t\t},\n\t}\n\t// shard 0 - has transaction\n\t// shard 1 - does not have transaction.\n\tsession := &vtgatepb.Session{\n\t\tInTransaction: true,\n\t\tShardSessions: []*vtgatepb.Session_ShardSession{\n\t\t\t{\n\t\t\t\tTarget: &querypb.Target{Keyspace: \"TestExecuteFailOnAutocommit\", Shard: \"0\", TabletType: topodatapb.TabletType_PRIMARY, Cell: \"aa\"},\n\t\t\t\tTransactionId: 123,\n\t\t\t\tTabletAlias: nil,\n\t\t\t},\n\t\t},\n\t\tAutocommit: false,\n\t}\n\t_, errs := sc.ExecuteMultiShard(ctx, nil, rss, queries, NewSafeSession(session), true /*autocommit*/, false)\n\terr := vterrors.Aggregate(errs)\n\trequire.Error(t, err)\n\trequire.Contains(t, err.Error(), \"in autocommit mode, transactionID should be zero but was: 123\")\n\tutils.MustMatch(t, 0, len(sbc0.Queries), \"\")\n\tutils.MustMatch(t, []*querypb.BoundQuery{queries[1]}, sbc1.Queries, \"\")\n}", "func MessageBatcherDoNotWaitForCommit() MessageBatcherConfiguration {\n\treturn func(batcher *MessageBatcher) {\n\t\tbatcher.waitForCommit = false\n\t}\n}" ]
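The negatives above keep circling one pattern worth making concrete: disabling the client's built-in auto-commit and committing offsets manually on some cadence (the `maybeCommit` snippet commits whenever the stored offset is divisible by 10). Below is a minimal sketch of that pattern with confluent-kafka-go; the broker address, topic, group id, and the every-10-messages cadence are placeholder assumptions, not values taken from the snippets.

```go
package main

import (
	"fmt"
	"time"

	"github.com/confluentinc/confluent-kafka-go/kafka"
)

func main() {
	// Auto-commit is off, so offsets only reach the broker via Commit().
	c, err := kafka.NewConsumer(&kafka.ConfigMap{
		"bootstrap.servers":  "localhost:9092", // assumed broker
		"group.id":           "example-group",  // assumed group id
		"enable.auto.commit": false,
		"auto.offset.reset":  "earliest",
	})
	if err != nil {
		panic(err)
	}
	defer c.Close()

	if err := c.Subscribe("example-topic", nil); err != nil { // assumed topic
		panic(err)
	}

	for n := 1; ; n++ {
		msg, err := c.ReadMessage(5 * time.Second)
		if err != nil {
			continue // timeout or transient error: poll again
		}
		fmt.Printf("processed message at %v\n", msg.TopicPartition)
		// Arbitrary cadence, mirroring the maybeCommit snippet above.
		if n%10 == 0 {
			if _, err := c.Commit(); err != nil {
				fmt.Printf("commit failed: %v\n", err)
			}
		}
	}
}
```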
[ "0.7708878", "0.62646776", "0.5866283", "0.5665535", "0.5649291", "0.56325513", "0.5467695", "0.5445847", "0.5381265", "0.53750414", "0.5210893", "0.5156814", "0.51398385", "0.51015705", "0.5085359", "0.49919894", "0.49556273", "0.48541895", "0.48460233", "0.4829172", "0.48114946", "0.48033452", "0.47970855", "0.47945988", "0.47723353", "0.47722226", "0.47683397", "0.47582522", "0.47462392", "0.47197244", "0.47034848", "0.469921", "0.4679708", "0.46694246", "0.4594305", "0.45878935", "0.4576344", "0.45575175", "0.45136875", "0.45064905", "0.44833246", "0.44758666", "0.44640064", "0.44601715", "0.44304764", "0.4409379", "0.43869174", "0.438", "0.4350837", "0.43504643", "0.43331534", "0.4331105", "0.43251407", "0.43101242", "0.4308756", "0.43018106", "0.42908105", "0.42898118", "0.42607948", "0.42389807", "0.4223144", "0.42066067", "0.41996688", "0.41949221", "0.41938695", "0.41920218", "0.4191672", "0.41912866", "0.4188963", "0.41874298", "0.41874298", "0.4183808", "0.41794485", "0.41741535", "0.41631308", "0.4157961", "0.41480786", "0.4144847", "0.4130256", "0.4126198", "0.41096887", "0.41044655", "0.41009918", "0.40999165", "0.4099738", "0.40946278", "0.40926346", "0.4089972", "0.4087884", "0.4086397", "0.40765733", "0.4075342", "0.407523", "0.4069405", "0.40654752", "0.406382", "0.40631208", "0.404862", "0.40481", "0.40445837" ]
0.88928133
0
WithAutoCommitTimeConsumerOption sets the auto-commit time for the Consumer
func WithAutoCommitTimeConsumerOption(dur time.Duration) ConsumerOption { return func(c *Consumer) { c.config.CommitInterval = dur } }
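Taken together with the other functional options in this row's negatives (topic, group id, auto-commit flag), this option is meant to be handed to a constructor that applies each ConsumerOption in turn. The sketch below assumes such a `NewConsumer` exists — only the options themselves appear in this row — while the field it sets is real: in segmentio/kafka-go, `ReaderConfig.CommitInterval` is the interval at which consumed offsets are flushed to the broker, with a zero value meaning synchronous commits.

```go
// Hedged usage sketch: NewConsumer is an assumed constructor that applies
// each ConsumerOption to a fresh Consumer; it is not shown in this row.
c, err := NewConsumer(
	WithTopicConsumerOption("example-topic"),        // placeholder topic
	WithGroupIDConsumerOption("example-group"),      // placeholder group id
	WithAutoCommitConsumerOption(true),              // commit as messages are read
	WithAutoCommitTimeConsumerOption(5*time.Second), // flush offsets every 5s
)
if err != nil {
	log.Fatal(err)
}
go c.Open() // Open's read/commit loop appears among the negatives below
```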
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func WithAutoCommitConsumerOption(flag bool) ConsumerOption {\n\treturn func(c *Consumer) { c.autocommit = flag }\n}", "func (m *Eth) AutoCommit(toggle bool) {\n\tif toggle {\n\t\tm.StartMining()\n\t} else {\n\t\tm.StopMining()\n\t}\n}", "func AutoCheckpoint(interval time.Duration) ConsumerOption {\n\treturn func(o *ConsumerOptions) error {\n\t\to.AutoCheckpointInterval = interval\n\t\treturn nil\n\t}\n}", "func AckWait(t time.Duration) ConsumerOption {\n\treturn func(o *api.ConsumerConfig) error {\n\t\to.AckWait = t\n\t\treturn nil\n\t}\n}", "func AutoOffsetLatest() ConsumerOption {\n\treturn func(o *ConsumerOptions) error {\n\t\to.AutoOffset = autoOffsetLatest\n\t\treturn nil\n\t}\n}", "func WithOffsetConsumerOption(offset int64) ConsumerOption {\n\treturn func(c *Consumer) {\n\t\tswitch offset {\n\t\tcase LastOffset:\n\t\t\tc.config.StartOffset = LastOffset\n\t\tcase FirstOffset:\n\t\t\tc.config.StartOffset = FirstOffset\n\t\tdefault:\n\t\t\tc.config.StartOffset = FirstOffset\n\t\t}\n\t}\n}", "func AutoOffsetNone() ConsumerOption {\n\treturn func(o *ConsumerOptions) error {\n\t\to.AutoOffset = autoOffsetNone\n\t\treturn nil\n\t}\n}", "func consumerTestWithCommits(t *testing.T, testname string, msgcnt int, useChannel bool, consumeFunc func(c *Consumer, mt *msgtracker, expCnt int), rebalanceCb func(c *Consumer, event Event) error) {\n\tconsumerTest(t, testname+\" auto commit\",\n\t\tmsgcnt, consumerCtrl{useChannel: useChannel, autoCommit: true}, consumeFunc, rebalanceCb)\n\n\tconsumerTest(t, testname+\" using CommitMessage() API\",\n\t\tmsgcnt, consumerCtrl{useChannel: useChannel, commitMode: ViaCommitMessageAPI}, consumeFunc, rebalanceCb)\n\n\tconsumerTest(t, testname+\" using CommitOffsets() API\",\n\t\tmsgcnt, consumerCtrl{useChannel: useChannel, commitMode: ViaCommitOffsetsAPI}, consumeFunc, rebalanceCb)\n\n\tconsumerTest(t, testname+\" using Commit() API\",\n\t\tmsgcnt, consumerCtrl{useChannel: useChannel, commitMode: ViaCommitAPI}, consumeFunc, rebalanceCb)\n\n}", "func CommitSync() OptionFunc {\n\treturn func(c *Component) error {\n\t\tif c.saramaConfig != nil && c.saramaConfig.Consumer.Offsets.AutoCommit.Enable {\n\t\t\t// redundant commits warning\n\t\t\tlog.Warn(\"consumer is set to commit offsets after processing each batch and auto-commit is enabled\")\n\t\t}\n\t\tc.commitSync = true\n\t\treturn nil\n\t}\n}", "func (c *Consumer) Commit() error {\n\tsnap := c.resetAcked()\n\tif len(snap) < 1 {\n\t\treturn nil\n\t}\n\n\tfor partitionID, offset := range snap {\n\t\t// fmt.Printf(\"$,%s,%d,%d\\n\", c.id, partitionID, offset+1)\n\t\tif err := c.zoo.Commit(c.group, c.topic, partitionID, offset+1); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func (cfg *ConsensusConfiguration) Commit(t time.Time) time.Time {\n\treturn t.Add(cfg.TimeoutCommit)\n}", "func AutoAck(opt bool) Option {\n\treturn func(o *Options) {\n\t\to.AutoAck = opt\n\t}\n}", "func WithTopicConsumerOption(topic string) ConsumerOption {\n\treturn func(c *Consumer) {\n\t\tc.config.Topic = topic\n\t}\n}", "func (tc *consumer) Commit(topic string, partition int32, offset int64) error {\n\treturn nil\n}", "func WithEndpointConsumerOption(end endpoint.Endpoint) ConsumerOption {\n\treturn func(c *Consumer) { c.end = end }\n}", "func WithCloseDelay(delay time.Duration) ClientOption {\n\treturn func(cc *committeeClient) {\n\t\tcc.closeDelay = delay\n\t}\n}", "func StartWithLastReceived() ConsumerOption {\n\treturn func(o *api.ConsumerConfig) error {\n\t\tresetDeliverPolicy(o)\n\t\to.DeliverPolicy = 
api.DeliverLast\n\t\treturn nil\n\t}\n}", "func WithAfterFuncsConsumerOption(fns ...AfterFunc) ConsumerOption {\n\treturn func(c *Consumer) { c.afters = append(c.afters, fns...) }\n}", "func WithCheckpoint(checkpoint Checkpoint) Option {\n\treturn func(c *Consumer) error {\n\t\tc.checkpoint = checkpoint\n\t\treturn nil\n\t}\n}", "func AcknowledgeNone() ConsumerOption {\n\treturn func(o *api.ConsumerConfig) error {\n\t\to.AckPolicy = api.AckNone\n\t\treturn nil\n\t}\n}", "func WithConsumeTimeout(s string) (OptionFunc, error) {\n\ttimeout, err := time.ParseDuration(s)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn func(c *Config) error {\n\t\tc.consumeTimeout = timeout\n\t\treturn nil\n\t}, nil\n}", "func (c *Consumer) CommitOffset(msg *sarama.ConsumerMessage) {\n\tc.consumer.MarkOffset(msg, \"\")\n}", "func WithReaderConsumerOption(reader *kafgo.Reader) ConsumerOption {\n\treturn func(c *Consumer) { c.reader = reader }\n}", "func StartAtTime(t time.Time) ConsumerOption {\n\treturn func(o *api.ConsumerConfig) error {\n\t\tresetDeliverPolicy(o)\n\t\to.DeliverPolicy = api.DeliverByStartTime\n\t\tut := t.UTC()\n\t\to.OptStartTime = &ut\n\t\treturn nil\n\t}\n}", "func (s *Service) onOffsetCommit(brokerId int32, duration time.Duration) {\n\n\t// todo:\n\t// if the commit took too long, don't count it in 'commits' but add it to the histogram?\n\t// and how do we want to handle cases where we get an error??\n\t// should we have another metric that tells us about failed commits? or a label on the counter?\n\tbrokerIdStr := fmt.Sprintf(\"%v\", brokerId)\n\ts.endToEndCommitLatency.WithLabelValues(brokerIdStr).Observe(duration.Seconds())\n\n\tif duration > s.config.Consumer.CommitSla {\n\t\treturn\n\t}\n\n\ts.endToEndCommits.Inc()\n}", "func consumerTest(t *testing.T, testname string, msgcnt int, cc consumerCtrl, consumeFunc func(c *Consumer, mt *msgtracker, expCnt int), rebalanceCb func(c *Consumer, event Event) error) {\n\n\tif msgcnt == 0 {\n\t\tcreateTestMessages()\n\t\tproducerTest(t, \"Priming producer\", p0TestMsgs, producerCtrl{},\n\t\t\tfunc(p *Producer, m *Message, drChan chan Event) {\n\t\t\t\tp.ProduceChannel() <- m\n\t\t\t})\n\t\tmsgcnt = len(p0TestMsgs)\n\t}\n\n\tconf := ConfigMap{\"bootstrap.servers\": testconf.Brokers,\n\t\t\"go.events.channel.enable\": cc.useChannel,\n\t\t\"group.id\": testconf.GroupID,\n\t\t\"session.timeout.ms\": 6000,\n\t\t\"api.version.request\": \"true\",\n\t\t\"enable.auto.commit\": cc.autoCommit,\n\t\t\"debug\": \",\",\n\t\t\"auto.offset.reset\": \"earliest\"}\n\n\tconf.updateFromTestconf()\n\n\tc, err := NewConsumer(&conf)\n\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer c.Close()\n\n\texpCnt := msgcnt\n\tmt := msgtrackerStart(t, expCnt)\n\n\tt.Logf(\"%s, expecting %d messages\", testname, expCnt)\n\tc.Subscribe(testconf.Topic, rebalanceCb)\n\n\tconsumeFunc(c, &mt, expCnt)\n\n\t//test commits\n\tswitch cc.commitMode {\n\tcase ViaCommitMessageAPI:\n\t\t// verify CommitMessage() API\n\t\tfor _, message := range mt.msgs {\n\t\t\t_, commitErr := c.CommitMessage(message)\n\t\t\tif commitErr != nil {\n\t\t\t\tt.Errorf(\"Cannot commit message. Error: %s\\n\", commitErr)\n\t\t\t}\n\t\t}\n\tcase ViaCommitOffsetsAPI:\n\t\t// verify CommitOffset\n\t\tpartitions := make([]TopicPartition, len(mt.msgs))\n\t\tfor index, message := range mt.msgs {\n\t\t\tpartitions[index] = message.TopicPartition\n\t\t}\n\t\t_, commitErr := c.CommitOffsets(partitions)\n\t\tif commitErr != nil {\n\t\t\tt.Errorf(\"Failed to commit using CommitOffsets. 
Error: %s\\n\", commitErr)\n\t\t}\n\tcase ViaCommitAPI:\n\t\t// verify Commit() API\n\t\t_, commitErr := c.Commit()\n\t\tif commitErr != nil {\n\t\t\tt.Errorf(\"Failed to commit. Error: %s\", commitErr)\n\t\t}\n\n\t}\n\n\t// Trigger RevokePartitions\n\tc.Unsubscribe()\n\n\t// Handle RevokePartitions\n\tc.Poll(500)\n\n}", "func AutoOffsetEarliest() ConsumerOption {\n\treturn func(o *ConsumerOptions) error {\n\t\to.AutoOffset = autoOffsetEarliest\n\t\treturn nil\n\t}\n}", "func (p *AutoCommitter) flusher(interval time.Duration) {\n\tticker := time.NewTicker(interval)\n\tdefer ticker.Stop()\n\n\tfor {\n\t\tselect {\n\t\tcase <-ticker.C: // Periodic flush\n\t\t\tp.Flush()\n\t\tcase <-p.flusherStopC:\n\t\t\tp.flusherStopC <- struct{}{}\n\t\t\treturn\n\t\t}\n\t}\n}", "func (operator *AccessOperator) CreateCommit(cxt context.Context, option *CreateCommitOption) (string, error) {\n\tif option == nil {\n\t\treturn \"\", fmt.Errorf(\"Lost create Commit info\")\n\t}\n\tgrpcOptions := []grpc.CallOption{\n\t\tgrpc.WaitForReady(true),\n\t}\n\t//query business and app first\n\tbusiness, app, err := getBusinessAndApp(operator, operator.Business, option.AppName)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\t//query ConfigSet information for specified ID\n\tcfgSet, err := operator.innergetConfigSet(cxt, business.Bid, app.Appid, option.ConfigSetName)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif cfgSet == nil {\n\t\treturn \"\", fmt.Errorf(\"Found no ConfigSet info\")\n\t}\n\n\trequest := &accessserver.CreateCommitReq{\n\t\tSeq: pkgcommon.Sequence(),\n\t\tBid: business.Bid,\n\t\tAppid: app.Appid,\n\t\tCfgsetid: cfgSet.Cfgsetid,\n\t\tOp: 0,\n\t\tOperator: operator.User,\n\t\tTemplateid: \"\",\n\t\tTemplate: \"\",\n\t\tConfigs: option.Content,\n\t\tChanges: \"\",\n\t}\n\tresponse, err := operator.Client.CreateCommit(cxt, request, grpcOptions...)\n\tif err != nil {\n\t\tlogger.V(3).Infof(\"CreateCommit: post new Commit for ConfigSet [%s] failed, %s\", option.ConfigSetName, err.Error())\n\t\treturn \"\", err\n\t}\n\tif response.ErrCode != common.ErrCode_E_OK {\n\t\tlogger.V(3).Infof(\n\t\t\t\"CreateCommit: post new new Commit for ConfigSet [%s] successfully, but response failed: %s\",\n\t\t\toption.ConfigSetName, response.ErrMsg,\n\t\t)\n\t\treturn \"\", fmt.Errorf(\"%s\", response.ErrMsg)\n\t}\n\tif len(response.Commitid) == 0 {\n\t\tlogger.V(3).Infof(\"CreateConfigSet: BSCP system error, No CommitID response\")\n\t\treturn \"\", fmt.Errorf(\"Lost CommitID from configuraiotn platform\")\n\t}\n\treturn response.Commitid, nil\n}", "func (c *Cyclone) commit(msg *erebos.Transport) {\n\tmsg.Commit <- &erebos.Commit{\n\t\tTopic: msg.Topic,\n\t\tPartition: msg.Partition,\n\t\tOffset: msg.Offset,\n\t}\n}", "func (oc *OrdererConfig) Commit() {\n\toc.ordererGroup.OrdererConfig = oc\n}", "func (c *Consumer) Open() error {\n\tif c.reader == nil {\n\t\tc.reader = kafgo.NewReader(*c.config)\n\t}\n\n\tfor {\n\t\t// start a new context\n\t\tvar (\n\t\t\tctx = context.Background()\n\t\t\tmsg kafgo.Message\n\t\t\terr error\n\t\t)\n\n\t\tif c.autocommit {\n\t\t\tmsg, err = c.reader.ReadMessage(ctx)\n\t\t} else {\n\t\t\tmsg, err = c.reader.FetchMessage(ctx)\n\t\t}\n\n\t\tif err != nil {\n\t\t\tc.errFn(ctx, msg, errors.Wrap(\n\t\t\t\terr, \"read message from kafka failed\",\n\t\t\t))\n\t\t\tc.errHandler.Handle(ctx, err)\n\t\t\tcontinue\n\t\t}\n\n\t\t// before endpoint\n\t\tfor _, fn := range c.befores {\n\t\t\tctx = fn(ctx, msg)\n\t\t}\n\n\t\trq, err := c.dec(ctx, msg)\n\t\tif err != nil {\n\t\t\tc.errFn(ctx, msg, 
err)\n\t\t\tc.errHandler.Handle(ctx, err)\n\t\t\tcontinue\n\t\t}\n\n\t\t// execute endpoint\n\t\trs, err := c.end(ctx, rq)\n\t\tif err != nil {\n\t\t\tc.errFn(ctx, msg, err)\n\t\t\tc.errHandler.Handle(ctx, err)\n\t\t\tcontinue\n\t\t}\n\n\t\tfor _, fn := range c.afters {\n\t\t\tctx = fn(ctx, msg, rs)\n\t\t}\n\n\t\tif !c.autocommit {\n\t\t\terr = c.reader.CommitMessages(ctx, msg)\n\t\t\tif err != nil {\n\t\t\t\tc.errFn(ctx, msg, err)\n\t\t\t\tc.errHandler.Handle(ctx, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t}\n}", "func WithGroupIDConsumerOption(groupID string) ConsumerOption {\n\treturn func(c *Consumer) {\n\t\tc.config.GroupID = groupID\n\t}\n}", "func WithCommitHandler(handler CommitHandlerFactory) Option {\n\treturn func(gw *Gateway) error {\n\t\tgw.options.CommitHandler = handler\n\t\treturn nil\n\t}\n}", "func WithDecoderConsumerOption(fn Decoder) ConsumerOption {\n\treturn func(c *Consumer) { c.dec = fn }\n}", "func (_Energyconsumption *EnergyconsumptionTransactor) SetConsumer(opts *bind.TransactOpts, _owner string, _deviceType string, _peakPowerPos uint32, _peakPowerNeg uint32, _latitude uint32, _longitude uint32, _voltageLevel uint32, _location string, _installDate string) (*types.Transaction, error) {\n\treturn _Energyconsumption.contract.Transact(opts, \"setConsumer\", _owner, _deviceType, _peakPowerPos, _peakPowerNeg, _latitude, _longitude, _voltageLevel, _location, _installDate)\n}", "func (cons *ConsumerObject) Init(broker string, group string) {\n\tvar err error\n\tC, err = kafka.NewConsumer(&kafka.ConfigMap{\n\t\t\"bootstrap.servers\": broker,\n\t\t\"broker.address.family\": \"v4\",\n\t\t\"group.id\": group,\n\t\t\"session.timeout.ms\": 6000,\n\t\t\"enable.auto.commit\": false,\n\t\t\"auto.offset.reset\": \"latest\"})\n\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"Failed to create consumer: %s\\n\", err)\n\t\tos.Exit(1)\n\t}\n\n\tfmt.Printf(\"Created Consumer %v\\n\", C)\n}", "func (p *AutoCommitter) Close() error {\n\tp.startedMu.Lock()\n\tdefer p.startedMu.Unlock()\n\n\t// Already stopped? Do nothing.\n\tif !p.started {\n\t\treturn nil\n\t}\n\n\t// Stop flusher (if enabled)\n\tif p.flusherStopC != nil {\n\t\tp.flusherStopC <- struct{}{}\n\t\t<-p.flusherStopC\n\t\tclose(p.flusherStopC)\n\t\tp.flusherStopC = nil\n\t}\n\n\t// Stop all workers.\n\tclose(p.docsInsert)\n\tclose(p.docsUpdate)\n\tclose(p.docsUpsert)\n\tp.workerWg.Wait()\n\n\tp.started = false\n\n\treturn nil\n}", "func (cs *ConsensusState) Commit(t time.Time) time.Time {\n\treturn t.Add(time.Duration(timeoutCommit.Load().(int32)) * time.Millisecond)\n}", "func NewConsumer(log logrus.FieldLogger, conf Config, opts ...ConfigOpt) (Consumer, error) {\n\t// See Reference at https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md\n\tkafkaConf := conf.baseKafkaConfig()\n\t_ = kafkaConf.SetKey(\"enable.auto.offset.store\", false) // manually StoreOffset after processing a message. 
Otherwise races may happen.\n\n\t// In case we try to assign an offset out of range (greater than log-end-offset), consumer will start consuming from offset zero.\n\t_ = kafkaConf.SetKey(\"auto.offset.reset\", \"earliest\")\n\n\tconf.Consumer.Apply(kafkaConf)\n\tfor _, opt := range opts {\n\t\topt(kafkaConf)\n\t}\n\n\tif err := conf.configureAuth(kafkaConf); err != nil {\n\t\treturn nil, errors.Wrap(err, \"error configuring auth for the Kafka consumer\")\n\t}\n\n\tconsumer, err := kafkalib.NewConsumer(kafkaConf)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif conf.RequestTimeout == 0 {\n\t\tconf.RequestTimeout = DefaultTimeout\n\t}\n\n\tcc := &ConfluentConsumer{\n\t\tc: consumer,\n\t\tconf: conf,\n\t\tlog: log,\n\t}\n\n\tlogFields := logrus.Fields{\"kafka_topic\": cc.conf.Topic}\n\n\tif cc.conf.Consumer.Partition != nil || cc.conf.Consumer.PartitionKey != \"\" {\n\t\t// set the default partitioner algorithm\n\t\tif cc.conf.Consumer.PartitionerAlgorithm == \"\" {\n\t\t\tcc.conf.Consumer.PartitionerAlgorithm = PartitionerMurMur2\n\t\t}\n\t\t// Set the partition if a key is set to determine the partition\n\t\tif cc.conf.Consumer.PartitionKey != \"\" && cc.conf.Consumer.PartitionerAlgorithm != \"\" {\n\t\t\tcc.AssignPartitionByKey(cc.conf.Consumer.PartitionKey, cc.conf.Consumer.PartitionerAlgorithm)\n\t\t}\n\n\t\tlogFields[\"kafka_partition_key\"] = cc.conf.Consumer.PartitionKey\n\t\tlogFields[\"kafka_partition\"] = *cc.conf.Consumer.Partition\n\t}\n\n\tcc.setupRebalanceHandler()\n\tcc.log.WithFields(logFields).Debug(\"Subscribing to Kafka topic\")\n\tif serr := cc.c.Subscribe(cc.conf.Topic, cc.rebalanceHandler); serr != nil {\n\t\terr = errors.Wrap(serr, \"error subscribing to topic\")\n\t}\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn cc, nil\n}", "func (g *groupConsumer) commitTxn(\n\tctx context.Context,\n\tuncommitted map[string]map[int32]EpochOffset,\n\tonDone func(*kmsg.TxnOffsetCommitRequest, *kmsg.TxnOffsetCommitResponse, error),\n) {\n\tif onDone == nil { // note we must always call onDone\n\t\tonDone = func(_ *kmsg.TxnOffsetCommitRequest, _ *kmsg.TxnOffsetCommitResponse, _ error) {}\n\t}\n\tif len(uncommitted) == 0 { // only empty if called thru autocommit / default revoke\n\t\tonDone(new(kmsg.TxnOffsetCommitRequest), new(kmsg.TxnOffsetCommitResponse), nil)\n\t\treturn\n\t}\n\n\tif g.commitCancel != nil {\n\t\tg.commitCancel() // cancel any prior commit\n\t}\n\tpriorCancel := g.commitCancel\n\tpriorDone := g.commitDone\n\n\tcommitCtx, commitCancel := context.WithCancel(g.ctx) // enable ours to be canceled and waited for\n\tcommitDone := make(chan struct{})\n\n\tg.commitCancel = commitCancel\n\tg.commitDone = commitDone\n\n\t// We issue this request even if the producer ID is failed; the request\n\t// will fail if it is.\n\t//\n\t// The id must have been set at least once by this point because of\n\t// addOffsetsToTxn.\n\tid, epoch, _ := g.cl.producerID()\n\tmemberID := g.memberID\n\treq := &kmsg.TxnOffsetCommitRequest{\n\t\tTransactionalID: *g.cl.cfg.txnID,\n\t\tGroup: g.id,\n\t\tProducerID: id,\n\t\tProducerEpoch: epoch,\n\t\tGeneration: g.generation,\n\t\tMemberID: memberID,\n\t\tInstanceID: g.instanceID,\n\t}\n\n\tif ctx.Done() != nil {\n\t\tgo func() {\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\tcommitCancel()\n\t\t\tcase <-commitCtx.Done():\n\t\t\t}\n\t\t}()\n\t}\n\n\tgo func() {\n\t\tdefer close(commitDone) // allow future commits to continue when we are done\n\t\tdefer commitCancel()\n\t\tif priorDone != nil {\n\t\t\tselect {\n\t\t\tcase 
<-priorDone:\n\t\t\tdefault:\n\t\t\t\tg.cl.cfg.logger.Log(LogLevelDebug, \"canceling prior txn offset commit to issue another\")\n\t\t\t\tpriorCancel()\n\t\t\t\t<-priorDone // wait for any prior request to finish\n\t\t\t}\n\t\t}\n\t\tg.cl.cfg.logger.Log(LogLevelDebug, \"issuing txn offset commit\", \"uncommitted\", uncommitted)\n\n\t\tfor topic, partitions := range uncommitted {\n\t\t\treq.Topics = append(req.Topics, kmsg.TxnOffsetCommitRequestTopic{\n\t\t\t\tTopic: topic,\n\t\t\t})\n\t\t\treqTopic := &req.Topics[len(req.Topics)-1]\n\t\t\tfor partition, eo := range partitions {\n\t\t\t\treqTopic.Partitions = append(reqTopic.Partitions, kmsg.TxnOffsetCommitRequestTopicPartition{\n\t\t\t\t\tPartition: partition,\n\t\t\t\t\tOffset: eo.Offset,\n\t\t\t\t\tLeaderEpoch: eo.Epoch,\n\t\t\t\t\tMetadata: &memberID,\n\t\t\t\t})\n\t\t\t}\n\t\t}\n\n\t\tvar resp *kmsg.TxnOffsetCommitResponse\n\t\tvar err error\n\t\tif len(req.Topics) > 0 {\n\t\t\tresp, err = req.RequestWith(commitCtx, g.cl)\n\t\t}\n\t\tif err != nil {\n\t\t\tonDone(req, nil, err)\n\t\t\treturn\n\t\t}\n\t\tonDone(req, resp, nil)\n\t}()\n}", "func WithEventSettings(cfg ConsumerConfig) OptionEvent {\n\treturn func(c *Consumer) {\n\t\tif cfg.MaxInFlight > 0 {\n\t\t\tc.cfg.MaxInFlight = cfg.MaxInFlight\n\t\t}\n\t\tif cfg.MaxAttempts > 0 {\n\t\t\tc.cfg.MaxAttempts = cfg.MaxAttempts\n\t\t}\n\t\tif cfg.Timeout > 0 {\n\t\t\tc.cfg.Timeout = cfg.Timeout\n\t\t}\n\t\tif cfg.RequeueInterval > 0 {\n\t\t\tc.cfg.RequeueInterval = cfg.RequeueInterval\n\t\t}\n\t\tif cfg.NumOfConsumers > 0 {\n\t\t\tc.cfg.NumOfConsumers = cfg.NumOfConsumers\n\t\t}\n\t}\n}", "func NewConsumerConfiguration(dflt api.ConsumerConfig, opts ...ConsumerOption) (*api.ConsumerConfig, error) {\n\tcfg := dflt\n\n\tfor _, o := range opts {\n\t\terr := o(&cfg)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tif cfg.Durable != \"\" {\n\t\tcfg.Name = cfg.Durable\n\t}\n\n\tif cfg.Name == \"\" {\n\t\tcfg.Name = generateConsName()\n\t}\n\n\treturn &cfg, nil\n}", "func (c CommitterProbe) SetMaxCommitTS(ts uint64) {\n\tc.maxCommitTS = ts\n}", "func defaultConsumerOptions() *consumerOptions {\n\treturn &consumerOptions{\n\t\tqueueDepth: 10000,\n\t\tconcurrency: 10,\n\t\tStats: &NilConsumerStatsCollector{},\n\t}\n}", "func (w *mongoAutoCommitWorker) commit() (err error) {\n\tw.docMu.Lock()\n\tdefer w.docMu.Unlock()\n\tif w.capacity() > 0 {\n\t\tif w.p.verbose {\n\t\t\tlog.Info(fmt.Sprintf(\"AutoCommitter-%s(%s)-%d committing size: %d\", w.p.name, w.p.coll, w.i, len(w.docInsert)))\n\t\t}\n\t\tsession := w.p.client.Session.Copy()\n\t\tdefer session.Close()\n\t\tdb := w.p.client.Db\n\t\tcollName := w.p.coll\n\t\tcoll := session.DB(db).C(collName)\n\t\tbulk := coll.Bulk()\n\t\tbulk.Unordered()\n\n\t\tif len(w.docInsert) > 0 {\n\t\t\tbulk.Insert(w.docInsert...)\n\t\t}\n\n\t\tif len(w.docUpdate) > 0 {\n\t\t\tbulk.Update(w.docUpdate...)\n\t\t}\n\n\t\tif len(w.docUpsert) > 0 {\n\t\t\tbulk.Upsert(w.docUpsert...)\n\t\t}\n\n\t\tw.docInsert = []interface{}{}\n\t\tw.docUpdate = []interface{}{}\n\t\tw.docUpsert = []interface{}{}\n\n\t\tif _, err = bulk.Run(); err != nil {\n\t\t\tlog.Info(fmt.Sprintf(\"AutoCommitter-%s(%s)-%d >>ERROR<<: %v\", w.p.name, w.p.coll, w.i, err.Error()))\n\t\t}\n\t\tif w.p.verbose {\n\t\t\tlog.Info(fmt.Sprintf(\"AutoCommitter-%s(%s)-%d committed size: %d\", w.p.name, w.p.coll, w.i, len(w.docInsert)))\n\t\t}\n\t} else {\n\t\tif w.p.verbose {\n\t\t\tlog.Info(fmt.Sprintf(\"AutoCommitter-%s(%s)-%d committed nothing\", w.p.name, w.p.coll, w.i))\n\t\t}\n\t}\n\treturn\n}", "func (m 
*MetricsProvider) BatchAckTime(value time.Duration) {\n}", "func createConsumer(c *cli.Context) error {\n\tusername := c.String(\"username\")\n\tcustomID := c.String(\"custom_id\")\n\n\tif username == \"\" && customID == \"\" {\n\t\treturn fmt.Errorf(\"username: %s or custom id: %s invalid\", username, customID)\n\t}\n\n\tcfg := &ConsumerConfig{\n\t\tUsername: username,\n\t\tCustomID: customID,\n\t}\n\n\tctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)\n\tdefer cancel()\n\n\tserverResponse, err := client.GatewayClient.Post(ctx, CONSUMER_RESOURCE_OBJECT, nil, cfg, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tbody, err := ioutil.ReadAll(serverResponse.Body)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttools.IndentFromBody(body)\n\n\treturn nil\n}", "func TestLeaderAcknowledgeCommit(t *testing.T) {\n\ttests := []struct {\n\t\tsize int\n\t\tacceptors map[uint64]bool\n\t\twack bool\n\t}{\n\t\t{1, nil, true},\n\t\t{3, nil, false},\n\t\t{3, map[uint64]bool{2: true}, true},\n\t\t{3, map[uint64]bool{2: true, 3: true}, true},\n\t\t{5, nil, false},\n\t\t{5, map[uint64]bool{2: true}, false},\n\t\t{5, map[uint64]bool{2: true, 3: true}, true},\n\t\t{5, map[uint64]bool{2: true, 3: true, 4: true}, true},\n\t\t{5, map[uint64]bool{2: true, 3: true, 4: true, 5: true}, true},\n\t}\n\tfor i, tt := range tests {\n\t\ts := NewMemoryStorage()\n\t\tr := newTestRaft(1, idsBySize(tt.size), 10, 1, s)\n\t\tdefer closeAndFreeRaft(r)\n\t\tr.becomeCandidate()\n\t\tr.becomeLeader()\n\t\tcommitNoopEntry(r, s)\n\t\tli := r.raftLog.lastIndex()\n\t\tr.Step(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte(\"some data\")}}})\n\n\t\tfor _, m := range r.readMessages() {\n\t\t\tif tt.acceptors[m.To] {\n\t\t\t\tr.Step(acceptAndReply(m))\n\t\t\t}\n\t\t}\n\n\t\tif g := r.raftLog.committed > li; g != tt.wack {\n\t\t\tt.Errorf(\"#%d: ack commit = %v, want %v\", i, g, tt.wack)\n\t\t}\n\t}\n}", "func ConsumerConcurrency(count int) ConsumerOptionsFn {\n\treturn func(o *Consumer) error {\n\t\tif count > 0 {\n\t\t\to.concurrency = count\n\t\t\treturn nil\n\t\t}\n\t\treturn ErrInvalidConcurrency\n\t}\n}", "func WithCommitter(committer *CommitAuthor) GitCommitOptionsFunc {\n\treturn func(c *CreateCommit) error {\n\t\tif committer == nil {\n\t\t\treturn errors.New(\"Committer is required\")\n\t\t}\n\t\tc.Committer = committer\n\t\treturn nil\n\t}\n}", "func (f *Factory) Create() (async.Consumer, error) {\n\tcc := kafka.ConsumerConfig{\n\t\tBrokers: f.brokers,\n\t\tSaramaConfig: f.cfg,\n\t\tBuffer: f.cfg.ChannelBufferSize,\n\t}\n\n\tc := &consumer{\n\t\ttopics: f.topics,\n\t\tgroup: f.group,\n\t\ttraceTag: opentracing.Tag{Key: \"group\", Value: f.group},\n\t\tconfig: cc,\n\t}\n\n\tvar err error\n\tfor _, o := range f.oo {\n\t\terr = o(&c.config)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"could not apply OptionFunc to consumer : %w\", err)\n\t\t}\n\t}\n\n\treturn c, nil\n}", "func NewConsumer(topics []string, valueFactory ValueFactory, opts ...ConsumerOption) (*Consumer, error) {\n\tc := &Consumer{\n\t\tvalueFactory: valueFactory,\n\t\tavroAPI: avro.DefaultConfig,\n\t\tensureTopics: true,\n\t}\n\t// Loop through each option\n\tfor _, opt := range opts {\n\t\t// apply option\n\t\topt.applyC(c)\n\t}\n\n\tvar err error\n\n\t// if consumer not provided - make one\n\tif c.KafkaConsumer == nil {\n\t\t// if kafka config not provided - build default one\n\t\tif c.kafkaCfg == nil {\n\t\t\tvar envCfg struct {\n\t\t\t\tBroker string `env:\"KAFKA_BROKER\" 
envDefault:\"localhost:9092\"`\n\t\t\t\tCAFile string `env:\"KAFKA_CA_FILE\"`\n\t\t\t\tKeyFile string `env:\"KAFKA_KEY_FILE\"`\n\t\t\t\tCertificateFile string `env:\"KAFKA_CERTIFICATE_FILE\"`\n\t\t\t\tGroupID string `env:\"KAFKA_GROUP_ID\"`\n\t\t\t}\n\t\t\tif err := env.Parse(&envCfg); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\t// default configuration\n\t\t\tc.kafkaCfg = &kafka.ConfigMap{\n\t\t\t\t\"bootstrap.servers\": envCfg.Broker,\n\t\t\t\t\"socket.keepalive.enable\": true,\n\t\t\t\t\"enable.auto.commit\": false,\n\t\t\t\t\"enable.partition.eof\": true,\n\t\t\t\t\"session.timeout.ms\": 6000,\n\t\t\t\t\"auto.offset.reset\": \"earliest\",\n\t\t\t\t\"group.id\": envCfg.GroupID,\n\t\t\t}\n\n\t\t\tif envCfg.CAFile != \"\" {\n\t\t\t\t// configure SSL\n\t\t\t\tc.kafkaCfg.SetKey(\"security.protocol\", \"ssl\")\n\t\t\t\tc.kafkaCfg.SetKey(\"ssl.ca.location\", envCfg.CAFile)\n\t\t\t\tc.kafkaCfg.SetKey(\"ssl.key.location\", envCfg.KeyFile)\n\t\t\t\tc.kafkaCfg.SetKey(\"ssl.certificate.location\", envCfg.CertificateFile)\n\t\t\t}\n\t\t}\n\n\t\tif c.KafkaConsumer, err = kafka.NewConsumer(c.kafkaCfg); err != nil {\n\t\t\treturn nil, errors.WithMessage(err, \"cannot initialize kafka consumer\")\n\t\t}\n\t}\n\n\tif c.srClient == nil {\n\t\tif c.srURL == nil {\n\t\t\tvar envCfg struct {\n\t\t\t\tSchemaRegistry *url.URL `env:\"KAFKA_SCHEMA_REGISTRY\" envDefault:\"http://localhost:8081\"`\n\t\t\t}\n\t\t\tif err := env.Parse(&envCfg); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tc.srURL = envCfg.SchemaRegistry\n\t\t}\n\n\t\tif c.srClient, err = NewCachedSchemaRegistryClient(c.srURL.String()); err != nil {\n\t\t\treturn nil, errors.WithMessage(err, \"cannot initialize schema registry client\")\n\t\t}\n\t}\n\n\tif c.eventHandler == nil {\n\t\tc.eventHandler = func(event kafka.Event) {\n\t\t\tlog.Println(event)\n\t\t}\n\t}\n\n\tif topics != nil {\n\t\tif err := c.KafkaConsumer.SubscribeTopics(topics, nil); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif c.ensureTopics {\n\t\t\tif err = c.EnsureTopics(topics); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn c, nil\n}", "func WithUpdatedTime(t time.Time) DocOption {\n\treturn func(opts *Doc) {\n\t\topts.Updated = &t\n\t}\n}", "func (_Energyconsumption *EnergyconsumptionTransactorSession) SetConsumer(_owner string, _deviceType string, _peakPowerPos uint32, _peakPowerNeg uint32, _latitude uint32, _longitude uint32, _voltageLevel uint32, _location string, _installDate string) (*types.Transaction, error) {\n\treturn _Energyconsumption.Contract.SetConsumer(&_Energyconsumption.TransactOpts, _owner, _deviceType, _peakPowerPos, _peakPowerNeg, _latitude, _longitude, _voltageLevel, _location, _installDate)\n}", "func ConsumerID(id string) ConsumerOption {\n\treturn func(o *ConsumerOptions) error {\n\t\tif id == \"\" {\n\t\t\treturn errors.New(\"invalid consumer id\")\n\t\t}\n\t\to.ConsumerID = id\n\t\treturn nil\n\t}\n}", "func TestNewConsumer(tb testing.TB, defaults bool, options ...Option) Consumer {\n\tc, err := NewConsumer()\n\trequire.NoError(tb, err)\n\n\tif !defaults {\n\t\tc.Inmem = inmemconfig.Consumer{Store: nil}\n\t\tc.Kafka = kafkaconfig.Consumer{}\n\t\tc.Pubsub = pubsubconfig.Consumer{}\n\t\tc.Standardstream = standardstreamconfig.Consumer{}\n\t\tc.Logger = nil\n\t\tc.HandleInterrupt = false\n\t\tc.HandleErrors = false\n\t\tc.Name = \"\"\n\t\tc.AllowEnvironmentBasedConfiguration = false\n\t}\n\n\tfor _, option := range options {\n\t\toption.apply(&c, nil)\n\t}\n\n\terr = envconfig.Process(c.Name, 
&c)\n\trequire.NoError(tb, err)\n\n\treturn c\n}", "func OverwriteConsumer(overwrite runtime.Consumer) Option {\n\treturn OverwriteConsumerForStatus(overwrite, ForAllStatusCodes)\n}", "func (c Configuration) consumers(db *pkg.AgentDB, genericDiffPaths *[]string) (consumers pkg.BaseConsumers) {\n\tfs := afero.NewOsFs()\n\tvar existingConsumersFiles = make(map[string]bool)\n\tlistOfRegexpsExcludes := c.compileRegex(c.Consumers.Excludes)\n\n\tif c.Consumers.Root != \"\" {\n\t\tfs = afero.NewBasePathFs(fs, c.Consumers.Root)\n\t}\n\tif c.Consumers.Access != \"\" {\n\t\tif !c.isFileToBeExcluded(c.Consumers.Access, existingConsumersFiles, listOfRegexpsExcludes) {\n\t\t\tstate := &pkg.AccessState{\n\t\t\t\tAccessListener: pkg.NewAccessListener(\n\t\t\t\t\tpkg.AccessFileOpt(fs, c.Consumers.Access, c.logger()),\n\t\t\t\t),\n\t\t\t}\n\t\t\tconsumers = append(consumers, &pkg.BaseConsumer{AgentDB: db, ParserLoader: state})\n\t\t\texistingConsumersFiles[c.Consumers.Access] = true\n\t\t}\n\t}\n\tif c.Consumers.Users.Shadow != \"\" && c.Consumers.Users.Passwd != \"\" {\n\t\tif !c.isFileToBeExcluded(c.Consumers.Users.Shadow, existingConsumersFiles, listOfRegexpsExcludes) ||\n\t\t\t!c.isFileToBeExcluded(c.Consumers.Users.Passwd, existingConsumersFiles, listOfRegexpsExcludes) {\n\t\t\tstate := &pkg.UsersState{\n\t\t\t\tUsersListener: pkg.NewUsersListener(func(l *pkg.UsersListener) {\n\t\t\t\t\tl.Passwd = c.Consumers.Users.Passwd\n\t\t\t\t\tl.Shadow = c.Consumers.Users.Shadow\n\t\t\t\t\tl.Fs, l.Logger = fs, c.logger()\n\t\t\t\t}),\n\t\t\t}\n\t\t\tconsumers = append(consumers, &pkg.BaseConsumer{AgentDB: db, ParserLoader: state})\n\t\t\texistingConsumersFiles[c.Consumers.Users.Shadow] = true\n\t\t\texistingConsumersFiles[c.Consumers.Users.Passwd] = true\n\t\t}\n\t}\n\tif len(c.Consumers.GenericDiff) > 0 {\n\t\t//get list of files to watch\n\t\tgenericDiffFiles := c.getListOfFiles(fs, c.Consumers.GenericDiff)\n\t\tfor _, genericDiffFile := range genericDiffFiles {\n\t\t\tif !c.isFileToBeExcluded(genericDiffFile.File, existingConsumersFiles, listOfRegexpsExcludes) {\n\t\t\t\tstate := &pkg.GenericDiffState{\n\t\t\t\t\tGenericDiffListener: pkg.NewGenericDiffListener(\n\t\t\t\t\t\tpkg.GenericDiffFileOpt(fs, genericDiffFile.File, c.logger()),\n\t\t\t\t\t),\n\t\t\t\t}\n\t\t\t\tconsumers = append(consumers, &pkg.BaseConsumer{AgentDB: db, ParserLoader: state})\n\t\t\t\texistingConsumersFiles[genericDiffFile.File] = true\n\t\t\t\t//this variable is used by watcher to get the complete list of paths to monitor, instead of the list from the config\n\t\t\t\t*genericDiffPaths = append(*genericDiffPaths, genericDiffFile.File)\n\t\t\t}\n\t\t}\n\t}\n\tif len(c.Consumers.Generic) > 0 {\n\t\tgenericFiles := c.getListOfFiles(fs, c.Consumers.Generic)\n\t\tfor _, genericFile := range genericFiles {\n\t\t\tif !c.isFileToBeExcluded(genericFile.File, existingConsumersFiles, listOfRegexpsExcludes) {\n\t\t\t\tgenericFile := genericFile\n\t\t\t\tstate := &pkg.GenericState{\n\t\t\t\t\tGenericListener: pkg.NewGenericListener(func(l *pkg.GenericListener) {\n\t\t\t\t\t\tl.File = genericFile.File\n\t\t\t\t\t\tl.IsDir = genericFile.IsDir\n\t\t\t\t\t\tl.Key = c.key\n\t\t\t\t\t\tl.Fs = fs\n\t\t\t\t\t\tl.Logger = c.logger()\n\t\t\t\t\t}),\n\t\t\t\t}\n\t\t\t\tconsumers = append(consumers, &pkg.BaseConsumer{AgentDB: db, ParserLoader: state})\n\t\t\t}\n\t\t}\n\t}\n\treturn consumers\n}", "func InitConsumer(broker, group string) {\n\tvar err error\n\tutils.ConsumerObject, err = kafka.NewConsumer(&kafka.ConfigMap{\n\t\t\"bootstrap.servers\": 
broker,\n\t\t\"group.id\": group,\n\t\t\"session.timeout.ms\": 6000,\n\t\t\"go.events.channel.enable\": true,\n\t\t\"go.application.rebalance.enable\": true,\n\t\t// Enable generation of PartitionEOF when the\n\t\t// end of a partition is reached.\n\t\t\"enable.partition.eof\": true,\n\t\t\"auto.offset.reset\": \"earliest\"})\n\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"Failed to create consumer: %s\\n\", err)\n\t\tos.Exit(1)\n\t}\n}", "func Timeout(timeout time.Duration, timeoutFunction OnTimeout) crOption {\n\treturn func(cr *ConsumerRegistration) *ConsumerRegistration {\n\t\tcr.timeout = timeout\n\t\tcr.onTimeout = timeoutFunction\n\t\treturn cr\n\t}\n}", "func WithCounter(counter Counter) Option {\n\treturn func(c *Consumer) error {\n\t\tc.counter = counter\n\t\treturn nil\n\t}\n}", "func TestConsumerOptions(tb testing.TB, options ...Option) []Option {\n\ttb.Helper()\n\n\tvar defaults []Option\n\n\tconfig := ConsumerOptions(func(c *Consumer) {\n\t\tc.Kafka = kafkaconfig.TestConsumer(tb)\n\t})\n\n\tdefaults = append(defaults, config)\n\n\treturn append(defaults, options...)\n}", "func WithActivation(activateAfter *time.Time) SecretOption {\n\treturn func(op *Options) {\n\t\top.Activates = activateAfter\n\t}\n}", "func (c *offsetCoordinator) commit(\n\ttopic string, partition int32, offset int64, metadata string) (resErr error) {\n\t// Eliminate the scenario where Kafka erroneously returns -1 as the offset\n\t// which then gets made permanent via an immediate flush.\n\t//\n\t// Technically this disallows a valid use case of rewinding a consumer\n\t// group to the beginning, but 1) this isn't possible through any API we\n\t// currently expose since you cannot have a message numbered -1 in hand;\n\t// 2) this restriction only applies to partitions with a non-expired\n\t// message at offset 0.\n\tif offset < 0 {\n\t\treturn fmt.Errorf(\"Cannot commit negative offset %d for [%s:%d].\",\n\t\t\toffset, topic, partition)\n\t}\n\n\tretry := &backoff.Backoff{Min: c.conf.RetryErrWait, Jitter: true}\n\tfor try := 0; try < c.conf.RetryErrLimit; try++ {\n\t\tif try != 0 {\n\t\t\ttime.Sleep(retry.Duration())\n\t\t}\n\n\t\t// get a copy of our connection with the lock, this might establish a new\n\t\t// connection so can take a bit\n\t\tconn, err := c.broker.coordinatorConnection(c.conf.ConsumerGroup)\n\t\tif conn == nil {\n\t\t\tresErr = err\n\t\t\tcontinue\n\t\t}\n\t\tdefer func(lconn *connection) { go c.broker.conns.Idle(lconn) }(conn)\n\n\t\tresp, err := conn.OffsetCommit(&proto.OffsetCommitReq{\n\t\t\tClientID: c.broker.conf.ClientID,\n\t\t\tConsumerGroup: c.conf.ConsumerGroup,\n\t\t\tTopics: []proto.OffsetCommitReqTopic{\n\t\t\t\t{\n\t\t\t\t\tName: topic,\n\t\t\t\t\tPartitions: []proto.OffsetCommitReqPartition{\n\t\t\t\t\t\t{ID: partition, Offset: offset, Metadata: metadata},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tresErr = err\n\n\t\tif _, ok := err.(*net.OpError); ok || err == io.EOF || err == syscall.EPIPE {\n\t\t\tlog.Debugf(\"connection died while committing on %s:%d for %s: %s\",\n\t\t\t\ttopic, partition, c.conf.ConsumerGroup, err)\n\t\t\t_ = conn.Close()\n\n\t\t} else if err == nil {\n\t\t\t// Should be a single response in the payload.\n\t\t\tfor _, t := range resp.Topics {\n\t\t\t\tfor _, p := range t.Partitions {\n\t\t\t\t\tif t.Name != topic || p.ID != partition {\n\t\t\t\t\t\tlog.Warningf(\"commit response with unexpected data for %s:%d\",\n\t\t\t\t\t\t\tt.Name, p.ID)\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\treturn p.Err\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn 
errors.New(\"response does not contain commit information\")\n\t\t}\n\t}\n\treturn resErr\n}", "func InitAfterTxCommitCallback(ctx context.Context) context.Context {\n\tafterCommitFunc := []AfterCommitCallback{}\n\treturn context.WithValue(ctx, CtxKeyAfterCommitCallback, &afterCommitFunc)\n}", "func (bw *BufferedWriterMongo) WithAutoFlush(duration time.Duration) *BufferedWriterMongo {\n\tbw.flushDuration = duration\n\tif duration > 0 { // activate background auto-flush\n\t\tbw.once.Do(func() {\n\t\t\tbw.ctx, bw.cancel = context.WithCancel(context.Background())\n\t\t\tticker := time.NewTicker(duration)\n\t\t\tgo func() {\n\t\t\t\tdefer bw.cancel()\n\t\t\t\tfor {\n\t\t\t\t\tselect {\n\t\t\t\t\tcase <-ticker.C:\n\t\t\t\t\t\tvar shouldFlush bool\n\t\t\t\t\t\t_ = bw.synced(func() error {\n\t\t\t\t\t\t\tshouldFlush = time.Now().After(bw.lastWriteTime.Add(bw.flushDuration)) && len(bw.buffer) > 0\n\t\t\t\t\t\t\treturn nil\n\t\t\t\t\t\t})\n\t\t\t\t\t\tif shouldFlush {\n\t\t\t\t\t\t\tif err := bw.Flush(); err != nil {\n\t\t\t\t\t\t\t\tlog.Printf(\"[WARN] flush failed, %s\", err)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\tcase <-bw.ctx.Done():\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}()\n\t\t})\n\t}\n\treturn bw\n}", "func ConsumerSetAsyncNum(num int) model.Option {\n\treturn model.FuncOption(func(d *model.Dispatcher) { d.ConsumerAsyncNum = num })\n}", "func IdleHeartbeat(hb time.Duration) ConsumerOption {\n\treturn func(o *api.ConsumerConfig) error {\n\t\to.Heartbeat = hb\n\t\treturn nil\n\t}\n}", "func (_Energyconsumption *EnergyconsumptionSession) SetConsumer(_owner string, _deviceType string, _peakPowerPos uint32, _peakPowerNeg uint32, _latitude uint32, _longitude uint32, _voltageLevel uint32, _location string, _installDate string) (*types.Transaction, error) {\n\treturn _Energyconsumption.Contract.SetConsumer(&_Energyconsumption.TransactOpts, _owner, _deviceType, _peakPowerPos, _peakPowerNeg, _latitude, _longitude, _voltageLevel, _location, _installDate)\n}", "func New(client *redis.Client, group, consumer string, options ...Option) *Consumer {\n\tcfg := &config{\n\t\tgroup: group,\n\t\tconsumer: consumer,\n\t}\n\tfor _, opt := range options {\n\t\topt(cfg)\n\t}\n\tlastIDs := make(map[string]string)\n\tfor _, stream := range cfg.streams {\n\t\tlastIDs[stream] = \"0-0\"\n\t}\n\n\treturn &Consumer{\n\t\tclient: client,\n\t\tcfg: cfg,\n\t\tlastIDs: lastIDs,\n\t}\n}", "func WithMaxMinByteConsumerOption(min, max int) ConsumerOption {\n\treturn func(c *Consumer) {\n\t\tc.config.MinBytes = min\n\t\tc.config.MaxBytes = max\n\t}\n}", "func NewAutoCommitTxRunMessage(query string, params map[string]interface{}, timeout time.Duration, txConfig map[string]interface{}, dbName string, mode bolt_mode.AccessMode) RunWithMetadataMessage {\n\treturn newRunMessageWithMetadata(query, params, BuildTxMetadataWithDatabase(&timeout, txConfig, dbName, mode, nil))\n}", "func OpenConsumer(ctx context.Context, driverURL string, fn Handler, maxGoroutines int) (Consumer, error) {\n\tconsumer := Consumer{driverURL: driverURL, fn: fn, maxGoroutines: maxGoroutines}\n\n\tsub, err := gocloudpubsub.OpenSubscription(ctx, driverURL)\n\tif err != nil {\n\t\treturn consumer, err\n\t}\n\n\tconsumer.sub = sub\n\treturn consumer, nil\n}", "func WithTimeNow() Option {\n\treturn func(i interface{}) error {\n\t\tif c, ok := i.(*ceClient); ok {\n\t\t\tc.eventDefaulterFns = append(c.eventDefaulterFns, DefaultTimeToNowIfNotSet)\n\t\t}\n\t\treturn nil\n\t}\n}", "func initConsumer() sarama.Consumer {\n\tsarama.Logger = 
log.New(os.Stdout, \"\", log.Ltime)\n\n\tconfig := sarama.NewConfig()\n\tconfig.ClientID = CLIENTID\n\tconfig.Consumer.Return.Errors = true\n\n\tbrokers := []string{BROKERS}\n\n\tmaster, err := sarama.NewConsumer(brokers, config)\n\tif err != nil {\n\t\tfmt.Println(\"error create master consumer: \")\n\t\tpanic(err)\n\t}\n\n\treturn master\n}", "func ConcurrencyOption(n int) Option {\n\tif n <= 0 {\n\t\tn = runtime.GOMAXPROCS(0)\n\t}\n\treturn func(a applier) error {\n\t\tswitch rw := a.(type) {\n\t\tcase nil:\n\t\t\ts := fmt.Sprintf(\"ConcurrencyOption(%d)\", n)\n\t\t\treturn lz4errors.Error(s)\n\t\tcase *Writer:\n\t\t\trw.num = n\n\t\t\treturn nil\n\t\tcase *Reader:\n\t\t\trw.num = n\n\t\t\treturn nil\n\t\t}\n\t\treturn lz4errors.ErrOptionNotApplicable\n\t}\n}", "func (mocb *mqttOptionalConfigurationBuilder) AutoReconnect(autoReconnect bool) *mqttOptionalConfigurationBuilder {\n\tmocb.options[pkg.AutoReconnect] = strconv.FormatBool(autoReconnect)\n\treturn mocb\n}", "func WithCreatedTime(t time.Time) DocOption {\n\treturn func(opts *Doc) {\n\t\topts.Created = &t\n\t}\n}", "func (c *ConsumerManager) AddConsumer(topic, channel string, client ConsumerClient) error {\n\n}", "func (a v3ioAppender) Commit() error { return nil }", "func NewKafkaCommitter(\n\toutputHostUUID string,\n\tcgUUID string,\n\tlogger bark.Logger,\n\tclient **sc.Consumer) *KafkaCommitter {\n\tnow := time.Now()\n\n\tmeta := KafkaOffsetMetadata{\n\t\tVersion: kafkaOffsetMetadataVersion,\n\t\tOutputHostUUID: outputHostUUID,\n\t\tCGUUID: cgUUID,\n\t\tOutputHostStartTime: outputHostStartTime.Format(time.RFC3339),\n\t\tCommitterStartTime: now.Format(time.RFC3339),\n\t}\n\n\tmetaJSON, _ := json.Marshal(meta)\n\treturn &KafkaCommitter{\n\t\tOffsetStash: sc.NewOffsetStash(),\n\t\tmetadataString: string(metaJSON),\n\t\tKafkaOffsetMetadata: meta,\n\t\tlogger: logger,\n\t\tconsumer: client,\n\t}\n}", "func (b MemberBuilder) WithAutoRenewal(t bool) MemberBuilder {\n\tb.autoRenewal = t\n\treturn b\n}", "func CreateConsumer(topic string) *kafka.Consumer {\n\t//create at least consumer\n\t//enable.auto.commit is true and sync processing and auto.commit.interval.ms = 10000 -> atleast once/atmost once\n\t//enable.auto.commit is true and async processing and auto.commit.interval.ms = 0 -> atmost once\n\t//enable.auto.commit is false -> atleast once\n\n\tconsumer, err := kafka.NewConsumer(&kafka.ConfigMap{\n\t\t\"bootstrap.servers\": \"localhost\",\n\t\t\"group.id\": \"article_scraper_processor\",\n\t\t\"auto.offset.reset\": \"earliest\",\n\t\t\"enable.auto.commit\": \"false\",\n\t})\n\n\t//raise the panic in case of error\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tconsumer.SubscribeTopics([]string{topic}, nil)\n\n\t/*//poll to the topic and consume the message\n\tmsg, err := consumer.ReadMessage(-1)\n\t*/\n\treturn consumer\n}", "func (c CommitterProbe) SetCommitTS(ts uint64) {\n\tatomic.StoreUint64(&c.commitTS, ts)\n}", "func New(face iface.Face, cfg Config) (*Consumer, error) {\n\tsocket := face.NumaSocket()\n\trxC := (*C.TgConsumerRx)(eal.Zmalloc(\"TgConsumerRx\", C.sizeof_TgConsumerRx, socket))\n\tcfg.RxQueue.DisableCoDel = true\n\tif e := iface.PktQueueFromPtr(unsafe.Pointer(&rxC.rxQueue)).Init(cfg.RxQueue, socket); e != nil {\n\t\teal.Free(rxC)\n\t\treturn nil, nil\n\t}\n\n\ttxC := (*C.TgConsumerTx)(eal.Zmalloc(\"TgConsumerTx\", C.sizeof_TgConsumerTx, socket))\n\ttxC.face = (C.FaceID)(face.ID())\n\ttxC.interestMp = (*C.struct_rte_mempool)(ndni.InterestMempool.Get(socket).Ptr())\n\tC.pcg32_srandom_r(&txC.trafficRng, 
C.uint64_t(rand.Uint64()), C.uint64_t(rand.Uint64()))\n\tC.NonceGen_Init(&txC.nonceGen)\n\n\tvar consumer Consumer\n\tconsumer.rxC = rxC\n\tconsumer.txC = txC\n\tconsumer.Rx = ealthread.New(\n\t\tcptr.Func0.C(unsafe.Pointer(C.TgConsumerRx_Run), unsafe.Pointer(rxC)),\n\t\tealthread.InitStopFlag(unsafe.Pointer(&rxC.stop)),\n\t)\n\tconsumer.Tx = ealthread.New(\n\t\tcptr.Func0.C(unsafe.Pointer(C.TgConsumerTx_Run), unsafe.Pointer(txC)),\n\t\tealthread.InitStopFlag(unsafe.Pointer(&txC.stop)),\n\t)\n\n\tfor i, pattern := range cfg.Patterns {\n\t\tif _, e := consumer.addPattern(pattern); e != nil {\n\t\t\treturn nil, fmt.Errorf(\"pattern(%d): %s\", i, e)\n\t\t}\n\t}\n\tconsumer.SetInterval(cfg.Interval.Duration())\n\treturn &consumer, nil\n}", "func (p *AutoCommitter) Flush() error {\n\tif p.verbose {\n\t\tlog.Info(fmt.Sprintf(\"AutoCommitter-%s(%s) a new flush is coming\", p.name, p.coll))\n\t}\n\tfor _, w := range p.workers {\n\t\tw.flushC <- struct{}{}\n\t\t<-w.flushAckC // wait for completion\n\t}\n\tif p.verbose {\n\t\tlog.Info(fmt.Sprintf(\"AutoCommitter-%s(%s) a new flush is finished\", p.name, p.coll))\n\t}\n\treturn nil\n}", "func WithNodeSelectionPolicy(policy NodeSelectionPolicy) ClientOption {\n\treturn func(cc *committeeClient) {\n\t\tcc.nodeSelectionPolicy = policy\n\t}\n}", "func TriggerLatestOffset() *feature.Feature {\n\tf := feature.NewFeatureNamed(\"Consumer latest auto.offset.reset config\")\n\n\tbrokerName := feature.MakeRandomK8sName(\"broker\")\n\tcmName := feature.MakeRandomK8sName(\"cm-config\")\n\n\tsource1 := feature.MakeRandomK8sName(\"source1\")\n\tsource2 := feature.MakeRandomK8sName(\"source2\")\n\n\ttrigger1Name := feature.MakeRandomK8sName(\"trigger1\")\n\tsink1 := feature.MakeRandomK8sName(\"sink1\")\n\n\ttrigger2Name := feature.MakeRandomK8sName(\"trigger2\")\n\tsink2 := feature.MakeRandomK8sName(\"sink2\")\n\n\tevent1 := test.FullEvent()\n\teventID1 := uuid.New().String()\n\tevent1.SetID(eventID1)\n\n\tevent2 := test.FullEvent()\n\teventID2 := uuid.New().String()\n\tevent2.SetID(eventID2)\n\n\tf.Setup(\"install config\", configmap.Copy(types.NamespacedName{Namespace: system.Namespace(), Name: \"kafka-broker-config\"}, cmName))\n\tf.Setup(\"install broker\", broker.Install(brokerName, append(broker.WithEnvConfig(), broker.WithConfig(cmName))...))\n\tf.Setup(\"broker is ready\", broker.IsReady(brokerName))\n\n\tf.Setup(\"install sink1\", eventshub.Install(sink1, eventshub.StartReceiver))\n\tf.Setup(\"install sink2\", eventshub.Install(sink2, eventshub.StartReceiver))\n\tf.Setup(\"install trigger 1\", trigger.Install(trigger1Name, brokerName, trigger.WithSubscriber(service.AsKReference(sink1), \"\")))\n\tf.Setup(\"trigger 1 is ready\", trigger.IsReady(trigger1Name))\n\n\tf.Requirement(\"send event 1\", eventshub.Install(source1, eventshub.InputEvent(event1), eventshub.StartSenderToResource(broker.GVR(), brokerName)))\n\tf.Requirement(\"event 1 received\", assert.OnStore(sink1).MatchEvent(test.HasId(eventID1)).Exact(1))\n\n\tf.Assert(\"install trigger 2\", trigger.Install(trigger2Name, brokerName, trigger.WithSubscriber(service.AsKReference(sink2), \"\")))\n\tf.Assert(\"trigger 2 is ready\", trigger.IsReady(trigger2Name))\n\n\tf.Assert(\"send event 2\", func(ctx context.Context, t feature.T) {\n\t\ttrigger.IsReady(trigger2Name)(ctx, t) // Wait for trigger ready\n\t\teventshub.Install(source2, eventshub.InputEvent(event2), eventshub.StartSenderToResource(broker.GVR(), brokerName))(ctx, t)\n\t})\n\n\t// Both triggers receive event 1 and 2.\n\tf.Assert(\"event 2 is 
received by sink 1\", assert.OnStore(sink1).MatchEvent(test.HasId(eventID2)).Exact(1))\n\tf.Assert(\"event 2 is received by sink 2\", assert.OnStore(sink2).MatchEvent(test.HasId(eventID2)).Exact(1))\n\n\t// Trigger 2 doesn't receive event 1 (sent before it was ready)\n\tf.Assert(\"event 1 is not received by sink 2\", func(ctx context.Context, t feature.T) {\n\t\ttrigger.IsReady(trigger2Name)(ctx, t) // Wait for trigger ready\n\t\ttime.Sleep(20 * time.Second) // eventually\n\t\tassert.OnStore(sink2).MatchEvent(test.HasId(eventID1)).Not()(ctx, t)\n\t})\n\n\treturn f\n}", "func (m *ManagedConsumer) Ack(ctx context.Context, msg Message) error {\n\tfor {\n\t\tm.mu.RLock()\n\t\tconsumer := m.consumer\n\t\twait := m.waitc\n\t\tm.mu.RUnlock()\n\n\t\tif consumer == nil {\n\t\t\tselect {\n\t\t\tcase <-wait:\n\t\t\t\t// a new consumer was established.\n\t\t\t\t// Re-enter read-lock to obtain it.\n\t\t\t\tcontinue\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn ctx.Err()\n\t\t\t}\n\t\t}\n\n\t\treturn consumer.Ack(msg)\n\t}\n}", "func New(address string, process Process, options ...OptionEvent) *Consumer {\n\tarl := &Consumer{\n\t\tcfg: ConsumerConfig{\n\t\t\tMaxInFlight: DefaultMaxInflight,\n\t\t\tNumOfConsumers: DefaultNumOfConsumers,\n\t\t\tMaxAttempts: DefaultMaxAttempts,\n\t\t\tRequeueInterval: DefaultRequeueInterval,\n\t\t\tTimeout: DefaultMsgTimeout,\n\t\t},\n\t\taddress: address,\n\t\tprocess: process,\n\t}\n\n\tfor _, opt := range options {\n\t\topt(arl)\n\t}\n\n\treturn arl\n}", "func (c *consumer) Ack() error {\n\tif err := c.store.Ack(c.topic, c.ackOffset); err != nil {\n\t\treturn fmt.Errorf(\"acking topic %s with offset %d: %v\", c.topic, c.ackOffset, err)\n\t}\n\n\tc.outstanding = false\n\n\treturn nil\n}", "func (c *Conn) GetAutocommit() bool {\n\treturn C.sqlite3_get_autocommit(c.db) != 0\n}", "func TTL(ttl time.Duration) crOption {\n\treturn func(cr *ConsumerRegistration) *ConsumerRegistration {\n\t\tcr.ttl = ttl\n\t\treturn cr\n\t}\n}", "func BackoffPolicy(policy []time.Duration) ConsumerOption {\n\treturn func(o *api.ConsumerConfig) error {\n\t\to.BackOff = policy\n\t\treturn nil\n\t}\n}", "func NewConsumerWithHash(ctx context.Context, db gorpmapper.SqlExecutorWithTx, userID, hash string) (*sdk.AuthUserConsumer, error) {\n\treturn newConsumerWithData(ctx, db, userID, map[string]string{\n\t\t\"hash\": hash,\n\t})\n}", "func Cacher() *cobra.Command {\n\tcchr := &cobra.Command{\n\t\tUse: \"cacher\",\n\t\tShort: \"cacher keeps recent metrics from the bus in-memory\",\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\tctx, cancel := context.WithCancel(context.Background())\n\t\t\tterm := make(chan os.Signal, 1)\n\t\t\tsignal.Notify(term, os.Interrupt, syscall.SIGTERM)\n\t\t\tgo func() {\n\t\t\t\t<-term\n\t\t\t\tlogrus.Info(\"shutting down...\")\n\t\t\t\tcancel()\n\t\t\t\t<-term\n\t\t\t\tos.Exit(1)\n\t\t\t}()\n\n\t\t\tadvertisedAddr := viper.GetString(flagAdvertise)\n\t\t\tlistenAddr := viper.GetString(flagAddress)\n\t\t\twebListenAddr := viper.GetString(flagWebListenAddress)\n\t\t\tkafkaAddrs := strings.Split(viper.GetString(flagKafkaAddrs), \",\")\n\t\t\tclientID := viper.GetString(flagKafkaClientID)\n\t\t\tgroupID := viper.GetString(flagKafkaGroupID)\n\t\t\tkafkaSessionTimeout := viper.GetDuration(flagKafkaSession)\n\t\t\tkafkaHeartbeat := viper.GetDuration(flagKafkaHeartbeat)\n\t\t\tkafkaTopic := viper.GetString(flagKafkaTopic)\n\t\t\tmaxAge := viper.GetDuration(flagCacherMaxAge)\n\t\t\tcleanup := 
viper.GetDuration(flagCacherCleanup)\n\n\t\t\tlogrus.WithFields(logrus.Fields{\n\t\t\t\t\"advertised_addr\": advertisedAddr,\n\t\t\t\t\"listen_addr\": listenAddr,\n\t\t\t\t\"web_listen_addr\": webListenAddr,\n\t\t\t\t\"kafka_addrs\": kafkaAddrs,\n\t\t\t\t\"kafka_client_id\": clientID,\n\t\t\t\t\"kafka_group_id\": groupID,\n\t\t\t\t\"kafka_heartbeat\": kafkaHeartbeat,\n\t\t\t\t\"kafka_session_timeout\": kafkaSessionTimeout,\n\t\t\t\t\"kafka_topic\": kafkaTopic,\n\t\t\t\t\"max_age\": maxAge,\n\t\t\t}).Info(\"starting cacher\")\n\n\t\t\tud, err := json.Marshal(model.UserData{\n\t\t\t\tAdvertisedAddr: advertisedAddr,\n\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tcfg := sarama.NewConfig()\n\t\t\tcfg.Version = sarama.V0_10_0_0\n\t\t\tcfg.ClientID = clientID\n\t\t\tclient, err := sarama.NewClient(kafkaAddrs, cfg)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tcoord := cg.NewCoordinator(&cg.CoordinatorConfig{\n\t\t\t\tClient: client,\n\t\t\t\tContext: ctx,\n\t\t\t\tGroupID: groupID,\n\t\t\t\tProtocols: []cg.ProtocolKey{\n\t\t\t\t\t{\n\t\t\t\t\t\tProtocol: &protocol.RoundRobin{\n\t\t\t\t\t\t\tMyUserData: ud,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tKey: \"roundrobin\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tSessionTimeout: kafkaSessionTimeout,\n\t\t\t\tHeartbeat: kafkaHeartbeat,\n\t\t\t\tTopics: []string{kafkaTopic},\n\t\t\t})\n\t\t\tc, err := cacher.NewCacher(&cacher.Config{\n\t\t\t\tCleanup: cleanup,\n\t\t\t\tClient: client,\n\t\t\t\tCoordinator: coord,\n\t\t\t\tMaxAge: maxAge,\n\t\t\t\tTopic: kafkaTopic,\n\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tlis, err := net.Listen(\"tcp\", listenAddr)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tprometheus.MustRegister(c)\n\t\t\ts := grpc.NewServer()\n\t\t\tcacher.RegisterCacherServer(s, c)\n\t\t\tvar outerErr error\n\t\t\t// run grpc server in goroutine and allow it to close context and record error if any.\n\t\t\tgo func() {\n\t\t\t\tdefer cancel()\n\t\t\t\terr := s.Serve(lis)\n\t\t\t\tif err != nil {\n\t\t\t\t\touterErr = err\n\t\t\t\t}\n\t\t\t}()\n\t\t\t// run http server in goroutine and allow it to close context and record error if any.\n\t\t\tgo func() {\n\t\t\t\tdefer cancel()\n\t\t\t\thttp.Handle(\"/metrics\", prometheus.Handler())\n\t\t\t\terr := http.ListenAndServe(webListenAddr, nil)\n\t\t\t\tif err != nil {\n\t\t\t\t\touterErr = err\n\t\t\t\t}\n\t\t\t}()\n\t\t\t// run cacher service until it's done (will stop when context is canceled)\n\t\t\terr = c.Run()\n\t\t\tif err != nil {\n\t\t\t\t// if cacher failed, return its error\n\t\t\t\treturn err\n\t\t\t}\n\t\t\t// otherwise, the cacher stopped because the context canceled because of grpc/http error or shutdown.\n\t\t\treturn outerErr\n\t\t},\n\t}\n\n\thostname, err := os.Hostname()\n\tif err != nil {\n\t\thostname = \"localhost\"\n\t}\n\taddr := fmt.Sprintf(\"%s:%d\", hostname, 8082)\n\tcchr.Flags().String(flagAddress, \":8082\", \"rpc address to listen on\")\n\tcchr.Flags().String(flagWebListenAddress, \":8080\", \"web address for telemetry\")\n\tcchr.Flags().String(flagAdvertise, addr, \"rpc address to advertise\")\n\tcchr.Flags().Duration(flagCacherCleanup, time.Minute*10, \"garbage collection interval for cacher\")\n\tcchr.Flags().Duration(flagCacherMaxAge, time.Hour*4, \"max age of samples to keep in-memory\")\n\tcchr.Flags().String(flagKafkaAddrs, \"\", \"one.example.com:9092,two.example.com:9092\")\n\tcchr.Flags().String(flagKafkaClientID, \"vulcan-cacher\", \"set the kafka client id\")\n\tcchr.Flags().String(flagKafkaGroupID, 
\"vulcan-cacher\", \"workers with the same groupID will join the same Kafka ConsumerGroup\")\n\tcchr.Flags().Duration(flagKafkaHeartbeat, time.Second*3, \"kafka consumer group heartbeat interval\")\n\tcchr.Flags().Duration(flagKafkaSession, time.Second*30, \"kafka consumer group session duration\")\n\tcchr.Flags().String(flagKafkaTopic, \"vulcan\", \"set the kafka topic to consume\")\n\n\treturn cchr\n}", "func ReplayInstantly() ConsumerOption {\n\treturn func(o *api.ConsumerConfig) error {\n\t\to.ReplayPolicy = api.ReplayInstant\n\t\treturn nil\n\t}\n}", "func Concurrency(c int) Option {\n\treturn func(o *options) {\n\t\to.concurrency = c\n\t}\n}", "func maybeCommit(c *kafka.Consumer, topicPartition kafka.TopicPartition) error {\n\t// Commit the already-stored offsets to Kafka whenever the offset is divisible\n\t// by 10, otherwise return early.\n\t// This logic is completely arbitrary. We can use any other internal or\n\t// external variables to decide when we commit the already-stored offsets.\n\tif topicPartition.Offset%10 != 0 {\n\t\treturn nil\n\t}\n\n\tcommitedOffsets, err := c.Commit()\n\n\t// ErrNoOffset occurs when there are no stored offsets to commit. This\n\t// can happen if we haven't stored anything since the last commit.\n\t// While this will never happen for this example since we call this method\n\t// per-message, and thus, always have something to commit, the error\n\t// handling is illustrative of how to handle it in cases we call Commit()\n\t// in another way, for example, every N seconds.\n\tif err != nil && err.(kafka.Error).Code() != kafka.ErrNoOffset {\n\t\treturn err\n\t}\n\n\tfmt.Printf(\"%% Commited offsets to Kafka: %v\\n\", commitedOffsets)\n\treturn nil\n}" ]
[ "0.80161095", "0.5698739", "0.56289744", "0.55272317", "0.5302148", "0.52034277", "0.5149862", "0.5130434", "0.5120156", "0.5006398", "0.4895261", "0.48788273", "0.47227955", "0.47208133", "0.47032696", "0.46719334", "0.46237248", "0.46131718", "0.45893103", "0.45277336", "0.4479588", "0.4450407", "0.44312355", "0.44140077", "0.43837455", "0.438288", "0.43663278", "0.43472007", "0.43371728", "0.43258235", "0.43087482", "0.430203", "0.4292086", "0.42876104", "0.42797416", "0.4271953", "0.4244172", "0.42277536", "0.4217967", "0.42129353", "0.42112142", "0.41925335", "0.41863817", "0.41748494", "0.4170715", "0.4148782", "0.41440973", "0.41156355", "0.41141558", "0.40922782", "0.4090273", "0.40854338", "0.4077065", "0.407384", "0.4073393", "0.40721244", "0.40664455", "0.40628037", "0.40610954", "0.4042761", "0.4039449", "0.40357855", "0.40209827", "0.4011816", "0.40113175", "0.40046424", "0.40022078", "0.40018302", "0.39962193", "0.39934787", "0.39923513", "0.39876127", "0.39875212", "0.39823067", "0.39750668", "0.3971836", "0.39690727", "0.3959724", "0.39561924", "0.39502358", "0.39437166", "0.39428705", "0.3931944", "0.39269686", "0.39201912", "0.3918036", "0.391306", "0.39127925", "0.39072895", "0.38952684", "0.38949034", "0.38893756", "0.38867274", "0.38843808", "0.38832223", "0.38807088", "0.387407", "0.38676095", "0.38585865", "0.38490057" ]
0.8712708
0
WithDecoderConsumerOption sets the decoder for the Consumer Message
func WithDecoderConsumerOption(fn Decoder) ConsumerOption { return func(c *Consumer) { c.dec = fn } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func WithReaderConsumerOption(reader *kafgo.Reader) ConsumerOption {\n\treturn func(c *Consumer) { c.reader = reader }\n}", "func WithDecoder(key string, dec func(body io.ReadCloser) (io.ReadCloser, error)) ToServerOption {\n\treturn func(opts *toServerOptions) {\n\t\tif opts.decoders == nil {\n\t\t\topts.decoders = map[string]func(body io.ReadCloser) (io.ReadCloser, error){}\n\t\t}\n\t\topts.decoders[key] = dec\n\t}\n}", "func NewDecoder(options ...DecoderOption) Decoder {\n\td := Decoder{}\n\tfor _, option := range options {\n\t\toption.applyDecoderOption(&d)\n\t}\n\tif d.set == nil {\n\t\td.set = charset.DefaultDecoder()\n\t}\n\tif d.ext == nil {\n\t\td.ext = charset.DefaultExtDecoder()\n\t}\n\treturn d\n}", "func WithOffsetConsumerOption(offset int64) ConsumerOption {\n\treturn func(c *Consumer) {\n\t\tswitch offset {\n\t\tcase LastOffset:\n\t\t\tc.config.StartOffset = LastOffset\n\t\tcase FirstOffset:\n\t\t\tc.config.StartOffset = FirstOffset\n\t\tdefault:\n\t\t\tc.config.StartOffset = FirstOffset\n\t\t}\n\t}\n}", "func (c *CloseConsumer) Decode(bc *api.BaseCommand) error {\n\t// set fields\n\tc.CID = bc.CloseConsumer.GetConsumerId()\n\tc.RID = bc.CloseConsumer.GetRequestId()\n\n\treturn nil\n}", "func NewDecoder(opts Options) (*Decoder, error) {\n\tif opts.Reader == nil {\n\t\treturn nil, errors.New(\"Options.Reader can't be nil\")\n\t}\n\treturn &Decoder{\n\t\treader: opts.Reader,\n\t}, nil\n}", "func NewDecoder(schemaRepository schemaregistry.Repository, options ...option) Decoder {\n\treturn &implDecoder{\n\t\tschemaRepository: schemaRepository,\n\t\tavroAPI: newConfig(options...).Freeze(),\n\t}\n}", "func NewDecoder(opts DecoderOptions) (*Decoder, error) {\n\tvar d Decoder\n\tif err := opts.validate(); err != nil {\n\t\treturn nil, fmt.Errorf(\"imaging: error validating decoder options: %w\", err)\n\t}\n\tif opts.ConcurrencyLevel > 0 {\n\t\td.sem = make(chan struct{}, opts.ConcurrencyLevel)\n\t}\n\td.opts = opts\n\treturn &d, nil\n}", "func WithEndpointConsumerOption(end endpoint.Endpoint) ConsumerOption {\n\treturn func(c *Consumer) { c.end = end }\n}", "func NewDecoder(provider ConfigProvider) *Decoder {\n\td := &Decoder{\n\t\tprovider: provider,\n\t}\n\treturn d\n}", "func NewDecoder(o DecoderOptions, eh *astiencoder.EventHandler, c *astikit.Closer) (d *Decoder, err error) {\n\t// Extend node metadata\n\tcount := atomic.AddUint64(&countDecoder, uint64(1))\n\to.Node.Metadata = o.Node.Metadata.Extend(fmt.Sprintf(\"decoder_%d\", count), fmt.Sprintf(\"Decoder #%d\", count), \"Decodes\", \"decoder\")\n\n\t// Create decoder\n\td = &Decoder{\n\t\tc: astikit.NewChan(astikit.ChanOptions{\n\t\t\tAddStrategy: astikit.ChanAddStrategyBlockWhenStarted,\n\t\t\tProcessAll: true,\n\t\t}),\n\t\teh: eh,\n\t\toutputCtx: o.OutputCtx,\n\t\tstatIncomingRate: astikit.NewCounterRateStat(),\n\t\tstatWorkRatio: astikit.NewDurationPercentageStat(),\n\t}\n\td.BaseNode = astiencoder.NewBaseNode(o.Node, astiencoder.NewEventGeneratorNode(d), eh)\n\td.d = newFrameDispatcher(d, eh, c)\n\td.addStats()\n\n\t// Find decoder\n\tvar cdc *avcodec.Codec\n\tif cdc = avcodec.AvcodecFindDecoder(o.CodecParams.CodecId()); cdc == nil {\n\t\terr = fmt.Errorf(\"astilibav: no decoder found for codec id %+v\", o.CodecParams.CodecId())\n\t\treturn\n\t}\n\n\t// Alloc context\n\tif d.ctxCodec = cdc.AvcodecAllocContext3(); d.ctxCodec == nil {\n\t\terr = fmt.Errorf(\"astilibav: no context allocated for codec %+v\", cdc)\n\t\treturn\n\t}\n\n\t// Copy codec parameters\n\tif ret := avcodec.AvcodecParametersToContext(d.ctxCodec, 
o.CodecParams); ret < 0 {\n\t\terr = fmt.Errorf(\"astilibav: avcodec.AvcodecParametersToContext failed: %w\", NewAvError(ret))\n\t\treturn\n\t}\n\n\t// Open codec\n\tif ret := d.ctxCodec.AvcodecOpen2(cdc, nil); ret < 0 {\n\t\terr = fmt.Errorf(\"astilibav: d.ctxCodec.AvcodecOpen2 failed: %w\", NewAvError(ret))\n\t\treturn\n\t}\n\n\t// Make sure the codec is closed\n\tc.Add(func() error {\n\t\tif ret := d.ctxCodec.AvcodecClose(); ret < 0 {\n\t\t\temitAvError(nil, eh, ret, \"d.ctxCodec.AvcodecClose failed\")\n\t\t}\n\t\treturn nil\n\t})\n\treturn\n}", "func CustomCodecOption(codec Codec) Option {\n\treturn func(o *options) {\n\t\to.codec = codec\n\t}\n}", "func (cfg frozenConfig) NewDecoder(reader io.Reader) Decoder {\n dec := decoder.NewStreamDecoder(reader)\n dec.SetOptions(cfg.decoderOpts)\n return dec\n}", "func NewDecoder(f FormatType, r io.Reader) (dec Decoder) {\n\tvar d DecodeProvider = nil\n\n\tswitch f {\n\tcase TomlFormat:\n\t\td = NewTomlDecoder(r)\n\tcase YamlFormat:\n\t\td = NewYamlDecoder(r)\n\tcase JsonFormat:\n\t\td = json.NewDecoder(r)\n\tdefault:\n\t}\n\n\treturn Decoder{Provider: d}\n}", "func WithAutoCommitConsumerOption(flag bool) ConsumerOption {\n\treturn func(c *Consumer) { c.autocommit = flag }\n}", "func ConsumerReader(r StreamReader) ConsumerOptionsFn {\n\treturn func(o *Consumer) error {\n\t\to.reader = r\n\t\treturn nil\n\t}\n}", "func NewDecoder(options ...Option) Decoder {\n\td := Decoder{listSeparator: \",\"}\n\tfor _, option := range options {\n\t\toption(&d)\n\t}\n\treturn d\n}", "func ConsumerOmitOldMsg() model.Option {\n\treturn model.FuncOption(func(d *model.Dispatcher) { d.ConsumerOmitOldMsg = true })\n}", "func Validator(v StructValidator) DecodeOption {\n\treturn func(d *Decoder) error {\n\t\td.validator = v\n\t\treturn nil\n\t}\n}", "func NewDecoder(r io.Reader, format Format) Decoder {\n\tswitch format {\n\tcase FmtProtoDelim:\n\t\treturn &protoDecoder{r: r}\n\t}\n\treturn &textDecoder{r: r}\n}", "func SetDecoder(contentType ContentType, decoder decoders.Func) {\n\t_ = defaultCtrl.SetDecoder(contentType, decoder)\n}", "func Decode(src []byte, options ...DecoderOption) ([]byte, error) {\n\td := NewDecoder(options...)\n\treturn d.Decode(src)\n}", "func (v *KnativeServingConfigurator) InjectDecoder(d *admission.Decoder) error {\n\tv.decoder = d\n\treturn nil\n}", "func NewConsumer(\n\tbrokers []string,\n\tlogger log.Logger,\n\toptions ...ConsumerOption,\n) (*Consumer, error) {\n\t// default values\n\tcfg := kafgo.ReaderConfig{\n\t\tBrokers: brokers,\n\t\tGroupID: defaultConsumerGroupID,\n\t\tTopic: defaultTopic,\n\t\tLogger: kafka.LoggerFunc(logger.Debugf),\n\t}\n\n\tcs := &Consumer{\n\t\treader: nil,\n\t\tconfig: &cfg,\n\t}\n\n\tfor _, o := range options {\n\t\to(cs)\n\t}\n\n\tif cs.end == nil {\n\t\treturn nil, errors.Wrap(\n\t\t\tErrCreatingConsumer, \"missing endpoint\",\n\t\t)\n\t}\n\n\tif cs.dec == nil {\n\t\treturn nil, errors.Wrap(\n\t\t\tErrCreatingConsumer, \"missing decoder\",\n\t\t)\n\t}\n\n\tif cs.errFn == nil {\n\t\tcs.errFn = defaultErrorFunc\n\t}\n\n\tif cs.errHandler == nil {\n\t\tcs.errHandler = transport.NewLogErrorHandler(logger)\n\t}\n\treturn cs, nil\n}", "func New(opts ...Option) Decoder {\n\td := defaultDecoder\n\tfor _, o := range opts {\n\t\to(&d)\n\t}\n\treturn d\n}", "func WithGroupIDConsumerOption(groupID string) ConsumerOption {\n\treturn func(c *Consumer) {\n\t\tc.config.GroupID = groupID\n\t}\n}", "func SetDecoderFunc(u UtilDecoderFunc) {\n\tdecoderFunc = u\n}", "func WithTopicConsumerOption(topic string) ConsumerOption 
{\n\treturn func(c *Consumer) {\n\t\tc.config.Topic = topic\n\t}\n}", "func OverwriteConsumer(overwrite runtime.Consumer) Option {\n\treturn OverwriteConsumerForStatus(overwrite, ForAllStatusCodes)\n}", "func NewDecoder(r io.Reader) *Decoder {\n\td := new(Decoder)\n\td.src = textproto.NewReader(bufio.NewReader(r))\n\td.attrs = make(map[string]struct{}, 8)\n\td.multi = make(map[string]struct{}, 8)\n\td.finfo = make(map[string][]int, 8)\n\treturn d\n}", "func WithMaxMinByteConsumerOption(min, max int) ConsumerOption {\n\treturn func(c *Consumer) {\n\t\tc.config.MinBytes = min\n\t\tc.config.MaxBytes = max\n\t}\n}", "func NewConsumer(conn net.Conn, compression bool) (*Consumer, error) {\n\tif !compression {\n\t\tdec := gob.NewDecoder(conn)\n\t\treturn &Consumer{conn, dec, nil}, nil\n\t}\n\n\tcompR, err := zlib.NewReader(conn)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdec := gob.NewDecoder(compR)\n\treturn &Consumer{conn, dec, compR}, nil\n}", "func (msg *Message) Decode(out interface{}) error {\n\tif msg.reader == nil {\n\t\tmsg.reader = bytes.NewReader(msg.Data)\n\t}\n\tdefer msg.c.decoderState.PushReader(msg.reader)()\n\treturn pvdata.Decode(msg.c.decoderState, out)\n}", "func Decoder(d DecodeRequestFunc) func(e *Endpoint) {\n\treturn func(e *Endpoint) { e.decode = d }\n}", "func RequestDecoder(dec DecodeRequestFunc) ServerOption {\n\treturn func(o *Server) {\n\t\to.decBody = dec\n\t}\n}", "func decode(ch channel.Receiver, v interface{}) error {\n\tbits, err := ch.Recv()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn json.Unmarshal(bits, v)\n}", "func New(client *redis.Client, group, consumer string, options ...Option) *Consumer {\n\tcfg := &config{\n\t\tgroup: group,\n\t\tconsumer: consumer,\n\t}\n\tfor _, opt := range options {\n\t\topt(cfg)\n\t}\n\tlastIDs := make(map[string]string)\n\tfor _, stream := range cfg.streams {\n\t\tlastIDs[stream] = \"0-0\"\n\t}\n\n\treturn &Consumer{\n\t\tclient: client,\n\t\tcfg: cfg,\n\t\tlastIDs: lastIDs,\n\t}\n}", "func WithDescriber(describer Describer) Option {\n\treturn func(c *Client) {\n\t\tc.describer = describer\n\t}\n}", "func WithStrictDecoding() ParserOption {\n\treturn func(p *Parser) {\n\t\tp.decodeStrict = true\n\t}\n}", "func (p *BaseChannelProposal) Decode(r io.Reader) (err error) {\n\tif p.InitBals == nil {\n\t\tp.InitBals = new(channel.Allocation)\n\t}\n\toptAppAndDataDec := channel.OptAppAndDataDec{App: &p.App, Data: &p.InitData}\n\treturn perunio.Decode(r, &p.ProposalID, &p.ChallengeDuration, &p.NonceShare,\n\t\toptAppAndDataDec, p.InitBals, &p.FundingAgreement)\n}", "func Decode(mediaType string, body io.Reader, v interface{}) error {\n\tdecodersMu.RLock()\n\td, ok := decoders[mediaType]\n\tdecodersMu.RUnlock()\n\tif ok {\n\t\treturn d(body, v)\n\t}\n\treturn errors.New(\"http client: decoder by media type '\" + mediaType + \"' not found\")\n}", "func DealDeciderOpt(dd DealDecider) RetrievalProviderOption {\n\treturn func(provider *Provider) {\n\t\tprovider.dealDecider = dd\n\t}\n}", "func NewDecoder(r Reader, uf UnmarshalFunc) *Decoder {\n\treturn &Decoder{r: r, buf: make([]byte, 4096), uf: uf}\n}", "func NewDecoder(decoder streaming.Decoder, embeddedDecoder runtime.Decoder) *Decoder {\n\treturn &Decoder{\n\t\tdecoder: decoder,\n\t\tembeddedDecoder: embeddedDecoder,\n\t}\n}", "func WithCodec(codec eh.EventCodec) Option {\n\treturn func(b *EventBus) error {\n\t\tb.codec = codec\n\n\t\treturn nil\n\t}\n}", "func NewDecoder(reader io.Reader) *Decoder {\n\treturn &Decoder{ByteReader: bufio2.NewReaderSize(reader, 2048)}\n}", "func 
(p *CloseProducer) Decode(bc *api.BaseCommand) error {\n\t// set fields\n\tp.PID = bc.CloseProducer.GetProducerId()\n\tp.RID = bc.CloseProducer.GetRequestId()\n\n\treturn nil\n}", "func (p *SubChannelProposalMsg) Decode(r io.Reader) error {\n\treturn perunio.Decode(r, &p.BaseChannelProposal, &p.Parent)\n}", "func WithCounter(counter Counter) Option {\n\treturn func(c *Consumer) error {\n\t\tc.counter = counter\n\t\treturn nil\n\t}\n}", "func (int96DecoderTraits) Decoder(e parquet.Encoding, descr *schema.Column, useDict bool, mem memory.Allocator) TypedDecoder {\n\tif useDict {\n\t\treturn &DictInt96Decoder{dictDecoder{decoder: newDecoderBase(format.Encoding_RLE_DICTIONARY, descr), mem: mem}}\n\t}\n\n\tswitch e {\n\tcase parquet.Encodings.Plain:\n\t\treturn &PlainInt96Decoder{decoder: newDecoderBase(format.Encoding(e), descr)}\n\tdefault:\n\t\tpanic(\"unimplemented encoding type\")\n\t}\n}", "func YAMLDecoder(codec Codec) Codec {\n\treturn &yamlCodec{codec}\n}", "func (tt *Tester) Consume(topic string, key string, msg interface{}, options ...EmitOption) {\n\ttt.waitStartup()\n\n\topts := new(emitOption)\n\topts.applyOptions(options...)\n\tvalue := reflect.ValueOf(msg)\n\tif msg == nil || (value.Kind() == reflect.Ptr && value.IsNil()) {\n\t\ttt.pushMessage(topic, key, nil, opts.headers)\n\t} else {\n\t\tdata, err := tt.codecForTopic(topic).Encode(msg)\n\t\tif err != nil {\n\t\t\tpanic(fmt.Errorf(\"Error encoding value %v: %v\", msg, err))\n\t\t}\n\t\ttt.pushMessage(topic, key, data, opts.headers)\n\t}\n\n\ttt.waitForClients()\n}", "func (fixedLenByteArrayDecoderTraits) Decoder(e parquet.Encoding, descr *schema.Column, useDict bool, mem memory.Allocator) TypedDecoder {\n\tif useDict {\n\t\treturn &DictFixedLenByteArrayDecoder{dictDecoder{decoder: newDecoderBase(format.Encoding_RLE_DICTIONARY, descr), mem: mem}}\n\t}\n\n\tswitch e {\n\tcase parquet.Encodings.Plain:\n\t\treturn &PlainFixedLenByteArrayDecoder{decoder: newDecoderBase(format.Encoding(e), descr)}\n\tdefault:\n\t\tpanic(\"unimplemented encoding type\")\n\t}\n}", "func NewDecoder(r io.Reader) *Decoder {\n\td := &Decoder{}\n\tif rr, ok := r.(*bufio.Reader); ok {\n\t\td.r = rr\n\t} else {\n\t\td.r = bufio.NewReader(r)\n\t}\n\treturn d\n}", "func WithConsumerGroupID(groupID string) ConfigOpt {\n\treturn func(c *kafkalib.ConfigMap) {\n\t\t_ = c.SetKey(\"group.id\", groupID)\n\t}\n}", "func NewDecoder(r io.Reader) *Decoder {\n\td := &Decoder{\n\t\tr: r,\n\t\tserializer: make(chan pair, 8000), // typical PrimitiveBlock contains 8k OSM entities\n\t}\n\td.SetBufferSize(initialBlobBufSize)\n\treturn d\n}", "func decodeDecoder(s *Stream, val reflect.Value) error {\n\tif val.Kind() == reflect.Ptr && val.IsNil() {\n\t\t// set the value to the pointer pointed to the 0 represented by the data type\n\t\tval.Set(reflect.New(val.Type().Elem()))\n\t}\n\t// transfer the reflect type back to Decoder interface type, and call DecodeRLP method\n\treturn val.Interface().(Decoder).DecodeRLP(s)\n}", "func JSONConsumer() Consumer {\n\treturn ConsumerFunc(func(reader io.Reader, data interface{}) error {\n\t\tdec := json.NewDecoder(reader)\n\t\treturn dec.Decode(data)\n\t})\n}", "func (t *AlterConfigsResourceResponse44) Decode(d *Decoder, version int16) error {\n\tvar err error\n\tt.ErrorCode, err = d.Int16()\n\tif err != nil {\n\t\treturn err\n\t}\n\tt.ErrorMessage, err = d.String()\n\tif err != nil {\n\t\treturn err\n\t}\n\tt.ResourceType, err = d.Int8()\n\tif err != nil {\n\t\treturn err\n\t}\n\tt.ResourceName, err = d.String()\n\tif err != nil {\n\t\treturn 
err\n\t}\n\treturn err\n}", "func (o *SMSConnectorSettings) SetDecoders(v []string) {\n\to.Decoders = v\n}", "func (byteArrayDecoderTraits) Decoder(e parquet.Encoding, descr *schema.Column, useDict bool, mem memory.Allocator) TypedDecoder {\n\tif useDict {\n\t\treturn &DictByteArrayDecoder{dictDecoder{decoder: newDecoderBase(format.Encoding_RLE_DICTIONARY, descr), mem: mem}}\n\t}\n\n\tswitch e {\n\tcase parquet.Encodings.Plain:\n\t\treturn &PlainByteArrayDecoder{decoder: newDecoderBase(format.Encoding(e), descr)}\n\tcase parquet.Encodings.DeltaLengthByteArray:\n\t\tif mem == nil {\n\t\t\tmem = memory.DefaultAllocator\n\t\t}\n\t\treturn &DeltaLengthByteArrayDecoder{\n\t\t\tdecoder: newDecoderBase(format.Encoding(e), descr),\n\t\t\tmem: mem,\n\t\t}\n\tcase parquet.Encodings.DeltaByteArray:\n\t\tif mem == nil {\n\t\t\tmem = memory.DefaultAllocator\n\t\t}\n\t\treturn &DeltaByteArrayDecoder{\n\t\t\tDeltaLengthByteArrayDecoder: &DeltaLengthByteArrayDecoder{\n\t\t\t\tdecoder: newDecoderBase(format.Encoding(e), descr),\n\t\t\t\tmem: mem,\n\t\t\t}}\n\tdefault:\n\t\tpanic(\"unimplemented encoding type\")\n\t}\n}", "func NewDecoder(r io.Reader) *Decoder {\n\treturn &Decoder{\n\t\tr: bufio.NewReaderSize(r, bufferSize),\n\t\tctx: context.Background(),\n\t}\n}", "func (c *raptorCodec) NewDecoder(messageLength int) Decoder {\n\treturn newRaptorDecoder(c, messageLength)\n}", "func (d Decoder) Decode(out interface{}) (err error) {\n\treturn d.Provider.Decode(out)\n}", "func (float64DecoderTraits) Decoder(e parquet.Encoding, descr *schema.Column, useDict bool, mem memory.Allocator) TypedDecoder {\n\tif useDict {\n\t\treturn &DictFloat64Decoder{dictDecoder{decoder: newDecoderBase(format.Encoding_RLE_DICTIONARY, descr), mem: mem}}\n\t}\n\n\tswitch e {\n\tcase parquet.Encodings.Plain:\n\t\treturn &PlainFloat64Decoder{decoder: newDecoderBase(format.Encoding(e), descr)}\n\tdefault:\n\t\tpanic(\"unimplemented encoding type\")\n\t}\n}", "func Codec(c codec.Codec) Option {\n\treturn func(o *Options) {\n\t\to.Codec = c\n\t}\n}", "func NewDecoder(rd io.ReadSeeker) *Decoder {\n\n\tdec := &Decoder{\n\t\tinput: rd,\n\t\tcurrentType: typeUninited,\n\t}\n\n\treturn dec\n}", "func ConsumerSetGroupID(groupID string) model.Option {\n\treturn model.FuncOption(func(d *model.Dispatcher) { d.ConsumerGroupID = groupID })\n}", "func NewDecoder(r io.Reader) *Decoder {\n\treturn &Decoder{r, defaultDelimiter, defaultEscape, false, false}\n}", "func NewDecoder(r io.Reader) *Decoder {\n\td := new(Decoder)\n\td.Reset(r)\n\treturn d\n}", "func OverwriteConsumerForStatus(overwrite runtime.Consumer, forStatusCode int) Option {\n\treturn func(rt *runtime.ClientOperation) {\n\t\trt.Reader = &overwriteConsumerReader{\n\t\t\trequestReader: rt.Reader,\n\t\t\tconsumer: overwrite,\n\t\t\tforStatusCode: forStatusCode,\n\t\t}\n\t}\n}", "func receive(dec json.Decoder, ctrl *Control){\n\tif testing {log.Println(\"receive\")}\n\tmsg:= new(Message)\n\tfor {\n\t\tif err := dec.Decode(msg);err != nil {\n\t\t\tfmt.Println(\"Something went wrong, closing connection\")\n\t\t\tpanic(err)\n\t\t\treturn\n\t\t}\n\t\tif msg.Kind==\"PRIVATE\"{\n\t\t\tctrl.updateText(msg.Username+\" whispers: \"+msg.MSG)\n\t\t}else if msg.Kind==\"PUBLIC\"{\n\t\t\tctrl.updateText(msg.Username+\": \"+msg.MSG)\n\t\t}else if msg.Kind==\"ADD\" || msg.Kind==\"DISCONNECT\"{\n\t\t\tctrl.updateText(msg.MSG)\n\t\t\tctrl.updateList(msg.Usernames)\n\t\t}else if msg.Kind==\"SAMENAME\"{\n\t\t\tmyName=msg.Username\n\t\t\tctrl.updateText(msg.MSG)\n\t\t}\t\n\t}\n}", "func NewDecoder(r 
io.ReadSeeker, dam DecoderContainerResolver) (d *Decoder) {\r\n\tif dam == nil {\r\n\t\tdam = &DefaultDecoderContainerResolver\r\n\t}\r\n\td = &Decoder{r: r, dam: dam}\r\n\td.t1, d.t2, d.t4, d.t8 = d.x[:1], d.x[:2], d.x[:4], d.x[:8]\r\n\treturn\r\n}", "func NewDecoder(r io.Reader) ir.Decoder {\n\treturn &swaggerDecoder{\n\t\tr: r,\n\t}\n}", "func NewDecoder(inner *json.Decoder, transformers ...transform.Transformer) Decoder {\n\treturn &decoder{inner, transformers}\n}", "func WithEncoder(encoder func(interface{}) ([]byte, error)) EscapeFormatterOption {\n\treturn func(f *EscapeFormatter) {\n\t\tf.encoder = encoder\n\t}\n}", "func makeOptionalPtrDecoder(typ reflect.Type) (decoder, error) {\n\t// get the underlying element type and the corresponded decoder\n\tetype := typ.Elem()\n\tetypeinfo, err := cachedTypeInfo1(etype, tags{})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdec := func(s *Stream, val reflect.Value) (err error) {\n\t\tkind, size, err := s.Kind()\n\t\t// criteria on checking if the value is empty\n\t\tif err != nil || size == 0 && kind != Byte {\n\t\t\ts.kind = -1 // rearm the kind\n\t\t\tval.Set(reflect.Zero(typ)) // set the value to be 0 with the type pointed by the pointer\n\t\t\treturn err\n\t\t}\n\t\tnewval := val\n\n\t\t// if the val pointed to nil, allocates space (allocate space in storage)\n\t\tif val.IsNil() {\n\t\t\tnewval = reflect.New(etype)\n\t\t}\n\t\t// decode data and set val\n\t\tif err = etypeinfo.decoder(s, newval.Elem()); err == nil {\n\t\t\tval.Set(newval)\n\t\t}\n\t\treturn err\n\t}\n\treturn dec, nil\n}", "func NewDecoder(cfg *Config) (*Decoder, error) {\n\tif cfg == nil {\n\t\tcfg = NewConfig()\n\t}\n\tdec := &Decoder{\n\t\tcfg: cfg,\n\t\tdec: pocketsphinx.Init(cfg.CommandLn()),\n\t}\n\tif dec.dec == nil {\n\t\tcfg.Destroy()\n\t\terr := errors.New(\"pocketsphinx.Init failed\")\n\t\treturn nil, err\n\t}\n\tdec.SetRawDataSize(0)\n\treturn dec, nil\n}", "func WithKeyRetriever(retriever DecryptionKeyRetriever) FileDecryptionOption {\n\treturn func(cfg *fileDecryptConfig) {\n\t\tif retriever != nil {\n\t\t\tcfg.retriever = retriever\n\t\t}\n\t}\n}", "func (int64DecoderTraits) Decoder(e parquet.Encoding, descr *schema.Column, useDict bool, mem memory.Allocator) TypedDecoder {\n\tif useDict {\n\t\treturn &DictInt64Decoder{dictDecoder{decoder: newDecoderBase(format.Encoding_RLE_DICTIONARY, descr), mem: mem}}\n\t}\n\n\tswitch e {\n\tcase parquet.Encodings.Plain:\n\t\treturn &PlainInt64Decoder{decoder: newDecoderBase(format.Encoding(e), descr)}\n\tcase parquet.Encodings.DeltaBinaryPacked:\n\t\tif mem == nil {\n\t\t\tmem = memory.DefaultAllocator\n\t\t}\n\t\treturn &DeltaBitPackInt64Decoder{\n\t\t\tdeltaBitPackDecoder: &deltaBitPackDecoder{\n\t\t\t\tdecoder: newDecoderBase(format.Encoding(e), descr),\n\t\t\t\tmem: mem,\n\t\t\t}}\n\tdefault:\n\t\tpanic(\"unimplemented encoding type\")\n\t}\n}", "func ConsumerSetAsyncNum(num int) model.Option {\n\treturn model.FuncOption(func(d *model.Dispatcher) { d.ConsumerAsyncNum = num })\n}", "func WithOptionalFormatter(enabled bool, formatter ByteFormatter) OutputFormatter {\n\treturn func(i io.Writer, input []byte) []byte {\n\t\tif enabled {\n\t\t\treturn formatter(input)\n\t\t}\n\t\treturn input\n\t}\n}", "func UseJSONUnmarshaler() DecodeOption {\n\treturn func(d *Decoder) error {\n\t\td.useJSONUnmarshaler = true\n\t\treturn nil\n\t}\n}", "func NewDecoder(r io.Reader, v Validator) *Decoder {\n\treturn &Decoder{reader: r, validator: v}\n}", "func NewDecoder(r io.Reader) goa.Decoder {\n\treturn codec.NewDecoder(r, 
&Handle)\n}", "func (t *DescribeConfigsRequest) Decode(d *Decoder, version int16) error {\n\tvar err error\n\t// Resources\n\tif n, err := d.ArrayLength(); err != nil {\n\t\treturn err\n\t} else if n >= 0 {\n\t\tt.Resources = make([]DescribeConfigsResource32, n)\n\t\tfor i := 0; i < n; i++ {\n\t\t\tvar item DescribeConfigsResource32\n\t\t\tif err := (&item).Decode(d, version); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tt.Resources[i] = item\n\t\t}\n\t}\n\tif version >= 1 {\n\t\tt.IncludeSynoyms, err = d.Bool()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn err\n}", "func NewDecoder() Decoder {\n\treturn Decoder{}\n}", "func (*CMsgClientToGCH264Unsupported) Descriptor() ([]byte, []int) {\n\treturn file_dota_gcmessages_client_proto_rawDescGZIP(), []int{160}\n}", "func NewDecoderFn(r io.Reader, fn Decode) *Decoder {\n\ts := bufio.NewScanner(r)\n\ts.Split(ScanRecord)\n\treturn &Decoder{\n\t\ts: s,\n\t\tfn: fn,\n\t}\n}", "func NewDecoder(r io.Reader) *Decoder {\n\treturn &Decoder{r, 0}\n}", "func NewDecoder(\n\tr io.Reader,\n\tminimumTelomereLength int,\n\tmaximumTelomereLength int,\n\tbufferSize int,\n) *Decoder {\n\tif minimumTelomereLength == 0 {\n\t\tpanic(\"minimum telomere length cannot be 0\")\n\t}\n\tif maximumTelomereLength == 0 {\n\t\tpanic(\"maximum telomere length cannot be 0\")\n\t}\n\tif minimumTelomereLength > maximumTelomereLength {\n\t\tpanic(\"minimum telomere length cannot be greater than the maximum\")\n\t}\n\tif minimumTelomereLength >= bufferSize {\n\t\tpanic(\"telomere length must be less than the allocated buffer size\")\n\t}\n\treturn &Decoder{\n\t\tminimum: minimumTelomereLength,\n\t\tmaximum: maximumTelomereLength,\n\t\tb: make([]byte, bufferSize),\n\t\tr: r,\n\t}\n}", "func WithDecryptor(decryptor ProcessorFunc) Option {\n\treturn func(opts *Options) {\n\t\topts.Decryptor = decryptor\n\t}\n}", "func (d *Decoder) Decode(r io.Reader) io.Reader {\n\tdec := newVideoDecryptor(r)\n\treturn &dec\n}", "func NewDecoder(r io.Reader) *Decoder {\n\treturn &Decoder{r}\n}", "func (d Decoder) WithCharset(set charset.Decoder) Decoder {\n\td.set = set\n\treturn d\n}", "func GetContentTypeDecoder(contentType interface{}, reader io.Reader) DecoderInterface {\n\tlog.Print(\"[DEBUG] GetContentTypeDecoder - contentType is \", contentType)\n\tswitch contentType {\n\tcase render.ContentJSON:\n\t\tlog.Print(\"[DEBUG] GetContentTypeDecoder - returning a json decoder\")\n\t\treturn json.NewDecoder(reader)\n\tcase render.ContentXML:\n\t\tlog.Print(\"[DEBUG] GetContentTypeDecoder - returning an xml decoder\")\n\t\treturn xml.NewDecoder(reader)\n\tdefault:\n\t\tlog.Print(\"[DEBUG] GetContentTypeDecoder - returning a json decoder\")\n\t\treturn json.NewDecoder(reader)\n\t}\n}", "func NewDecoder(r io.Reader) *Decoder {\n\treturn &Decoder{\n\t\trd: r,\n\t\tescBuf: make([]byte, 0, 512),\n\t\tsection: endSection,\n\t\tline: 1,\n\t}\n}", "func Codec(codec *encoding.Codec) Opt {\n\treturn func(c *Client) Opt {\n\t\told := c.codec\n\t\tc.codec = codec\n\t\treturn Codec(old)\n\t}\n}", "func WrapConsumerForStatusCode(wrapper WrapperConsumer, forStatusCode int) Option {\n\treturn func(rt *runtime.ClientOperation) {\n\t\trt.Reader = &wrapConsumerReader{\n\t\t\trequestReader: rt.Reader,\n\t\t\twrapper: wrapper,\n\t\t\tforStatusCode: forStatusCode,\n\t\t}\n\t}\n}", "func (mtr *Msmsintprp5Metrics) SetDecode(val metrics.Counter) error {\n\tmtr.metrics.SetCounter(val, mtr.getOffset(\"Decode\"))\n\treturn nil\n}" ]
[ "0.6346354", "0.57452494", "0.5407947", "0.53981495", "0.539218", "0.53732616", "0.53729", "0.52718526", "0.5188233", "0.51518285", "0.510901", "0.50905174", "0.49296317", "0.49056286", "0.48400813", "0.48395008", "0.48391026", "0.4833951", "0.48156092", "0.479748", "0.47844565", "0.47382453", "0.47318757", "0.47130865", "0.47009695", "0.4698769", "0.46802977", "0.46799043", "0.4663129", "0.46592152", "0.46490338", "0.46476495", "0.46217084", "0.46161368", "0.4596414", "0.45699617", "0.4560427", "0.45440298", "0.45423773", "0.45418888", "0.45349628", "0.45308083", "0.45128438", "0.45126948", "0.44979167", "0.44924194", "0.4491118", "0.44604048", "0.44561523", "0.44545048", "0.44513506", "0.44369835", "0.4433414", "0.44166166", "0.4410974", "0.4408225", "0.44018", "0.43909273", "0.43845466", "0.43777585", "0.4363638", "0.43610555", "0.4356592", "0.43546236", "0.43259355", "0.43223116", "0.43152478", "0.43149936", "0.4311869", "0.4309327", "0.42932412", "0.42912802", "0.4284355", "0.42829472", "0.42824143", "0.42759576", "0.42742553", "0.42691642", "0.42634705", "0.42630747", "0.4253786", "0.4253148", "0.42524087", "0.42498204", "0.42414626", "0.4234281", "0.42282647", "0.42237687", "0.4221224", "0.42191517", "0.42129686", "0.42031702", "0.42018303", "0.41993085", "0.41915572", "0.41897377", "0.4188963", "0.41889027", "0.41830093", "0.41816095" ]
0.83780915
0
WithBeforeFuncsConsumerOption provides a way to set BeforeFunc(s) to the consumer
func WithBeforeFuncsConsumerOption(fns ...BeforeFunc) ConsumerOption { return func(c *Consumer) { c.befores = append(c.befores, fns...) } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func WithAfterFuncsConsumerOption(fns ...AfterFunc) ConsumerOption {\n\treturn func(c *Consumer) { c.afters = append(c.afters, fns...) }\n}", "func (response *Response) Before(fn func()) {\n\tresponse.beforeFuncs = append(response.beforeFuncs, fn)\n}", "func BeforeStart(fn func(context.Context) error) Option {\n\treturn func(o *options) {\n\t\to.beforeStart = append(o.beforeStart, fn)\n\t}\n}", "func PublisherBefore(before ...RequestFunc) PublisherOption {\n\treturn func(p *Publisher) { p.before = append(p.before, before...) }\n}", "func ProducerMsgHandlerBefore(before ...BeforeFunc) ProducerMsgOption {\n\treturn func(h *ProducerMsgHandler) { h.Before = append(h.Before, before...) }\n}", "func (s S) Before(f func()) {\n\ts(\"\", f, func(c *config) { c.before = true })\n}", "func BeforeStop(fn func(context.Context) error) Option {\n\treturn func(o *options) {\n\t\to.beforeStop = append(o.beforeStop, fn)\n\t}\n}", "func (s *Server) RegisterBeforeFunc(fn interface{}) error {\n\tif err := validCtxFunc(fn, s.ctxType); err != nil {\n\t\treturn err\n\t}\n\ts.beforeFns = append(s.beforeFns, reflect.ValueOf(fn))\n\treturn nil\n}", "func (x Go) Before(before func()) Go {\n\tx.before = before\n\treturn x\n}", "func (tc *TestCase) SetPreTestFunc(curFunc func(data interface{}, context *TestContext)) {\n\tif tc.PreTestFunc == nil {\n\t\ttc.PreTestFunc = curFunc\n\t}\n}", "func (session *Session) Before(closures func(interface{})) *Session {\n\tif closures != nil {\n\t\tsession.beforeClosures = append(session.beforeClosures, closures)\n\t}\n\treturn session\n}", "func WithBefore(f RoundTripperBeforeFunc) RoundTripperOption {\n\treturn func(cfg *roundTripperConfig) {\n\t\tcfg.before = f\n\t}\n}", "func (f *Fastglue) Before(fm ...FastMiddleware) {\n\tf.before = append(f.before, fm...)\n}", "func WithTracer(tracer trace.Tracer) OptionFunc {\n\treturn func(c *callbacks) {\n\t\tc.tracer = tracer\n\t}\n}", "func BeforeFunc(ctx *cli.Context) (err error) {\n\tData, err = helper.ReadData()\n\tGA = helper.NewGoogleAuthenticator()\n\treturn err\n}", "func (h *editorHooks) AddBeforeReadline(f func()) {\n\th.beforeReadline = append(h.beforeReadline, f)\n}", "func (s *BaseConcertoListener) EnterFuncSpec(ctx *FuncSpecContext) {}", "func (r *Router) UseBefore(fns ...Middleware) *Router {\n\tr.middlewares.Before = append(r.middlewares.Before, fns...)\n\n\treturn r\n}", "func (m *MockCallback) AddBeforeInvoke(arg0 rpc.CallbackFunc) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"AddBeforeInvoke\", arg0)\n}", "func WithDecoderConsumerOption(fn Decoder) ConsumerOption {\n\treturn func(c *Consumer) { c.dec = fn }\n}", "func (ed *Editor) AddBeforeReadline(f func()) {\n\ted.BeforeReadline = append(ed.BeforeReadline, f)\n}", "func (b *Bar) PrependFunc(f DecoratorFunc) *Bar {\n\tb.mtx.Lock()\n\tdefer b.mtx.Unlock()\n\tb.prependFuncs = append(b.prependFuncs, f)\n\treturn b\n}", "func (s Suite) Before(f func(*testing.T)) bool {\n\treturn s(\"\", func(t *testing.T, _ G, _ S) {\n\t\tt.Helper()\n\t\tf(t)\n\t}, func(c *config) { c.before = true })\n}", "func WithCallback(callback EnableCallback) ProviderOpt {\n\treturn func(opts *providerOpts) {\n\t\topts.callback = callback\n\t}\n}", "func PreHook(f func()) {\n\thookLock.Lock()\n\tdefer hookLock.Unlock()\n\n\tprehooks = append(prehooks, f)\n}", "func WithOffsetConsumerOption(offset int64) ConsumerOption {\n\treturn func(c *Consumer) {\n\t\tswitch offset {\n\t\tcase LastOffset:\n\t\t\tc.config.StartOffset = LastOffset\n\t\tcase FirstOffset:\n\t\t\tc.config.StartOffset = 
FirstOffset\n\t\tdefault:\n\t\t\tc.config.StartOffset = FirstOffset\n\t\t}\n\t}\n}", "func (o *Offer) doBeforeInsertHooks(exec boil.Executor) (err error) {\n\tfor _, hook := range offerBeforeInsertHooks {\n\t\tif err := hook(exec, o); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func WithEventDefaulter(fn EventDefaulter) Option {\n\treturn func(i interface{}) error {\n\t\tif c, ok := i.(*ceClient); ok {\n\t\t\tif fn == nil {\n\t\t\t\treturn fmt.Errorf(\"client option was given an nil event defaulter\")\n\t\t\t}\n\t\t\tc.eventDefaulterFns = append(c.eventDefaulterFns, fn)\n\t\t}\n\t\treturn nil\n\t}\n}", "func WithReaderConsumerOption(reader *kafgo.Reader) ConsumerOption {\n\treturn func(c *Consumer) { c.reader = reader }\n}", "func WithPollGoroutines(pollGoroutines int) Option {\n\treturn func(i interface{}) error {\n\t\tif c, ok := i.(*ceClient); ok {\n\t\t\tc.pollGoroutines = pollGoroutines\n\t\t}\n\t\treturn nil\n\t}\n}", "func WithCounter(counter Counter) Option {\n\treturn func(c *Consumer) error {\n\t\tc.counter = counter\n\t\treturn nil\n\t}\n}", "func WithFunc(f func(r Result)) InvocationOption {\n\treturn func(op InvocationOp) InvocationOp { op.Func = f; return op }\n}", "func (o *CMFFamilyUserPoliciesTake) doBeforeInsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {\n\tif boil.HooksAreSkipped(ctx) {\n\t\treturn nil\n\t}\n\n\tfor _, hook := range cmfFamilyUserPoliciesTakeBeforeInsertHooks {\n\t\tif err := hook(ctx, exec, o); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func WithRequestEditorFn(fn RequestEditorFn) ClientOption {\n\treturn func(c *Client) error {\n\t\tc.RequestEditors = append(c.RequestEditors, fn)\n\t\treturn nil\n\t}\n}", "func WithRequestEditorFn(fn RequestEditorFn) ClientOption {\n\treturn func(c *Client) error {\n\t\tc.RequestEditors = append(c.RequestEditors, fn)\n\t\treturn nil\n\t}\n}", "func WithRequestEditorFn(fn RequestEditorFn) ClientOption {\n\treturn func(c *Client) error {\n\t\tc.RequestEditors = append(c.RequestEditors, fn)\n\t\treturn nil\n\t}\n}", "func WithRequestEditorFn(fn RequestEditorFn) ClientOption {\n\treturn func(c *Client) error {\n\t\tc.RequestEditors = append(c.RequestEditors, fn)\n\t\treturn nil\n\t}\n}", "func WithCheckpoint(checkpoint Checkpoint) Option {\n\treturn func(c *Consumer) error {\n\t\tc.checkpoint = checkpoint\n\t\treturn nil\n\t}\n}", "func WithCallerSkip(skip int) OptionFunc {\n\treturn func(opt *Options) {\n\t\topt.CallerSkip = skip\n\t}\n}", "func (s *BaseConcertoListener) EnterFuncCallSpec(ctx *FuncCallSpecContext) {}", "func WithFnAnnotator(provider FnAnnotator) Option {\n\treturn func(ctx context.Context, s *Server) error {\n\t\ts.fnAnnotator = provider\n\t\treturn nil\n\t}\n}", "func WithFuncMap(fm map[string]interface{}) Option {\n\treturn optFuncMap(fm)\n}", "func WithFuncs(funcs gotemplate.FuncMap) Opt {\n\treturn func(t *gotemplate.Template) (*gotemplate.Template, error) {\n\t\treturn t.Funcs(funcs), nil\n\t}\n}", "func (o *CMFPaidprogramComment) doBeforeInsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {\n\tif boil.HooksAreSkipped(ctx) {\n\t\treturn nil\n\t}\n\n\tfor _, hook := range cmfPaidprogramCommentBeforeInsertHooks {\n\t\tif err := hook(ctx, exec, o); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func (o *CMFAdminMenu) doBeforeInsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {\n\tif boil.HooksAreSkipped(ctx) {\n\t\treturn nil\n\t}\n\n\tfor _, hook := range 
cmfAdminMenuBeforeInsertHooks {\n\t\tif err := hook(ctx, exec, o); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func (o *Onpar) BeforeEach(f interface{}) {\n\tif o.current.before != nil {\n\t\tpanic(fmt.Sprintf(\"Level '%s' already has a registered BeforeEach\", o.current.name))\n\t}\n\t_, fileName, lineNumber, _ := runtime.Caller(o.callCount)\n\n\tv := reflect.ValueOf(f)\n\to.current.before = &specInfo{\n\t\tf: &v,\n\t\tft: reflect.TypeOf(f),\n\t\tfileName: fileName,\n\t\tlineNumber: lineNumber,\n\t}\n}", "func (a *Application) SetBeforeDrawFunc(handler func(screen tcell.Screen) bool) *Application {\n\ta.beforeDraw = handler\n\treturn a\n}", "func (o *Channel) doBeforeInsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {\n\tif boil.HooksAreSkipped(ctx) {\n\t\treturn nil\n\t}\n\n\tfor _, hook := range channelBeforeInsertHooks {\n\t\tif err := hook(ctx, exec, o); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func (a *ShowAction) BeforeQueryHandlerFunc(f BeforeShowQueryHandlerFunc) {\n\ta.beforeQuery = f\n}", "func (c *IRacing) BeforeRequest(f BeforeFunc) {\n\tc.BeforeFuncs = append(c.BeforeFuncs, f)\n}", "func SetArgsFunc(fn func() []string) ServiceOption {\n\tif fn == nil {\n\t\tlog.Fatal(\"ArgsFunc is nil\")\n\t}\n\treturn func(s *Service) {\n\t\ts.argsFunc = fn\n\t}\n}", "func PersistentPreRunEFn(ctx *Context) func(*cobra.Command, []string) error {\n\treturn func(cmd *cobra.Command, args []string) error {\n\t\trootViper := viper.New()\n\t\trootViper.BindPFlags(cmd.Flags())\n\t\trootViper.BindPFlags(cmd.PersistentFlags())\n\n\t\tif cmd.Name() == version.Cmd.Name() {\n\t\t\treturn nil\n\t\t}\n\n\t\tconfig, err := interceptConfigs(ctx, rootViper)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tlogger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))\n\t\tlogger, err = tmflags.ParseLogLevel(config.LogLevel, logger, tmcfg.DefaultLogLevel())\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif rootViper.GetBool(tmcli.TraceFlag) {\n\t\t\tlogger = log.NewTracingLogger(logger)\n\t\t}\n\n\t\tlogger = logger.With(\"module\", \"main\")\n\t\tctx.Config = config\n\t\tctx.Logger = logger\n\n\t\treturn nil\n\t}\n}", "func (ctx Context) WithRequestCreatorFunc(creator RequestCreatorFunc) Context {\n\treturn Context{context.WithValue(ctx, keyRequestCreator, creator)}\n}", "func (s *Consumer) BeforeRun() error {\n\treturn nil\n}", "func (ctx *Context) UseFunc(fns ...MiddlewareFunc) {\n\tif ctx.middlewares == nil {\n\t\tctx.middlewares = make([]Middleware, 0)\n\t}\n\tfor _, fn := range fns {\n\t\tctx.middlewares = append(ctx.middlewares, fn)\n\t}\n}", "func WithKeyFunc(keyFunc jwt.Keyfunc) Option {\n\treturn func(o *options) {\n\t\to.keyFunc = keyFunc\n\t}\n}", "func (o *OauthClient) doBeforeInsertHooks(exec boil.Executor) (err error) {\n\tfor _, hook := range oauthClientBeforeInsertHooks {\n\t\tif err := hook(exec, o); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func (o *Segment) doBeforeInsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {\n\tif boil.HooksAreSkipped(ctx) {\n\t\treturn nil\n\t}\n\n\tfor _, hook := range segmentBeforeInsertHooks {\n\t\tif err := hook(ctx, exec, o); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func withKubeClientProvider(kcp kube.ClientProvider) option {\n\treturn func(p *kubernetesprocessor) error {\n\t\treturn p.initKubeClient(p.logger, kcp)\n\t}\n}", "func (o *CMFFamiliesPolicy) doBeforeInsertHooks(ctx context.Context, exec boil.ContextExecutor) 
(err error) {\n\tif boil.HooksAreSkipped(ctx) {\n\t\treturn nil\n\t}\n\n\tfor _, hook := range cmfFamiliesPolicyBeforeInsertHooks {\n\t\tif err := hook(ctx, exec, o); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func (o *RunSuiteOptions) SuiteWithProviderPreSuite() error {\n\tif err := o.SuiteWithInitializedProviderPreSuite(); err != nil {\n\t\treturn err\n\t}\n\to.GinkgoRunSuiteOptions.MatchFn = o.config.MatchFn()\n\treturn nil\n}", "func WithNewFunc(newFunc func() DBRowUnmarshaler) StmtOption {\n\treturn func(qc *StmtContext) error {\n\t\tqc.newFunc = newFunc\n\t\treturn nil\n\t}\n}", "func WithRequestEditorFn(fn RequestEditorFn) ClientOption {\n\treturn func(c *Client) error {\n\t\tc.RequestEditor = fn\n\t\treturn nil\n\t}\n}", "func WithRequestEditorFn(fn RequestEditorFn) ClientOption {\n\treturn func(c *Client) error {\n\t\tc.RequestEditor = fn\n\t\treturn nil\n\t}\n}", "func WithRequestEditorFn(fn RequestEditorFn) ClientOption {\n\treturn func(c *Client) error {\n\t\tc.RequestEditor = fn\n\t\treturn nil\n\t}\n}", "func WithRequestEditorFn(fn RequestEditorFn) ClientOption {\n\treturn func(c *Client) error {\n\t\tc.RequestEditor = fn\n\t\treturn nil\n\t}\n}", "func WithRequestEditorFn(fn RequestEditorFn) ClientOption {\n\treturn func(c *Client) error {\n\t\tc.RequestEditor = fn\n\t\treturn nil\n\t}\n}", "func WithRequestEditorFn(fn RequestEditorFn) ClientOption {\n\treturn func(c *Client) error {\n\t\tc.RequestEditor = fn\n\t\treturn nil\n\t}\n}", "func (p *Parser) WithConfigFn(fns ...func(cfg *Config)) *Parser {\n\tfor _, fn := range fns {\n\t\tfn(p.cfg)\n\t}\n\treturn p\n}", "func (c *Container) ProvideFunc(funcs ...InjectFunc) {\n\tfor i := range funcs {\n\t\tifn := funcs[i]\n\t\tifn.validate()\n\n\t\tc.unnamedFunctions = append(c.unnamedFunctions, ifn)\n\t}\n}", "func (c Command) WithFunc(name string, function interface{}) *Builder {\n\tb := c.builder()\n\tb.funcs[name] = function\n\treturn b\n}", "func (_m *CognitoIdentityProviderAPI) ListUserPoolClientsPagesWithContext(_a0 context.Context, _a1 *cognitoidentityprovider.ListUserPoolClientsInput, _a2 func(*cognitoidentityprovider.ListUserPoolClientsOutput, bool) bool, _a3 ...request.Option) error {\n\t_va := make([]interface{}, len(_a3))\n\tfor _i := range _a3 {\n\t\t_va[_i] = _a3[_i]\n\t}\n\tvar _ca []interface{}\n\t_ca = append(_ca, _a0, _a1, _a2)\n\t_ca = append(_ca, _va...)\n\tret := _m.Called(_ca...)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(context.Context, *cognitoidentityprovider.ListUserPoolClientsInput, func(*cognitoidentityprovider.ListUserPoolClientsOutput, bool) bool, ...request.Option) error); ok {\n\t\tr0 = rf(_a0, _a1, _a2, _a3...)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func SetProtoFuncs(funcs template.FuncMap) {\n\tfuncs[\"request\"] = func() interface{} { return nil }\n\tfuncs[\"param\"] = func(key string) string { return \"\" }\n}", "func SetProtoFuncs(funcs template.FuncMap) {\n\tfuncs[\"request\"] = func() interface{} { return nil }\n\tfuncs[\"param\"] = func(key string) string { return \"\" }\n}", "func (o *BoardsSectionsPosition) doBeforeInsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {\n\tif boil.HooksAreSkipped(ctx) {\n\t\treturn nil\n\t}\n\n\tfor _, hook := range boardsSectionsPositionBeforeInsertHooks {\n\t\tif err := hook(ctx, exec, o); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func ContextFunc(f func() (ctx context.Context, afterCall func())) LoggerOption {\n\treturn contextFunc(f)\n}", "func NewWithFunc[V 
any](withFn func(ctx context.Context, useFn func(V)) error) *WithFunc[V] {\n\treturn &WithFunc[V]{\n\t\twithFn: withFn,\n\t}\n}", "func WithInboundContextDecorator(dec func(context.Context, binding.Message) context.Context) Option {\n\treturn func(i interface{}) error {\n\t\tif c, ok := i.(*ceClient); ok {\n\t\t\tc.inboundContextDecorators = append(c.inboundContextDecorators, dec)\n\t\t}\n\t\treturn nil\n\t}\n}", "func WithCtxExtractor(fn func(ctx context.Context) []interface{}) Option {\n\treturn func(opts *logOptions) {\n\t\tparent := opts.ctxExt\n\t\topts.ctxExt = func(ctx context.Context) []interface{} {\n\t\t\tif parent != nil {\n\t\t\t\treturn append(parent(ctx), fn(ctx)...)\n\t\t\t}\n\n\t\t\treturn fn(ctx)\n\t\t}\n\t}\n}", "func (_m *LambdaAPI) ListFunctionsPagesWithContext(_a0 aws.Context, _a1 *lambda.ListFunctionsInput, _a2 func(*lambda.ListFunctionsOutput, bool) bool, _a3 ...request.Option) error {\n\t_va := make([]interface{}, len(_a3))\n\tfor _i := range _a3 {\n\t\t_va[_i] = _a3[_i]\n\t}\n\tvar _ca []interface{}\n\t_ca = append(_ca, _a0, _a1, _a2)\n\t_ca = append(_ca, _va...)\n\tret := _m.Called(_ca...)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(aws.Context, *lambda.ListFunctionsInput, func(*lambda.ListFunctionsOutput, bool) bool, ...request.Option) error); ok {\n\t\tr0 = rf(_a0, _a1, _a2, _a3...)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func (o *CMFTurntable) doBeforeInsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {\n\tif boil.HooksAreSkipped(ctx) {\n\t\treturn nil\n\t}\n\n\tfor _, hook := range cmfTurntableBeforeInsertHooks {\n\t\tif err := hook(ctx, exec, o); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func WithRunFunc(fn func(queue.QueuedMessage) error) Option {\n\treturn func(w *Worker) {\n\t\tw.runFunc = fn\n\t}\n}", "func (o *Doc) doBeforeInsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {\n\tif boil.HooksAreSkipped(ctx) {\n\t\treturn nil\n\t}\n\n\tfor _, hook := range docBeforeInsertHooks {\n\t\tif err := hook(ctx, exec, o); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func WithRunFunc(run RunFunc) Option {\n\treturn func(a *App) {\n\t\ta.runFunc = run\n\t}\n}", "func (s *Selection) Before(selector string) *Selection {\n\treturn s.BeforeMatcher(compileMatcher(selector))\n}", "func (o *Offer) doBeforeDeleteHooks(exec boil.Executor) (err error) {\n\tfor _, hook := range offerBeforeDeleteHooks {\n\t\tif err := hook(exec, o); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func (o *Description) doBeforeInsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {\n\tfor _, hook := range descriptionBeforeInsertHooks {\n\t\tif err := hook(ctx, exec, o); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func (o *Customer) doBeforeInsertHooks(exec boil.Executor) (err error) {\n\tfor _, hook := range customerBeforeInsertHooks {\n\t\tif err := hook(exec, o); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func CBeforeSuite(body func(context.Context), timeout time.Duration) {\n\tginkgo.BeforeSuite(contextify(body, timeout), timeout.Seconds())\n}", "func (db *wrapDB) beforeCallback(scope *gorm.Scope, cn callbackName) {\n\tv, ok := scope.Get(gormCtx)\n\tif !ok {\n\t\treturn\n\t}\n\tctx := v.(context.Context)\n\n\tdriver := db.gorm.Dialect().GetName()\n\top := \"gorm:\" + driver + \":\" + strings.ToLower(cn.ToString())\n\n\tspan, newCtx := opentracing.StartSpanFromContext(ctx, op)\n\text.DBType.Set(span, 
\"sql\")\n\text.DBInstance.Set(span, driver)\n\text.SpanKind.Set(span, \"client/server\")\n\n\tscope.Set(gormCtx, newCtx)\n}", "func Before(ctx *cli.Context) error {\n\treturn nil\n}", "func (o *CMFUserSuper) doBeforeInsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {\n\tif boil.HooksAreSkipped(ctx) {\n\t\treturn nil\n\t}\n\n\tfor _, hook := range cmfUserSuperBeforeInsertHooks {\n\t\tif err := hook(ctx, exec, o); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func WithSpanContextFunc(f func(context.Context) trace.SpanContext) Option {\n\treturn spanContextFuncOption{SpanContextFunc: f}\n}", "func (o *Subscriber) doBeforeInsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {\n\tfor _, hook := range subscriberBeforeInsertHooks {\n\t\tif err := hook(ctx, exec, o); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func WithPreRunE(preRunE func(cmd *cobra.Command, args []string) error) RunnerOption {\n\treturn func(k *PluginRunner) {\n\t\tk.cmd.PreRunE = func(cmd *cobra.Command, args []string) error {\n\t\t\tif err := preRunE(cmd, args); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\t// Explicitly call through to the default implementation to invoke Configurable.Config\n\t\t\treturn k.preRun(cmd, args)\n\t\t}\n\t}\n}", "func clientBefore(ctx context.Context, set *optionSet, method, grpcType, remoteEndpoint string) context.Context {\n\tevent := set.eventLoggerEntry.GetEventFactory().CreateEvent(\n\t\trkquery.WithZapLogger(set.eventLoggerOverride),\n\t\trkquery.WithEncoding(set.eventLoggerEncoding),\n\t\trkquery.WithAppName(rkentry.GlobalAppCtx.GetAppInfoEntry().AppName),\n\t\trkquery.WithAppVersion(rkentry.GlobalAppCtx.GetAppInfoEntry().Version),\n\t\trkquery.WithEntryName(set.EntryName),\n\t\trkquery.WithEntryType(set.EntryType))\n\tevent.SetStartTime(time.Now())\n\n\tremoteIp, remotePort, _ := net.SplitHostPort(remoteEndpoint)\n\tgrpcService, grpcMethod := rkgrpcinter.GetGrpcInfo(method)\n\n\tevent.SetRemoteAddr(remoteIp + \":\" + remotePort)\n\tevent.SetOperation(method)\n\n\tpayloads := []zap.Field{\n\t\tzap.String(\"remoteIp\", remoteIp),\n\t\tzap.String(\"remotePort\", remotePort),\n\t\tzap.String(\"grpcService\", grpcService),\n\t\tzap.String(\"grpcMethod\", grpcMethod),\n\t\tzap.String(\"grpcType\", grpcType),\n\t}\n\n\tif d, ok := ctx.Deadline(); ok {\n\t\tpayloads = append(payloads, zap.String(\"deadline\", d.Format(time.RFC3339)))\n\t}\n\n\tevent.AddPayloads(payloads...)\n\n\t// insert logger and event\n\trkgrpcinter.AddToClientContextPayload(ctx, rkgrpcinter.RpcEventKey, event)\n\trkgrpcinter.AddToClientContextPayload(ctx, rkgrpcinter.RpcLoggerKey, set.ZapLogger)\n\n\treturn ctx\n}", "func OptShouldOpenProvider(provider ShouldOpenProvider) Option {\n\treturn func(b *Breaker) {\n\t\tb.ShouldOpenProvider = provider\n\t}\n}", "func (cred *OAuth2Credential) SetClientFunc(f ctxclient.Func) *OAuth2Credential {\n\tcred.mu.Lock()\n\tcred.Func = f\n\tcred.mu.Unlock()\n\treturn cred\n}", "func (o *CMFFamilyUserPoliciesTake) doBeforeDeleteHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {\n\tif boil.HooksAreSkipped(ctx) {\n\t\treturn nil\n\t}\n\n\tfor _, hook := range cmfFamilyUserPoliciesTakeBeforeDeleteHooks {\n\t\tif err := hook(ctx, exec, o); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func (s *BasePlSqlParserListener) EnterFunction_spec(ctx *Function_specContext) {}" ]
[ "0.6618595", "0.5789044", "0.57522064", "0.5682141", "0.56307137", "0.54601324", "0.5457689", "0.54529136", "0.5351939", "0.52193326", "0.5124414", "0.50656784", "0.50458676", "0.49094933", "0.48592332", "0.4859097", "0.48306805", "0.48134655", "0.4813117", "0.48100883", "0.47860903", "0.4785624", "0.47482398", "0.4746206", "0.47374424", "0.47118932", "0.46974066", "0.46942425", "0.46941364", "0.46726334", "0.4670915", "0.46675342", "0.46494216", "0.46487543", "0.46487543", "0.46487543", "0.46487543", "0.46453628", "0.46319255", "0.46170783", "0.46134412", "0.46097964", "0.46070042", "0.4575215", "0.45292303", "0.4518652", "0.45163226", "0.4511783", "0.45062432", "0.44881457", "0.448075", "0.44782543", "0.44768274", "0.4470024", "0.44645354", "0.44556653", "0.4454701", "0.44356328", "0.44335184", "0.44289887", "0.44256806", "0.44255185", "0.44154903", "0.44154903", "0.44154903", "0.44154903", "0.44154903", "0.44154903", "0.44034177", "0.44021714", "0.4393977", "0.4390073", "0.43777773", "0.43777773", "0.43757048", "0.4374376", "0.4372919", "0.43724492", "0.43701202", "0.4358849", "0.4350518", "0.43493196", "0.4342875", "0.434268", "0.43405694", "0.43342927", "0.43322316", "0.43278998", "0.4324171", "0.43192893", "0.43051863", "0.430479", "0.43003958", "0.42896664", "0.4285665", "0.42788565", "0.4276335", "0.42680678", "0.42612055", "0.426026" ]
0.87780225
0
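
The record above describes the functional-options pattern: each ConsumerOption mutates a *Consumer while it is being constructed. Below is a minimal self-contained sketch of how such options compose, assuming the Consumer, BeforeFunc, and NewConsumer names implied by the snippets in this record (none of them confirmed by the source):

package kafka

import "context"

// BeforeFunc is an assumed signature: it may enrich the context
// before a message is handled.
type BeforeFunc func(ctx context.Context, msg []byte) context.Context

// Consumer holds the hooks collected from the options.
type Consumer struct {
	befores []BeforeFunc
}

// ConsumerOption mutates a Consumer during construction.
type ConsumerOption func(c *Consumer)

// WithBeforeFuncsConsumerOption appends BeforeFunc(s), matching the
// document entry above.
func WithBeforeFuncsConsumerOption(fns ...BeforeFunc) ConsumerOption {
	return func(c *Consumer) { c.befores = append(c.befores, fns...) }
}

// NewConsumer applies each option in order, so later options can
// extend or override earlier ones.
func NewConsumer(opts ...ConsumerOption) *Consumer {
	c := &Consumer{}
	for _, opt := range opts {
		opt(c)
	}
	return c
}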
WithAfterFuncsConsumerOption provides a way to set AfterFunc(s) to the consumer
func WithAfterFuncsConsumerOption(fns ...AfterFunc) ConsumerOption { return func(c *Consumer) { c.afters = append(c.afters, fns...) } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func WithBeforeFuncsConsumerOption(fns ...BeforeFunc) ConsumerOption {\n\treturn func(c *Consumer) { c.befores = append(c.befores, fns...) }\n}", "func (response *Response) After(fn func()) {\n\tresponse.afterFuncs = append(response.afterFuncs, fn)\n}", "func After(routines []func(), callback func()) {\n\tvar wg sync.WaitGroup\n\twg.Add(len(routines))\n\tfor _, routine := range routines {\n\t\tgo func(f func()) {\n\t\t\tf()\n\t\t\twg.Done()\n\t\t}(routine)\n\t}\n\tgo func() {\n\t\twg.Wait()\n\t\tcallback()\n\t}()\n}", "func ProducerMsgHandlerAfter(after ...AfterFunc) ProducerMsgOption {\n\treturn func(h *ProducerMsgHandler) { h.After = append(h.After, after...) }\n}", "func (s S) After(f func()) {\n\ts(\"\", f, func(c *config) { c.after = true })\n}", "func AfterStop(fn func(context.Context) error) Option {\n\treturn func(o *options) {\n\t\to.afterStop = append(o.afterStop, fn)\n\t}\n}", "func AfterFunc(ctx *cli.Context) error {\n\treturn helper.WriteData(Data)\n}", "func PublisherAfter(after ...PublisherResponseFunc) PublisherOption {\n\treturn func(p *Publisher) { p.after = append(p.after, after...) }\n}", "func WithAfter(f RoundTripperAfterFunc) RoundTripperOption {\n\treturn func(cfg *roundTripperConfig) {\n\t\tcfg.after = f\n\t}\n}", "func (r *Router) UseAfterFunc(f func(Context) error) {\n\tr.UseAfter(HandlerFunc(f))\n}", "func (f *Fastglue) After(fm ...FastMiddleware) {\n\tf.after = append(f.after, fm...)\n}", "func AfterStart(fn func(context.Context) error) Option {\n\treturn func(o *options) {\n\t\to.afterStart = append(o.afterStart, fn)\n\t}\n}", "func RegisterAfterShutdown(f func()) Option {\n\treturn optionFunc(func(c *config) {\n\t\tc.afterShutdown = append(c.afterShutdown, f)\n\t})\n}", "func (x Go) After(after func(), deferred ...bool) Go {\n\tx.after = after\n\tif len(deferred) > 0 {\n\t\tx.deferAfter = deferred[0]\n\t}\n\treturn x\n}", "func (session *Session) After(closures func(interface{})) *Session {\n\tif closures != nil {\n\t\tsession.afterClosures = append(session.afterClosures, closures)\n\t}\n\treturn session\n}", "func (r *Router) UseAfter(fns ...Middleware) *Router {\n\tr.middlewares.After = append(r.middlewares.After, fns...)\n\n\treturn r\n}", "func WithDecoderConsumerOption(fn Decoder) ConsumerOption {\n\treturn func(c *Consumer) { c.dec = fn }\n}", "func (s *Server) RegisterAfterFunc(fn interface{}) error {\n\tif err := validCtxFunc(fn, s.ctxType); err != nil {\n\t\treturn err\n\t}\n\ts.afterFns = append(s.beforeFns, reflect.ValueOf(fn))\n\treturn nil\n}", "func (t *Time) AfterFunc(distance Distance, callback func()) Watcher {\n\treturn NewTimeWatcher(time.AfterFunc(time.Duration(distance), callback))\n}", "func (cs *Callbacks) AddAfterServedCallBack(f func(context *Context)) {\n\tcs.Lock()\n\tcs.afterServed = append(cs.afterServed, f)\n\tcs.Unlock()\n}", "func (e *EventRoll) After(f ...Callabut) {\n\tif e.fired {\n\t\tfor _, v := range f {\n\t\t\te.Handlers.Add(v, nil)\n\t\t\tv(e.cache)\n\t\t}\n\t\treturn\n\t}\n\n\tfor _, v := range f {\n\t\te.Handlers.Add(v, nil)\n\t}\n}", "func afterFunc(c clock.Clock, d time.Duration, f func()) (cancel func()) {\n\tt := c.NewTimer(d)\n\tcancelCh := make(chan struct{})\n\tcancelOnce := sync.Once{}\n\tcancel = func() {\n\t\tt.Stop()\n\t\tcancelOnce.Do(func() {\n\t\t\tclose(cancelCh)\n\t\t})\n\t}\n\n\tgo func() {\n\t\tdefer cancel()\n\n\t\tselect {\n\t\tcase <-t.C():\n\t\t\t// We don't need to check whether the channel has returned a zero\n\t\t\t// value since t.C is never closed as per the timer.Stop\n\t\t\t// 
documentation.\n\t\t\tf()\n\t\tcase <-cancelCh:\n\t\t\treturn\n\t\t}\n\t}()\n\n\treturn cancel\n}", "func (rows *Rows) AfterClose(f func(*Rows)) {\n\tif rows.afterClose == nil {\n\t\trows.afterClose = f\n\t} else {\n\t\tprevFn := rows.afterClose\n\t\trows.afterClose = func(rows *Rows) {\n\t\t\tf(rows)\n\t\t\tprevFn(rows)\n\t\t}\n\t}\n}", "func (s Suite) After(f func(*testing.T)) bool {\n\treturn s(\"\", func(t *testing.T, _ G, _ S) {\n\t\tt.Helper()\n\t\tf(t)\n\t}, func(c *config) { c.after = true })\n}", "func TestAfterFunc(t *testing.T) {\n\ti := 10\n\tc := make(chan bool)\n\tvar f func()\n\tf = func() {\n\t\ti--\n\t\tif i >= 0 {\n\t\t\tAfterFunc(0, f)\n\t\t\tSleep(1e9)\n\t\t} else {\n\t\t\tc <- true\n\t\t}\n\t}\n\n\tAfterFunc(0, f)\n\t<-c\n}", "func (s *timeScheduler) After(delay time.Duration, f func()) {\n\ts.mu.Lock()\n\tdefer s.mu.Unlock()\n\ts.stop()\n\ts.timer = time.AfterFunc(delay, f)\n}", "func (o *Description) doAfterSelectHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {\n\tfor _, hook := range descriptionAfterSelectHooks {\n\t\tif err := hook(ctx, exec, o); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func (o *Offer) doAfterSelectHooks(exec boil.Executor) (err error) {\n\tfor _, hook := range offerAfterSelectHooks {\n\t\tif err := hook(exec, o); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func After(lib, migration string) MigrationOption {\n\treturn func(m Migration) {\n\t\tbase := m.Base()\n\t\trawAfter := make([]MigrationName, len(base.rawAfter)+1)\n\t\tcopy(rawAfter, base.rawAfter) // copy in case there is another reference\n\t\trawAfter[len(base.rawAfter)] = MigrationName{\n\t\t\tLibrary: lib,\n\t\t\tName: migration,\n\t\t}\n\t\tbase.rawAfter = rawAfter\n\t}\n}", "func (s *Consumer) AfterRun() error {\n\treturn nil\n}", "func OnBlockDoneOption(handler func(size int)) Option {\n\tif handler == nil {\n\t\thandler = onBlockDone\n\t}\n\treturn func(a applier) error {\n\t\tswitch rw := a.(type) {\n\t\tcase nil:\n\t\t\ts := fmt.Sprintf(\"OnBlockDoneOption(%s)\", reflect.TypeOf(handler).String())\n\t\t\treturn lz4errors.Error(s)\n\t\tcase *Writer:\n\t\t\trw.handler = handler\n\t\t\treturn nil\n\t\tcase *Reader:\n\t\t\trw.handler = handler\n\t\t\treturn nil\n\t\t}\n\t\treturn lz4errors.ErrOptionNotApplicable\n\t}\n}", "func (o *Channel) doAfterSelectHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {\n\tif boil.HooksAreSkipped(ctx) {\n\t\treturn nil\n\t}\n\n\tfor _, hook := range channelAfterSelectHooks {\n\t\tif err := hook(ctx, exec, o); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func CAfterEach(body func(context.Context), timeout time.Duration) {\n\tginkgo.AfterEach(contextify(body, timeout), timeout.Seconds())\n}", "func (o *CMFTurntable) doAfterSelectHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {\n\tif boil.HooksAreSkipped(ctx) {\n\t\treturn nil\n\t}\n\n\tfor _, hook := range cmfTurntableAfterSelectHooks {\n\t\tif err := hook(ctx, exec, o); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func (o *CMFPaidprogramComment) doAfterSelectHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {\n\tif boil.HooksAreSkipped(ctx) {\n\t\treturn nil\n\t}\n\n\tfor _, hook := range cmfPaidprogramCommentAfterSelectHooks {\n\t\tif err := hook(ctx, exec, o); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func (m *Mock) AfterFunc(d time.Duration, f func()) *Timer {\n\tt := m.Timer(d)\n\tt.C = nil\n\tt.fn = f\n\treturn t\n}", "func 
(o *Customer) doAfterSelectHooks(exec boil.Executor) (err error) {\n\tfor _, hook := range customerAfterSelectHooks {\n\t\tif err := hook(exec, o); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func PostFlushFn(postFlushFn func()) Option {\n\treturn func(lc cacheWithOpts) error {\n\t\treturn lc.setPostFlushFn(postFlushFn)\n\t}\n}", "func RegisterAfterCallback(cb func() error) {\n\tafterCallbacks = append(afterCallbacks, cb)\n}", "func withDone(f cmdFunc) cmdFunc {\n\treturn func(args []string) error {\n\t\tif err := f(args); err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn ErrDone\n\t}\n}", "func (o *Segment) doAfterSelectHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {\n\tif boil.HooksAreSkipped(ctx) {\n\t\treturn nil\n\t}\n\n\tfor _, hook := range segmentAfterSelectHooks {\n\t\tif err := hook(ctx, exec, o); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func (spec *Spec) After(afterBlock testCaseBlock) {\n\tspec.testingTB.Helper()\n\tspec.Around(func(t *T) func() {\n\t\treturn func() { afterBlock(t) }\n\t})\n}", "func (o *Offer) doAfterInsertHooks(exec boil.Executor) (err error) {\n\tfor _, hook := range offerAfterInsertHooks {\n\t\tif err := hook(exec, o); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func PostFlushFn(postFlushFn func()) Option {\n\treturn func(lc *memoryCache) error {\n\t\tlc.postFlushFn = postFlushFn\n\t\treturn nil\n\t}\n}", "func (o *CMFBalanceChargeAdmin) doAfterSelectHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {\n\tif boil.HooksAreSkipped(ctx) {\n\t\treturn nil\n\t}\n\n\tfor _, hook := range cmfBalanceChargeAdminAfterSelectHooks {\n\t\tif err := hook(ctx, exec, o); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func (o *Doc) doAfterSelectHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {\n\tif boil.HooksAreSkipped(ctx) {\n\t\treturn nil\n\t}\n\n\tfor _, hook := range docAfterSelectHooks {\n\t\tif err := hook(ctx, exec, o); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func (o *BookCategory) doAfterSelectHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {\n\tif boil.HooksAreSkipped(ctx) {\n\t\treturn nil\n\t}\n\n\tfor _, hook := range bookCategoryAfterSelectHooks {\n\t\tif err := hook(ctx, exec, o); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func CAfterSuite(body func(context.Context), timeout time.Duration) {\n\tginkgo.AfterSuite(contextify(body, timeout), timeout.Seconds())\n}", "func PostFlushFn(postFlushFn func()) Option {\n\treturn func(lc *loadingCache) error {\n\t\tlc.postFlushFn = postFlushFn\n\t\treturn nil\n\t}\n}", "func (r *Route) After(handler ...RequestHandler) *Route {\n\tr.AfterMiddlewares = append(r.AfterMiddlewares, handler...)\n\n\treturn r\n}", "func AnyPlugAfter(inp <-chan Any, after <-chan time.Time) (out <-chan Any, done <-chan struct{}) {\n\tcha := make(chan Any)\n\tdoit := make(chan struct{})\n\tgo plugAnyAfter(cha, doit, inp, after)\n\treturn cha, doit\n}", "func (o *Subscriber) doAfterSelectHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {\n\tfor _, hook := range subscriberAfterSelectHooks {\n\t\tif err := hook(ctx, exec, o); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func AfterFunc(d time.Duration, f func()) *Timer {\n\tif d <= 0 {\n\t\treturn nil\n\t}\n\n\treturn defaultTimerWheel.AfterFunc(d, f)\n}", "func (fsm *FSM) HandleAfter(cmd string, payload interface{}, after time.Duration) {\n haf := func() {\n 
time.Sleep(after)\n fsm.Handle(cmd, payload)\n }\n go haf()\n}", "func (o *Offer) doAfterDeleteHooks(exec boil.Executor) (err error) {\n\tfor _, hook := range offerAfterDeleteHooks {\n\t\tif err := hook(exec, o); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func (c *Clock) AfterFunc(d time.Duration, f func()) tstime.TimerController {\n\tc.init()\n\trt := c.maybeGetRealTime()\n\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\n\tc.advanceLocked(rt, 0)\n\tt := &Timer{\n\t\tnextTrigger: c.present.Add(d),\n\t\tem: &c.events,\n\t}\n\tt.init(c.timerChannelSize, f)\n\treturn t\n}", "func (o *BoardsSectionsPosition) doAfterSelectHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {\n\tif boil.HooksAreSkipped(ctx) {\n\t\treturn nil\n\t}\n\n\tfor _, hook := range boardsSectionsPositionAfterSelectHooks {\n\t\tif err := hook(ctx, exec, o); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func (spec *Spec) AfterAll(blk func(tb testing.TB)) {\n\tspec.testingTB.Helper()\n\tspec.AroundAll(func(tb testing.TB) func() {\n\t\treturn func() { blk(tb) }\n\t})\n}", "func (r *Router) UseAfter(h Handler) {\n\tr.tail = r.tail.After(h)\n}", "func (o *CMFFamilyUserPoliciesTake) doAfterSelectHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {\n\tif boil.HooksAreSkipped(ctx) {\n\t\treturn nil\n\t}\n\n\tfor _, hook := range cmfFamilyUserPoliciesTakeAfterSelectHooks {\n\t\tif err := hook(ctx, exec, o); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func ShutdownFunc(f func()) Option {\n\treturn func(s *Service) {\n\t\ts.shutdownFunc = f\n\t}\n}", "func (o *ScheduleSubject) doAfterSelectHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {\n\tif boil.HooksAreSkipped(ctx) {\n\t\treturn nil\n\t}\n\n\tfor _, hook := range scheduleSubjectAfterSelectHooks {\n\t\tif err := hook(ctx, exec, o); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func (c *IRacing) AfterResponse(f AfterFunc) {\n\tc.AfterFuncs = append(c.AfterFuncs, f)\n}", "func (m *MockCallback) AddAfterInvoke(arg0 rpc.CallbackFunc) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"AddAfterInvoke\", arg0)\n}", "func (o *CMFAdminMenu) doAfterSelectHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {\n\tif boil.HooksAreSkipped(ctx) {\n\t\treturn nil\n\t}\n\n\tfor _, hook := range cmfAdminMenuAfterSelectHooks {\n\t\tif err := hook(ctx, exec, o); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func RunAfterCallbacks() {\n\tfor _, cb := range afterCallbacks {\n\t\tif err := cb(); err != nil {\n\t\t\tfmt.Println(err)\n\t\t}\n\t}\n}", "func (cs *Callbacks) AddAfterBanCallBack(f func(rc *RemoteClient)) {\n\tcs.Lock()\n\tcs.afterBan = append(cs.afterBan, f)\n\tcs.Unlock()\n}", "func (o *OauthClient) doAfterSelectHooks(exec boil.Executor) (err error) {\n\tfor _, hook := range oauthClientAfterSelectHooks {\n\t\tif err := hook(exec, o); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func (o *Friendship) doAfterSelectHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {\n\tif boil.HooksAreSkipped(ctx) {\n\t\treturn nil\n\t}\n\n\tfor _, hook := range friendshipAfterSelectHooks {\n\t\tif err := hook(ctx, exec, o); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func (o *Board) doAfterSelectHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {\n\tif boil.HooksAreSkipped(ctx) {\n\t\treturn nil\n\t}\n\n\tfor _, hook := range boardAfterSelectHooks {\n\t\tif err := hook(ctx, exec, o); err != nil 
{\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func (o *Email) doAfterSelectHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {\n\tif boil.HooksAreSkipped(ctx) {\n\t\treturn nil\n\t}\n\n\tfor _, hook := range emailAfterSelectHooks {\n\t\tif err := hook(ctx, exec, o); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func (o *Comment) doAfterSelectHooks(exec boil.Executor) (err error) {\n\tfor _, hook := range commentAfterSelectHooks {\n\t\tif err := hook(exec, o); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func OnAfterReply(sef EventCallbackFunc) {\n\tif onAfterReplyFunc == nil {\n\t\tonAfterReplyFunc = sef\n\t\treturn\n\t}\n\tlog.Warn(\"'OnAfterReply' aah server extension point is already subscribed.\")\n}", "func (o *Employee) doAfterSelectHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {\n\tif boil.HooksAreSkipped(ctx) {\n\t\treturn nil\n\t}\n\n\tfor _, hook := range employeeAfterSelectHooks {\n\t\tif err := hook(ctx, exec, o); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func (o *Description) doAfterInsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {\n\tfor _, hook := range descriptionAfterInsertHooks {\n\t\tif err := hook(ctx, exec, o); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func (o *Latency) doAfterSelectHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {\n\tif boil.HooksAreSkipped(ctx) {\n\t\treturn nil\n\t}\n\n\tfor _, hook := range latencyAfterSelectHooks {\n\t\tif err := hook(ctx, exec, o); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func (o *Notification) doAfterSelectHooks(exec boil.Executor) (err error) {\n\tfor _, hook := range notificationAfterSelectHooks {\n\t\tif err := hook(exec, o); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func (ed *Editor) AddAfterReadline(f func(string)) {\n\ted.AfterReadline = append(ed.AfterReadline, f)\n}", "func (o *ActivityLog) doAfterSelectHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {\n\tif boil.HooksAreSkipped(ctx) {\n\t\treturn nil\n\t}\n\n\tfor _, hook := range activityLogAfterSelectHooks {\n\t\tif err := hook(ctx, exec, o); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func After(d time.Duration) DoneChan {\n\tch := make(chan struct{})\n\tgo func() {\n\t\t<-time.After(d)\n\t\tclose(ch)\n\t}()\n\treturn ch\n}", "func (o *Origin) doAfterSelectHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {\n\tif boil.HooksAreSkipped(ctx) {\n\t\treturn nil\n\t}\n\n\tfor _, hook := range originAfterSelectHooks {\n\t\tif err := hook(ctx, exec, o); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func (o *Channel) doAfterInsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {\n\tif boil.HooksAreSkipped(ctx) {\n\t\treturn nil\n\t}\n\n\tfor _, hook := range channelAfterInsertHooks {\n\t\tif err := hook(ctx, exec, o); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func (o *CMFFamiliesPolicy) doAfterSelectHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {\n\tif boil.HooksAreSkipped(ctx) {\n\t\treturn nil\n\t}\n\n\tfor _, hook := range cmfFamiliesPolicyAfterSelectHooks {\n\t\tif err := hook(ctx, exec, o); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func (s *Selection) After(selector string) *Selection {\n\treturn s.AfterMatcher(compileMatcher(selector))\n}", "func (o *Task) doAfterSelectHooks(ctx context.Context, exec 
boil.ContextExecutor) (err error) {\n\tif boil.HooksAreSkipped(ctx) {\n\t\treturn nil\n\t}\n\n\tfor _, hook := range taskAfterSelectHooks {\n\t\tif err := hook(ctx, exec, o); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func (e *EventBus) Off(topic string, callback *func(Event)) {\n\te.mu.Lock()\n\tdefer e.mu.Unlock()\n\n\tif cp, ok := e.pools[topic]; ok {\n\t\tcp.Remove(callback)\n\t}\n}", "func (a Args) SetAfter(cursor interface{}) { a[1] = &cursor }", "func (o *CMFUserExperienceLog) doAfterSelectHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {\n\tif boil.HooksAreSkipped(ctx) {\n\t\treturn nil\n\t}\n\n\tfor _, hook := range cmfUserExperienceLogAfterSelectHooks {\n\t\tif err := hook(ctx, exec, o); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func (o *BlackCard) doAfterSelectHooks(exec boil.Executor) (err error) {\n\tfor _, hook := range blackCardAfterSelectHooks {\n\t\tif err := hook(exec, o); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func PutTimerWithAfterFunc(t *timerWithAfterFunc) {\n\tt.t.Stop()\n\t// time.AfterFunc使用的timer.C是nil, 不需要clean\n\t_timerWithAfterFuncPool.Put(t)\n}", "func (o *Jet) doAfterSelectHooks(exec boil.Executor) (err error) {\n\tfor _, hook := range jetAfterSelectHooks {\n\t\tif err := hook(exec, o); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func (o *InstrumentClass) doAfterSelectHooks(exec boil.Executor) (err error) {\n\tfor _, hook := range instrumentClassAfterSelectHooks {\n\t\tif err := hook(exec, o); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func OnNext(onNext FnOnNext) SubscriberOption {\n\treturn func(s *subscriber) {\n\t\ts.fnOnNext = onNext\n\t}\n}", "func (e *Exit) AfterEach(fn func(error)) {\n\te.afterEachFn = fn\n}", "func (r *Roller) DecidedDone(f ...Callable) {\n\tr.doners = append(r.doners, f...)\n}", "func (o *Peer) doAfterSelectHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {\n\tif boil.HooksAreSkipped(ctx) {\n\t\treturn nil\n\t}\n\n\tfor _, hook := range peerAfterSelectHooks {\n\t\tif err := hook(ctx, exec, o); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func Timeout(timeout time.Duration, timeoutFunction OnTimeout) crOption {\n\treturn func(cr *ConsumerRegistration) *ConsumerRegistration {\n\t\tcr.timeout = timeout\n\t\tcr.onTimeout = timeoutFunction\n\t\treturn cr\n\t}\n}", "func (h *editorHooks) AddAfterReadline(f func(string)) {\n\th.afterReadline = append(h.afterReadline, f)\n}", "func (o *Smallblog) doAfterSelectHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {\n\tif boil.HooksAreSkipped(ctx) {\n\t\treturn nil\n\t}\n\n\tfor _, hook := range smallblogAfterSelectHooks {\n\t\tif err := hook(ctx, exec, o); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func (o *Onpar) AfterEach(f interface{}) {\n\tif o.current.after != nil {\n\t\tpanic(fmt.Sprintf(\"Level '%s' already has a registered AfterEach\", o.current.name))\n\t}\n\n\t_, fileName, lineNumber, _ := runtime.Caller(o.callCount)\n\n\tv := reflect.ValueOf(f)\n\to.current.after = &specInfo{\n\t\tf: &v,\n\t\tft: reflect.TypeOf(f),\n\t\tfileName: fileName,\n\t\tlineNumber: lineNumber,\n\t}\n}" ]
[ "0.7024639", "0.6065201", "0.5984738", "0.5939588", "0.58868283", "0.5807153", "0.5745618", "0.5697903", "0.5637532", "0.5575202", "0.55675274", "0.55618495", "0.54770416", "0.5417393", "0.53260213", "0.52852386", "0.5212502", "0.51372826", "0.5116915", "0.5115929", "0.5113285", "0.51091415", "0.50823677", "0.5041557", "0.5025494", "0.5010478", "0.49728596", "0.4971761", "0.493191", "0.4922938", "0.49168056", "0.49075314", "0.48873696", "0.48861268", "0.48836285", "0.48663896", "0.4858325", "0.48493105", "0.4841728", "0.48276213", "0.48212224", "0.480793", "0.47896722", "0.47866148", "0.4782849", "0.4777586", "0.47661284", "0.47567922", "0.47456592", "0.4740346", "0.47396392", "0.47387594", "0.47385234", "0.47366583", "0.47235787", "0.4721699", "0.47111553", "0.47111294", "0.47033793", "0.47026834", "0.46950018", "0.46911246", "0.46865556", "0.46847394", "0.46840343", "0.46736354", "0.466608", "0.46596473", "0.46592847", "0.4651831", "0.46359304", "0.46315184", "0.4631393", "0.4629727", "0.4623575", "0.46234125", "0.46225315", "0.46199575", "0.46188816", "0.46171045", "0.46074942", "0.45990297", "0.45920736", "0.45871648", "0.4584288", "0.45768476", "0.4567216", "0.45499977", "0.45487636", "0.4548125", "0.45311543", "0.45292568", "0.4520168", "0.45114163", "0.45035404", "0.45031175", "0.45024303", "0.4500398", "0.4500365", "0.44941053" ]
0.87876165
0
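
With both WithBeforeFuncsConsumerOption and WithAfterFuncsConsumerOption in view, the natural reading is a pipeline in which every BeforeFunc runs ahead of the handler and every AfterFunc observes its outcome. A hedged sketch of that dispatch follows, continuing the assumed types from the previous sketch; the AfterFunc signature and the handle method are illustrative assumptions, not taken from the source:

package kafka

import "context"

type BeforeFunc func(ctx context.Context, msg []byte) context.Context

// AfterFunc is assumed to observe the handler's result.
type AfterFunc func(ctx context.Context, err error)

type Consumer struct {
	befores []BeforeFunc
	afters  []AfterFunc
}

// handle shows one plausible ordering: befores enrich the context,
// the handler does the work, afters see the outcome.
func (c *Consumer) handle(ctx context.Context, msg []byte, process func(context.Context, []byte) error) error {
	for _, before := range c.befores {
		ctx = before(ctx, msg)
	}
	err := process(ctx, msg)
	for _, after := range c.afters {
		after(ctx, err)
	}
	return err
}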
WithEndpointConsumerOption provides a way to set endpoint to the consumer
func WithEndpointConsumerOption(end endpoint.Endpoint) ConsumerOption { return func(c *Consumer) { c.end = end } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func WithEndpoint(e string) Option {\n\treturn func(o *Options) {\n\t\to.Endpoint = e\n\t}\n}", "func WithEndpoint(endpoint string) Option {\n\treturn wrappedOption{oconf.WithEndpoint(endpoint)}\n}", "func WithEndpoint(endpoint string) Option {\n\treturn func(o *options) {\n\t\to.endpoint = endpoint\n\t}\n}", "func WithEndpoint(endpoint string) Opt {\n\treturn func(c *Client) {\n\t\tc.endpoint = endpoint\n\t}\n}", "func WithEndpoint(endpoint string) ClientOption {\n\treturn func(o *requestFactory) {\n\t\to.endpoint = endpoint\n\t}\n}", "func WithEndpoint(s string) ClientOption {\n\treturn func(c *Client) error {\n\t\tc.endpoint = s\n\t\treturn nil\n\t}\n}", "func WithEndpoint(endpoint string) CollectorEndpointOption {\n\treturn collectorEndpointOptionFunc(func(o *collectorEndpointConfig) {\n\t\to.endpoint = endpoint\n\t})\n}", "func WithEndpoint(endpoint string) Option {\n\treturn wrappedOption{otlpconfig.WithEndpoint(endpoint)}\n}", "func WithEndpoint(endpoint string) {\n\tcfg.endpoint = strings.TrimRight(endpoint, \"/\")\n}", "func WithEndpoint(endpoint string) ClientOption {\n\treturn func(client *Client) {\n\t\tclient.endpoint = strings.TrimRight(endpoint, \"/\")\n\t}\n}", "func WithTopicConsumerOption(topic string) ConsumerOption {\n\treturn func(c *Consumer) {\n\t\tc.config.Topic = topic\n\t}\n}", "func WithEndpoint(v string) (p Pair) {\n\treturn Pair{Key: \"endpoint\", Value: v}\n}", "func Endpoint(endpoint *url.URL) ServerOption {\n\treturn func(s *Server) {\n\t\ts.endpoint = endpoint\n\t}\n}", "func Endpoint(endpoints ...*url.URL) Option {\n\treturn func(o *options) { o.endpoints = endpoints }\n}", "func Endpoint(endpoints ...*url.URL) Option {\n\treturn func(o *options) { o.endpoints = endpoints }\n}", "func WithOffsetConsumerOption(offset int64) ConsumerOption {\n\treturn func(c *Consumer) {\n\t\tswitch offset {\n\t\tcase LastOffset:\n\t\t\tc.config.StartOffset = LastOffset\n\t\tcase FirstOffset:\n\t\t\tc.config.StartOffset = FirstOffset\n\t\tdefault:\n\t\t\tc.config.StartOffset = FirstOffset\n\t\t}\n\t}\n}", "func (options *CreateNotificationChannelOptions) SetEndpoint(endpoint string) *CreateNotificationChannelOptions {\n\toptions.Endpoint = core.StringPtr(endpoint)\n\treturn options\n}", "func (options *UpdateNotificationChannelOptions) SetEndpoint(endpoint string) *UpdateNotificationChannelOptions {\n\toptions.Endpoint = core.StringPtr(endpoint)\n\treturn options\n}", "func WithDecoderConsumerOption(fn Decoder) ConsumerOption {\n\treturn func(c *Consumer) { c.dec = fn }\n}", "func (c *Client) SetEndpoint(endpoint string) {\n\tif endpoint == \"\" {\n\t\tc.endpoint = DefaultEndpoint\n\t\treturn\n\t}\n\tc.endpoint = endpoint\n}", "func (clt *Client) SetEndpoint(endpoint string) {\n\tclt.endpoint = endpoint\n}", "func WithAPIEndpoint(endpoint string) Option {\n\treturn func(o *Options) {\n\t\to.APIEndpoint = endpoint\n\t}\n}", "func WithCollectorEndpoint(options ...CollectorEndpointOption) EndpointOption {\n\treturn endpointOptionFunc(func() (batchUploader, error) {\n\t\tcfg := &collectorEndpointConfig{\n\t\t\tendpoint: envOr(envEndpoint, \"http://localhost:14268/api/traces\"),\n\t\t\tusername: envOr(envUser, \"\"),\n\t\t\tpassword: envOr(envPassword, \"\"),\n\t\t\thttpClient: http.DefaultClient,\n\t\t}\n\n\t\tfor _, opt := range options {\n\t\t\topt.apply(cfg)\n\t\t}\n\n\t\treturn &collectorUploader{\n\t\t\tendpoint: cfg.endpoint,\n\t\t\tusername: cfg.username,\n\t\t\tpassword: cfg.password,\n\t\t\thttpClient: cfg.httpClient,\n\t\t}, nil\n\t})\n}", "func (x *XVerify) 
SetEndpoint(e string) {\n\tendpoint = e\n}", "func Endpoint(rawurl string) Opt {\n\treturn func(c *Client) Opt {\n\t\told := c.url\n\t\tc.url = rawurl\n\t\treturn Endpoint(old)\n\t}\n}", "func WithMetricsEndpoint(endpoint string) Option {\n\treturn wrappedOption{otlpconfig.WithMetricsEndpoint(endpoint)}\n}", "func (u *UpYunForm) SetEndpoint(ed int) error {\n\tif ed >= Auto && ed <= Ctt {\n\t\tu.endpoint = fmt.Sprintf(\"v%d.api.upyun.com\", ed)\n\t\treturn nil\n\t}\n\n\treturn errors.New(\"Invalid endpoint, pick from Auto, Telecom, Cnc, Ctt\")\n}", "func WithSSHEndpoint(val string) ResourceMetricsOption {\n\treturn func(ras ResourceAttributesSettings, rm pmetric.ResourceMetrics) {\n\t\tif ras.SSHEndpoint.Enabled {\n\t\t\trm.Resource().Attributes().PutStr(\"ssh.endpoint\", val)\n\t\t}\n\t}\n}", "func WithAutoCommitConsumerOption(flag bool) ConsumerOption {\n\treturn func(c *Consumer) { c.autocommit = flag }\n}", "func (op *OperationRequest) setEndpoint(endpoint string) *OperationRequest {\n\tif endpoint == \"payments\" {\n\t\top.endpoint = endpoint\n\t} else {\n\t\t// default to operations\n\t\top.endpoint = \"operations\"\n\t}\n\treturn op\n}", "func WithReaderConsumerOption(reader *kafgo.Reader) ConsumerOption {\n\treturn func(c *Consumer) { c.reader = reader }\n}", "func (_Energyconsumption *EnergyconsumptionTransactor) SetConsumer(opts *bind.TransactOpts, _owner string, _deviceType string, _peakPowerPos uint32, _peakPowerNeg uint32, _latitude uint32, _longitude uint32, _voltageLevel uint32, _location string, _installDate string) (*types.Transaction, error) {\n\treturn _Energyconsumption.contract.Transact(opts, \"setConsumer\", _owner, _deviceType, _peakPowerPos, _peakPowerNeg, _latitude, _longitude, _voltageLevel, _location, _installDate)\n}", "func (c *Client) CustomEndpoint(e string) {\n\tc.Endpoint = e\n}", "func WithExchange(e string) PublishOption {\n\treturn func(o *PublishOptions) {\n\t\to.Exchange = e\n\t}\n}", "func SetServerEndpoint(address string) ServerOptions {\n\treturn func(s *Server) error {\n\t\ts.endpoint = address\n\t\treturn nil\n\t}\n}", "func (ml *ManagedListener) SetEndpoint(ep *v1.Endpoints) {\n\tdefer ml.Monitor()()\n\tif InCluster() {\n\t\tif ml.Endpoints != nil && ep != nil {\n\t\t\tlhsPorts := EndpointIPs(ml.Endpoints)\n\t\t\tlhsIPs := EndpointSubsetPorts(ml.Endpoints)\n\t\t\trhsPorts := EndpointIPs(ep)\n\t\t\trhsIPs := EndpointSubsetPorts(ep)\n\t\t\tif !lhsPorts.Equal(rhsPorts) || !lhsIPs.Equal(rhsIPs) {\n\t\t\t\tml.EndpointsChanged = true\n\t\t\t\tml.Endpoints = ep\n\t\t\t}\n\t\t}\n\t}\n}", "func WithQueueSubscriber(queue string) ConsumerOption {\n\treturn func(c *Consumer) error {\n\t\tif queue == \"\" {\n\t\t\treturn ErrInvalidQueueName\n\t\t}\n\t\tc.Subscriber = &QueueSubscriber{Queue: queue}\n\t\treturn nil\n\t}\n}", "func IngestionEndpoint() QueryOption {\n\treturn func(m *queryOptions) error {\n\t\tm.queryIngestion = true\n\t\treturn nil\n\t}\n}", "func WithSpanExporterEndpoint(url string) Option {\n\treturn func(c *Config) {\n\t\tc.SpanExporterEndpoint = url\n\t}\n}", "func WithDescriber(describer Describer) Option {\n\treturn func(c *Client) {\n\t\tc.describer = describer\n\t}\n}", "func (c *Client) endpoint(route string) string {\n\treturn baseEndpoint + route\n}", "func WithUsername(username string) CollectorEndpointOption {\n\treturn collectorEndpointOptionFunc(func(o *collectorEndpointConfig) {\n\t\to.username = username\n\t})\n}", "func (b *NutanixPrismElementEndpointApplyConfiguration) WithEndpoint(value *NutanixPrismEndpointApplyConfiguration) 
*NutanixPrismElementEndpointApplyConfiguration {\n\tb.Endpoint = value\n\treturn b\n}", "func WithTracesEndpoint(endpoint string) Option {\n\treturn wrappedOption{otlpconfig.WithTracesEndpoint(endpoint)}\n}", "func WithRemoteEndpoint(e model.Endpoint) TraceOption {\n\treturn func(o *TraceOptions) {\n\t\to.RemoteEndpoint = &e\n\t}\n}", "func WithSomeName(handler ConsumerHandler) ConsumerOption {\n\treturn func(consumer Consumer) error {\n\t\treturn consumer.Consume(\"some-name\", handler)\n\t}\n}", "func WithMetricsExporterEndpoint(url string) Option {\n\treturn func(c *Config) {\n\t\tc.MetricsExporterEndpoint = url\n\t}\n}", "func WithoutFnInvokeEndpoints() Option {\n\treturn func(ctx context.Context, s *Server) error {\n\t\ts.noFnInvokeEndpoint = true\n\t\treturn nil\n\t}\n}", "func WithEndpointConfig(endpointConfig fab.EndpointConfig) SDKContextParams {\n\treturn func(ctx *Provider) {\n\t\tctx.endpointConfig = endpointConfig\n\t}\n}", "func WithEndpointRegistration(prefix string, endpoints ...registerFunc) Option {\n\treturn func(g *gateway) {\n\t\tg.endpoints[prefix] = append(g.endpoints[prefix], endpoints...)\n\t}\n}", "func WithCheckpoint(checkpoint Checkpoint) Option {\n\treturn func(c *Consumer) error {\n\t\tc.checkpoint = checkpoint\n\t\treturn nil\n\t}\n}", "func (s Broker) RegisterEndpoint(e Endpoint) error {\n\tselect {\n\tcase s <- e:\n\tdefault:\n\t\treturn ErrActivityBufferFull\n\t}\n\n\treturn nil\n}", "func (c *BcsMonitorClient) SetCompleteEndpoint() {\n\tif c.opts.UserName != \"\" && c.opts.Password != \"\" {\n\t\tc.completeEndpoint = fmt.Sprintf(\"%s://%s:%s@%s\", c.opts.Schema,\n\t\t\tc.opts.UserName, c.opts.Password, c.opts.Endpoint)\n\t} else {\n\t\tc.completeEndpoint = fmt.Sprintf(\"%s://%s\", c.opts.Schema, c.opts.Endpoint)\n\t}\n}", "func (s *HttpsNotificationConfiguration) SetEndpoint(v string) *HttpsNotificationConfiguration {\n\ts.Endpoint = &v\n\treturn s\n}", "func AzureEndpoint(endpoint string) func(az *TierAzure) error {\n\treturn func(az *TierAzure) error {\n\t\taz.Endpoint = endpoint\n\t\treturn nil\n\t}\n}", "func NewEndpoint(network, address string, options ...Option) Endpoint {\n\treturn &endpoint{\n\t\tnetwork: network,\n\t\taddress: address,\n\t\toptions: options,\n\t}\n}", "func WithCounter(counter Counter) Option {\n\treturn func(c *Consumer) error {\n\t\tc.counter = counter\n\t\treturn nil\n\t}\n}", "func (plugin) WrapEndpoint(edp muxrpc.Endpoint) interface{} {\n\treturn endpoint{edp}\n}", "func (o ConsumerResponseOutput) EndpointUri() pulumi.StringOutput {\n\treturn o.ApplyT(func(v ConsumerResponse) string { return v.EndpointUri }).(pulumi.StringOutput)\n}", "func RegisterShowSchedulerHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {\n\tconn, err := grpc.Dial(endpoint, opts...)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer func() {\n\t\tif err != nil {\n\t\t\tif cerr := conn.Close(); cerr != nil {\n\t\t\t\tgrpclog.Infof(\"Failed to close conn to %s: %v\", endpoint, cerr)\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\tgo func() {\n\t\t\t<-ctx.Done()\n\t\t\tif cerr := conn.Close(); cerr != nil {\n\t\t\t\tgrpclog.Infof(\"Failed to close conn to %s: %v\", endpoint, cerr)\n\t\t\t}\n\t\t}()\n\t}()\n\n\treturn RegisterShowSchedulerHandler(ctx, mux, conn)\n}", "func WithConsumeTimeout(s string) (OptionFunc, error) {\n\ttimeout, err := time.ParseDuration(s)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn func(c *Config) error {\n\t\tc.consumeTimeout = timeout\n\t\treturn nil\n\t}, nil\n}", 
"func (policy *PolicySvc) augmentEndpoint(endpoint *common.Endpoint) error {\n\ttenantSvcUrl, err := policy.client.GetServiceUrl(\"tenant\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tif endpoint.Peer == common.Wildcard {\n\t\t// If a wildcard is specfied, there is nothing to augment\n\t\treturn nil\n\t}\n\tlog.Printf(\"Policy: Augmenting %#v\", endpoint)\n\n\t// Code below tries to resolve tenant name into tenant_network_id if possible.\n\t//\n\t// TODO this will have to be changed once we implement\n\t// https://paninetworks.kanbanize.com/ctrl_board/3/cards/319/details\n\tten := &tenant.Tenant{}\n\tif endpoint.TenantNetworkID == nil {\n\t\tif endpoint.TenantID != 0 {\n\t\t\ttenantIDToUse := strconv.FormatUint(endpoint.TenantID, 10)\n\t\t\ttenantsUrl := fmt.Sprintf(\"%s/tenants/%s\", tenantSvcUrl, tenantIDToUse)\n\t\t\tlog.Printf(\"Policy: Looking tenant up at %s\", tenantsUrl)\n\t\t\terr = policy.client.Get(tenantsUrl, ten)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tendpoint.TenantNetworkID = &ten.NetworkID\n\n\t\t} else if endpoint.TenantExternalID != \"\" || endpoint.TenantName != \"\" {\n\t\t\tif endpoint.TenantExternalID != \"\" {\n\t\t\t\tten.ExternalID = endpoint.TenantExternalID\n\t\t\t}\n\t\t\tif endpoint.TenantName != \"\" {\n\t\t\t\tten.Name = endpoint.TenantName\n\t\t\t}\n\t\t\terr = policy.client.Find(ten, common.FindLast)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tendpoint.TenantNetworkID = &ten.NetworkID\n\t\t}\n\t}\n\n\tif endpoint.SegmentNetworkID == nil {\n\t\tif ten == nil && (endpoint.SegmentID != 0 || endpoint.SegmentExternalID != \"\" || endpoint.SegmentName != \"\") {\n\t\t\treturn common.NewError400(\"No tenant information specified, cannot look up segment.\")\n\t\t}\n\t\tsegment := &tenant.Segment{}\n\t\tif endpoint.SegmentID != 0 {\n\t\t\tsegmentIDToUse := strconv.FormatUint(endpoint.SegmentID, 10)\n\t\t\tsegmentsUrl := fmt.Sprintf(\"%s/tenants/%d/segments/%s\", tenantSvcUrl, ten.ID, segmentIDToUse)\n\t\t\tlog.Printf(\"Policy: Looking segment up at %s for %#v\", segmentsUrl, endpoint)\n\t\t\terr = policy.client.Get(segmentsUrl, &segment)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tendpoint.SegmentNetworkID = &segment.NetworkID\n\t\t} else if endpoint.SegmentExternalID != \"\" || endpoint.SegmentName != \"\" {\n\t\t\tsegmentsUrl := fmt.Sprintf(\"%s/findLast/segments?tenant_id=%d&\", tenantSvcUrl, ten.ID)\n\t\t\tif endpoint.SegmentExternalID != \"\" {\n\t\t\t\tsegmentsUrl += \"external_id=\" + endpoint.TenantExternalID + \"&\"\n\t\t\t}\n\t\t\tif endpoint.SegmentName != \"\" {\n\t\t\t\tsegmentsUrl += \"name=\" + endpoint.SegmentName\n\t\t\t}\n\t\t\tlog.Printf(\"Policy: Finding segments at %s for %#v (Tenant %#v %t)\", segmentsUrl, endpoint, ten, ten == nil)\n\t\t\terr = policy.client.Get(segmentsUrl, &segment)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tendpoint.SegmentNetworkID = &segment.NetworkID\n\t\t}\n\t}\n\treturn nil\n}", "func (s *SearchRecord) SetEndpoint(v *Endpoint) *SearchRecord {\n\ts.Endpoint = v\n\treturn s\n}", "func WithPassword(password string) CollectorEndpointOption {\n\treturn collectorEndpointOptionFunc(func(o *collectorEndpointConfig) {\n\t\to.password = password\n\t})\n}", "func Exchange(opt exchange.Exchange) Option {\n\treturn func(o *Options) {\n\t\to.Exchange = opt\n\t}\n}", "func (r *EndpointRegistry) addEndpoint(addr wire.Address, conn Conn, dialer bool) *Endpoint {\n\tr.Log().WithField(\"peer\", addr).Trace(\"EndpointRegistry.addEndpoint\")\n\n\te := newEndpoint(addr, 
conn)\n\tfe, created := r.fullEndpoint(addr, e)\n\tif !created {\n\t\tif e, closed := fe.replace(e, r.id.Address(), dialer); closed {\n\t\t\treturn e\n\t\t}\n\t}\n\n\tconsumer := r.onNewEndpoint(addr)\n\t// Start receiving messages.\n\tgo func() {\n\t\tif err := e.recvLoop(consumer); err != nil {\n\t\t\tr.Log().WithError(err).Error(\"recvLoop finished unexpectedly\")\n\t\t}\n\t\tfe.delete(e)\n\t}()\n\n\treturn e\n}", "func (s *Cluster) SetEndpoint(v string) *Cluster {\n\ts.Endpoint = &v\n\treturn s\n}", "func (d *DBGenerator) setCommunalEndpoint(ctx context.Context) error {\n\tq := Queries[CommunalEndpointKey]\n\trows, err := d.Conn.QueryContext(ctx, q)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed running '%s': %w\", q, err)\n\t}\n\tdefer rows.Close()\n\n\tconst HTTPSKey = \"AWSEnableHttps\"\n\tconst EndpointKey = \"AWSEndpoint\"\n\tconst AWSAuth = \"AWSAuth\"\n\tvar protocol, endpoint string\n\tvar auth []string\n\n\tfor rows.Next() {\n\t\tif rows.Err() != nil {\n\t\t\treturn fmt.Errorf(\"failed running '%s': %w\", q, rows.Err())\n\t\t}\n\t\tvar key string\n\t\tvar value string\n\t\tif err := rows.Scan(&key, &value); err != nil {\n\t\t\treturn fmt.Errorf(\"failed running '%s': %w\", q, err)\n\t\t}\n\n\t\tswitch key {\n\t\tcase HTTPSKey:\n\t\t\tif value == \"0\" {\n\t\t\t\tprotocol = \"http\"\n\t\t\t} else {\n\t\t\t\tprotocol = \"https\"\n\t\t\t}\n\t\tcase EndpointKey:\n\t\t\tendpoint = value\n\n\t\tcase AWSAuth:\n\t\t\tauthRE := regexp.MustCompile(`:`)\n\t\t\tconst NumAuthComponents = 2\n\t\t\tauth = authRE.Split(value, NumAuthComponents)\n\t\t}\n\t}\n\tif protocol == \"\" {\n\t\treturn fmt.Errorf(\"missing '%s' in query '%s'\", HTTPSKey, q)\n\t}\n\tif endpoint == \"\" {\n\t\treturn fmt.Errorf(\"missing '%s' in query '%s'\", EndpointKey, q)\n\t}\n\tif len(auth) == 0 {\n\t\treturn fmt.Errorf(\"missing '%s' in query '%s'\", AWSAuth, q)\n\t}\n\n\td.Objs.Vdb.Spec.Communal.Endpoint = fmt.Sprintf(\"%s://%s\", protocol, endpoint)\n\td.Objs.CredSecret.Data = map[string][]byte{\n\t\tcontrollers.S3AccessKeyName: []byte(auth[0]),\n\t\tcontrollers.S3SecretKeyName: []byte(auth[1]),\n\t}\n\n\treturn nil\n}", "func (s *Workgroup) SetEndpoint(v *Endpoint) *Workgroup {\n\ts.Endpoint = v\n\treturn s\n}", "func WithMysqlInstanceEndpoint(val string) ResourceMetricsOption {\n\treturn func(rm pmetric.ResourceMetrics) {\n\t\trm.Resource().Attributes().PutString(\"mysql.instance.endpoint\", val)\n\t}\n}", "func WithHTTPClient(client *http.Client) CollectorEndpointOption {\n\treturn collectorEndpointOptionFunc(func(o *collectorEndpointConfig) {\n\t\to.httpClient = client\n\t})\n}", "func (c *Provider) EndpointConfig() fab.EndpointConfig {\n\treturn c.endpointConfig\n}", "func (sp *SessionProxy) SetEndpoint(sid string, ep Endpoint, timeout time.Duration) {\n\tsp.GetSession().SetEndpoint(sid, ep, timeout)\n}", "func WithTargetEndpoints(keys ...string) RequestOption {\n\treturn func(ctx context.Client, opts *requestOptions) error {\n\n\t\tvar targets []fab.Peer\n\n\t\tfor _, url := range keys {\n\n\t\t\tpeerCfg, err := comm.NetworkPeerConfig(ctx.EndpointConfig(), url)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tpeer, err := ctx.InfraProvider().CreatePeerFromConfig(peerCfg)\n\t\t\tif err != nil {\n\t\t\t\treturn errors.WithMessage(err, \"creating peer from config failed\")\n\t\t\t}\n\n\t\t\ttargets = append(targets, peer)\n\t\t}\n\n\t\treturn WithTargets(targets...)(ctx, opts)\n\t}\n}", "func WithEventSettings(cfg ConsumerConfig) OptionEvent {\n\treturn func(c *Consumer) {\n\t\tif 
cfg.MaxInFlight > 0 {\n\t\t\tc.cfg.MaxInFlight = cfg.MaxInFlight\n\t\t}\n\t\tif cfg.MaxAttempts > 0 {\n\t\t\tc.cfg.MaxAttempts = cfg.MaxAttempts\n\t\t}\n\t\tif cfg.Timeout > 0 {\n\t\t\tc.cfg.Timeout = cfg.Timeout\n\t\t}\n\t\tif cfg.RequeueInterval > 0 {\n\t\t\tc.cfg.RequeueInterval = cfg.RequeueInterval\n\t\t}\n\t\tif cfg.NumOfConsumers > 0 {\n\t\t\tc.cfg.NumOfConsumers = cfg.NumOfConsumers\n\t\t}\n\t}\n}", "func (ch *clientSecureChannel) EndpointURL() string {\n\treturn ch.endpointURL\n}", "func NewEndpoint(githubReporter Reporter, intercomReporter Reporter, storage Uploader, rateLimiter *infra.RateLimiter) *Endpoint {\n\treturn &Endpoint{githubReporter: githubReporter, storage: storage, rateLimiter: rateLimiter, intercomReporter: intercomReporter}\n}", "func RegisterOrgHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {\n\tconn, err := grpc.Dial(endpoint, opts...)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer func() {\n\t\tif err != nil {\n\t\t\tif cerr := conn.Close(); cerr != nil {\n\t\t\t\tgrpclog.Infof(\"Failed to close conn to %s: %v\", endpoint, cerr)\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\tgo func() {\n\t\t\t<-ctx.Done()\n\t\t\tif cerr := conn.Close(); cerr != nil {\n\t\t\t\tgrpclog.Infof(\"Failed to close conn to %s: %v\", endpoint, cerr)\n\t\t\t}\n\t\t}()\n\t}()\n\n\treturn RegisterOrgHandler(ctx, mux, conn)\n}", "func (a *DefaultApiService) ShowEndpoint(ctx _context.Context, id string) ApiShowEndpointRequest {\n\treturn ApiShowEndpointRequest{\n\t\tApiService: a,\n\t\tctx: ctx,\n\t\tid: id,\n\t}\n}", "func NewConsumer(\n\tbrokers []string,\n\tlogger log.Logger,\n\toptions ...ConsumerOption,\n) (*Consumer, error) {\n\t// default values\n\tcfg := kafgo.ReaderConfig{\n\t\tBrokers: brokers,\n\t\tGroupID: defaultConsumerGroupID,\n\t\tTopic: defaultTopic,\n\t\tLogger: kafka.LoggerFunc(logger.Debugf),\n\t}\n\n\tcs := &Consumer{\n\t\treader: nil,\n\t\tconfig: &cfg,\n\t}\n\n\tfor _, o := range options {\n\t\to(cs)\n\t}\n\n\tif cs.end == nil {\n\t\treturn nil, errors.Wrap(\n\t\t\tErrCreatingConsumer, \"missing endpoint\",\n\t\t)\n\t}\n\n\tif cs.dec == nil {\n\t\treturn nil, errors.Wrap(\n\t\t\tErrCreatingConsumer, \"missing decoder\",\n\t\t)\n\t}\n\n\tif cs.errFn == nil {\n\t\tcs.errFn = defaultErrorFunc\n\t}\n\n\tif cs.errHandler == nil {\n\t\tcs.errHandler = transport.NewLogErrorHandler(logger)\n\t}\n\treturn cs, nil\n}", "func WithUpstreamPort(port uint32) option {\n\treturn func(c *KubernetesConfigurator) {\n\t\tc.upstreamPort = port\n\t}\n}", "func WithUpstreamPort(port uint32) option {\n\treturn func(c *KubernetesConfigurator) {\n\t\tc.upstreamPort = port\n\t}\n}", "func Endpoint(url string, configureFunc func()) {\n\touterCurrentMockHandler := currentMockHandler\n\tSwitch(extractor.ExtractMethod(), configureFunc)\n\tcurrentMockery.Handle(url, currentMockHandler)\n\tcurrentMockHandler = outerCurrentMockHandler\n}", "func (_Energyconsumption *EnergyconsumptionSession) SetConsumer(_owner string, _deviceType string, _peakPowerPos uint32, _peakPowerNeg uint32, _latitude uint32, _longitude uint32, _voltageLevel uint32, _location string, _installDate string) (*types.Transaction, error) {\n\treturn _Energyconsumption.Contract.SetConsumer(&_Energyconsumption.TransactOpts, _owner, _deviceType, _peakPowerPos, _peakPowerNeg, _latitude, _longitude, _voltageLevel, _location, _installDate)\n}", "func (*protocol) NewEndpoint(stack *stack.Stack, netProto tcpip.NetworkProtocolNumber,\n\twaiterQueue *waiter.Queue) (tcpip.Endpoint, 
*tcpip.Error) {\n\treturn newEndpoint(stack, netProto, waiterQueue), nil\n}", "func (s *GetEndpointAccessOutput) SetEndpoint(v *EndpointAccess) *GetEndpointAccessOutput {\n\ts.Endpoint = v\n\treturn s\n}", "func (s *DiscoverPollEndpointOutput) SetEndpoint(v string) *DiscoverPollEndpointOutput {\n\ts.Endpoint = &v\n\treturn s\n}", "func (_Energyconsumption *EnergyconsumptionTransactorSession) SetConsumer(_owner string, _deviceType string, _peakPowerPos uint32, _peakPowerNeg uint32, _latitude uint32, _longitude uint32, _voltageLevel uint32, _location string, _installDate string) (*types.Transaction, error) {\n\treturn _Energyconsumption.Contract.SetConsumer(&_Energyconsumption.TransactOpts, _owner, _deviceType, _peakPowerPos, _peakPowerNeg, _latitude, _longitude, _voltageLevel, _location, _installDate)\n}", "func OverwriteConsumer(overwrite runtime.Consumer) Option {\n\treturn OverwriteConsumerForStatus(overwrite, ForAllStatusCodes)\n}", "func WithProvider(provider Provider) Option {\n\treturn func(cfg *config) {\n\t\tcfg.provider = provider\n\t}\n}", "func NewEndpointDescription(endpointUrl PascalString, server ExtensionObjectDefinition, serverCertificate PascalByteString, securityMode MessageSecurityMode, securityPolicyUri PascalString, noOfUserIdentityTokens int32, userIdentityTokens []ExtensionObjectDefinition, transportProfileUri PascalString, securityLevel uint8) *_EndpointDescription {\n\t_result := &_EndpointDescription{\n\t\tEndpointUrl: endpointUrl,\n\t\tServer: server,\n\t\tServerCertificate: serverCertificate,\n\t\tSecurityMode: securityMode,\n\t\tSecurityPolicyUri: securityPolicyUri,\n\t\tNoOfUserIdentityTokens: noOfUserIdentityTokens,\n\t\tUserIdentityTokens: userIdentityTokens,\n\t\tTransportProfileUri: transportProfileUri,\n\t\tSecurityLevel: securityLevel,\n\t\t_ExtensionObjectDefinition: NewExtensionObjectDefinition(),\n\t}\n\t_result._ExtensionObjectDefinition._ExtensionObjectDefinitionChildRequirements = _result\n\treturn _result\n}", "func WithMaxPacketSize(size int) AgentEndpointOption {\n\treturn agentEndpointOptionFunc(func(o *agentEndpointConfig) {\n\t\to.MaxPacketSize = size\n\t})\n}", "func (b *STSBuilder) OIDCEndpointURL(value string) *STSBuilder {\n\tb.oidcEndpointURL = value\n\tb.bitmap_ |= 1\n\treturn b\n}", "func editEndpointInConfig(t *testing.T, configFilePath, endpoint string) {\n\tvar config cli.Config\n\tb, err := ioutil.ReadFile(configFilePath)\n\trequire.NoError(t, err)\n\n\terr = yaml.Unmarshal(b, &config)\n\trequire.NoError(t, err)\n\n\tconfig.Endpoint = endpoint\n\n\tb, err = yaml.Marshal(&config)\n\trequire.NoError(t, err)\n\n\terr = ioutil.WriteFile(configFilePath, b, 0655)\n\trequire.NoError(t, err)\n\n}", "func (c *Command) SetEndpointCommand(endpointCommand *endpoint.Command) {\n\tc.endpointCommand = endpointCommand\n}", "func AddConfEndpoint(machine *AnsweringMachine, route string) {\n\n\t// default route\n\tif route == \"\" {\n\t\troute = \"/conf\"\n\t}\n\n\thttp.HandleFunc(route, func(w http.ResponseWriter, req *http.Request) {\n\t\tglog.V(3).Infof(\"Conf API call: %s %s\", req.Method, req.URL.String())\n\n\t\tif req.Method != \"GET\" {\n\t\t\tglog.V(2).Infof(\"Method %s not supported\", req.Method)\n\t\t\tsendBadRequest(w, \"only GET requests are supported\")\n\t\t\treturn\n\t\t}\n\n\t\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\t\tw.WriteHeader(http.StatusOK)\n\n\t\tw.Write([]byte(\"{\"))\n\n\t\t// write env (mask password)\n\t\tpublicEnv := *machine.env\n\t\tpublicEnv.RecorderPassword = 
\"********\"\n\t\tw.Write([]byte(`\"env\":`))\n\t\tenc := json.NewEncoder(w)\n\t\tenc.Encode(publicEnv)\n\n\t\t// write messages\n\t\tw.Write([]byte(`, \"messages\":`))\n\t\tenc.Encode(*machine.messages)\n\n\t\t// write routes\n\t\tw.Write([]byte(`, \"routes\":`))\n\t\tenc.Encode(*machine.routes)\n\n\t\tw.Write([]byte(\"}\"))\n\t})\n}", "func (c *ConsumerManager) AddConsumer(topic, channel string, client ConsumerClient) error {\n\n}", "func TagEndpoint() Endpoint {\n\treturn func(u *url.URL) {\n\t\taddToURL(u, \"tags\")\n\t}\n}", "func (s *NASInstance) SetEndpoint(v *Endpoint) *NASInstance {\n\ts.Endpoint = v\n\treturn s\n}", "func WithAutoCommitTimeConsumerOption(dur time.Duration) ConsumerOption {\n\treturn func(c *Consumer) { c.config.CommitInterval = dur }\n}" ]
[ "0.70915824", "0.7042524", "0.69967586", "0.6994163", "0.6945915", "0.6737613", "0.67196137", "0.661804", "0.6488725", "0.6401157", "0.61127394", "0.6109585", "0.60376006", "0.60001093", "0.60001093", "0.59774673", "0.5949713", "0.58298445", "0.57701105", "0.57417226", "0.5732146", "0.57217675", "0.5707612", "0.569287", "0.55879927", "0.5566687", "0.5515684", "0.5505141", "0.54737335", "0.5465521", "0.5414572", "0.5379914", "0.5315538", "0.5264427", "0.5262312", "0.52561224", "0.523553", "0.52297306", "0.52211004", "0.52141243", "0.51836205", "0.5162911", "0.5154268", "0.51319575", "0.51173246", "0.51144403", "0.5104898", "0.50897974", "0.5081944", "0.5077895", "0.5074168", "0.4994958", "0.4992987", "0.49851942", "0.49822205", "0.4978789", "0.49760714", "0.49708256", "0.4964599", "0.496092", "0.4954433", "0.49541765", "0.49517632", "0.4930926", "0.4928398", "0.49207476", "0.4900836", "0.48987204", "0.48913208", "0.4869723", "0.48674974", "0.48573846", "0.4851104", "0.48471427", "0.4838505", "0.48376623", "0.48322648", "0.48311645", "0.4828104", "0.48227513", "0.4802618", "0.4802618", "0.47824413", "0.47772563", "0.47755522", "0.47724447", "0.47673628", "0.47585112", "0.47542927", "0.4747596", "0.47403514", "0.4738353", "0.47373962", "0.47218567", "0.47197473", "0.4719145", "0.46984735", "0.46883354", "0.4679802", "0.46745473" ]
0.8235426
0
WithReaderConsumerOption lets you set the reader for Kafka
func WithReaderConsumerOption(reader *kafgo.Reader) ConsumerOption { return func(c *Consumer) { c.reader = reader } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func ConsumerReader(r StreamReader) ConsumerOptionsFn {\n\treturn func(o *Consumer) error {\n\t\to.reader = r\n\t\treturn nil\n\t}\n}", "func WithReader(r reader.Reader) Option {\n\treturn func(o *Options) {\n\t\to.Reader = r\n\t}\n}", "func WithReader(r reader.Reader) loader.Option {\n\treturn func(o *loader.Options) {\n\t\to.Reader = r\n\t}\n}", "func NewReader(topicName string, groupID string) *kafka.Reader {\r\n\tbrokerUrls := Config.GetStringSlice(\"kafka.consumer.brokerUrls\")\r\n\tr := kafka.NewReader(kafka.ReaderConfig{\r\n\t\tBrokers: brokerUrls,\r\n\t\tGroupID: groupID,\r\n\t\tTopic: topicName,\r\n\t\tDialer: dialer,\r\n\t})\r\n\treturn r\r\n}", "func WithOffsetConsumerOption(offset int64) ConsumerOption {\n\treturn func(c *Consumer) {\n\t\tswitch offset {\n\t\tcase LastOffset:\n\t\t\tc.config.StartOffset = LastOffset\n\t\tcase FirstOffset:\n\t\t\tc.config.StartOffset = FirstOffset\n\t\tdefault:\n\t\t\tc.config.StartOffset = FirstOffset\n\t\t}\n\t}\n}", "func WithDecoderConsumerOption(fn Decoder) ConsumerOption {\n\treturn func(c *Consumer) { c.dec = fn }\n}", "func WithTopicConsumerOption(topic string) ConsumerOption {\n\treturn func(c *Consumer) {\n\t\tc.config.Topic = topic\n\t}\n}", "func NewReaderOpt(r io.Reader, opt *Options) (res Reader, err error) {\n\to := checkOpt(opt)\n\tfor _, f := range Formats {\n\t\tif res, err = f(&r, &o); res != nil || err != nil {\n\t\t\tbreak\n\t\t}\n\t\tif rs, ok := r.(io.Seeker); ok {\n\t\t\trs.Seek(0, io.SeekStart)\n\t\t}\n\t}\n\treturn\n}", "func Reader(ioReader io.Reader) ParserOption {\n\treturn reader{\n\t\tReader: ioReader,\n\t}\n}", "func (conf KafkaConfig) NewKafkaReader() *kafka.Reader {\n\treturn kafka.NewReader(kafka.ReaderConfig{\n\t\tBrokers: conf.ConnectionStrings,\n\t\tTopic: conf.Topic,\n\t\tMaxAttempts: conf.MaxConnectAttempts,\n\t\tMinBytes: 1,\n\t\tMaxBytes: 1024 * 12,\n\t\tQueueCapacity: 1,\n\t\tLogger: conf.Logger,\n\t\tErrorLogger: conf.Logger,\n\t})\n}", "func (p *MockPartition) SetReader(r io.ReadCloser) *MockPartition {\n\tp.reader = r\n\treturn p\n}", "func NewConsumer(\n\tbrokers []string,\n\tlogger log.Logger,\n\toptions ...ConsumerOption,\n) (*Consumer, error) {\n\t// default values\n\tcfg := kafgo.ReaderConfig{\n\t\tBrokers: brokers,\n\t\tGroupID: defaultConsumerGroupID,\n\t\tTopic: defaultTopic,\n\t\tLogger: kafka.LoggerFunc(logger.Debugf),\n\t}\n\n\tcs := &Consumer{\n\t\treader: nil,\n\t\tconfig: &cfg,\n\t}\n\n\tfor _, o := range options {\n\t\to(cs)\n\t}\n\n\tif cs.end == nil {\n\t\treturn nil, errors.Wrap(\n\t\t\tErrCreatingConsumer, \"missing endpoint\",\n\t\t)\n\t}\n\n\tif cs.dec == nil {\n\t\treturn nil, errors.Wrap(\n\t\t\tErrCreatingConsumer, \"missing decoder\",\n\t\t)\n\t}\n\n\tif cs.errFn == nil {\n\t\tcs.errFn = defaultErrorFunc\n\t}\n\n\tif cs.errHandler == nil {\n\t\tcs.errHandler = transport.NewLogErrorHandler(logger)\n\t}\n\treturn cs, nil\n}", "func NewConsumer(topics []string, valueFactory ValueFactory, opts ...ConsumerOption) (*Consumer, error) {\n\tc := &Consumer{\n\t\tvalueFactory: valueFactory,\n\t\tavroAPI: avro.DefaultConfig,\n\t\tensureTopics: true,\n\t}\n\t// Loop through each option\n\tfor _, opt := range opts {\n\t\t// apply option\n\t\topt.applyC(c)\n\t}\n\n\tvar err error\n\n\t// if consumer not provided - make one\n\tif c.KafkaConsumer == nil {\n\t\t// if kafka config not provided - build default one\n\t\tif c.kafkaCfg == nil {\n\t\t\tvar envCfg struct {\n\t\t\t\tBroker string `env:\"KAFKA_BROKER\" envDefault:\"localhost:9092\"`\n\t\t\t\tCAFile string 
`env:\"KAFKA_CA_FILE\"`\n\t\t\t\tKeyFile string `env:\"KAFKA_KEY_FILE\"`\n\t\t\t\tCertificateFile string `env:\"KAFKA_CERTIFICATE_FILE\"`\n\t\t\t\tGroupID string `env:\"KAFKA_GROUP_ID\"`\n\t\t\t}\n\t\t\tif err := env.Parse(&envCfg); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\t// default configuration\n\t\t\tc.kafkaCfg = &kafka.ConfigMap{\n\t\t\t\t\"bootstrap.servers\": envCfg.Broker,\n\t\t\t\t\"socket.keepalive.enable\": true,\n\t\t\t\t\"enable.auto.commit\": false,\n\t\t\t\t\"enable.partition.eof\": true,\n\t\t\t\t\"session.timeout.ms\": 6000,\n\t\t\t\t\"auto.offset.reset\": \"earliest\",\n\t\t\t\t\"group.id\": envCfg.GroupID,\n\t\t\t}\n\n\t\t\tif envCfg.CAFile != \"\" {\n\t\t\t\t// configure SSL\n\t\t\t\tc.kafkaCfg.SetKey(\"security.protocol\", \"ssl\")\n\t\t\t\tc.kafkaCfg.SetKey(\"ssl.ca.location\", envCfg.CAFile)\n\t\t\t\tc.kafkaCfg.SetKey(\"ssl.key.location\", envCfg.KeyFile)\n\t\t\t\tc.kafkaCfg.SetKey(\"ssl.certificate.location\", envCfg.CertificateFile)\n\t\t\t}\n\t\t}\n\n\t\tif c.KafkaConsumer, err = kafka.NewConsumer(c.kafkaCfg); err != nil {\n\t\t\treturn nil, errors.WithMessage(err, \"cannot initialize kafka consumer\")\n\t\t}\n\t}\n\n\tif c.srClient == nil {\n\t\tif c.srURL == nil {\n\t\t\tvar envCfg struct {\n\t\t\t\tSchemaRegistry *url.URL `env:\"KAFKA_SCHEMA_REGISTRY\" envDefault:\"http://localhost:8081\"`\n\t\t\t}\n\t\t\tif err := env.Parse(&envCfg); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tc.srURL = envCfg.SchemaRegistry\n\t\t}\n\n\t\tif c.srClient, err = NewCachedSchemaRegistryClient(c.srURL.String()); err != nil {\n\t\t\treturn nil, errors.WithMessage(err, \"cannot initialize schema registry client\")\n\t\t}\n\t}\n\n\tif c.eventHandler == nil {\n\t\tc.eventHandler = func(event kafka.Event) {\n\t\t\tlog.Println(event)\n\t\t}\n\t}\n\n\tif topics != nil {\n\t\tif err := c.KafkaConsumer.SubscribeTopics(topics, nil); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif c.ensureTopics {\n\t\t\tif err = c.EnsureTopics(topics); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn c, nil\n}", "func NewConsumer(log logrus.FieldLogger, conf Config, opts ...ConfigOpt) (Consumer, error) {\n\t// See Reference at https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md\n\tkafkaConf := conf.baseKafkaConfig()\n\t_ = kafkaConf.SetKey(\"enable.auto.offset.store\", false) // manually StoreOffset after processing a message. 
Otherwise races may happen.)\n\n\t// In case we try to assign an offset out of range (greater than log-end-offset), consumer will use start consuming from offset zero.\n\t_ = kafkaConf.SetKey(\"auto.offset.reset\", \"earliest\")\n\n\tconf.Consumer.Apply(kafkaConf)\n\tfor _, opt := range opts {\n\t\topt(kafkaConf)\n\t}\n\n\tif err := conf.configureAuth(kafkaConf); err != nil {\n\t\treturn nil, errors.Wrap(err, \"error configuring auth for the Kafka consumer\")\n\t}\n\n\tconsumer, err := kafkalib.NewConsumer(kafkaConf)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif conf.RequestTimeout == 0 {\n\t\tconf.RequestTimeout = DefaultTimeout\n\t}\n\n\tcc := &ConfluentConsumer{\n\t\tc: consumer,\n\t\tconf: conf,\n\t\tlog: log,\n\t}\n\n\tlogFields := logrus.Fields{\"kafka_topic\": cc.conf.Topic}\n\n\tif cc.conf.Consumer.Partition != nil || cc.conf.Consumer.PartitionKey != \"\" {\n\t\t// set the default partitioner algorithm\n\t\tif cc.conf.Consumer.PartitionerAlgorithm == \"\" {\n\t\t\tcc.conf.Consumer.PartitionerAlgorithm = PartitionerMurMur2\n\t\t}\n\t\t// Set the partition if a key is set to determine the partition\n\t\tif cc.conf.Consumer.PartitionKey != \"\" && cc.conf.Consumer.PartitionerAlgorithm != \"\" {\n\t\t\tcc.AssignPartitionByKey(cc.conf.Consumer.PartitionKey, cc.conf.Consumer.PartitionerAlgorithm)\n\t\t}\n\n\t\tlogFields[\"kafka_partition_key\"] = cc.conf.Consumer.PartitionKey\n\t\tlogFields[\"kafka_partition\"] = *cc.conf.Consumer.Partition\n\t}\n\n\tcc.setupRebalanceHandler()\n\tcc.log.WithFields(logFields).Debug(\"Subscribing to Kafka topic\")\n\tif serr := cc.c.Subscribe(cc.conf.Topic, cc.rebalanceHandler); serr != nil {\n\t\terr = errors.Wrap(serr, \"error subscribing to topic\")\n\t}\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn cc, nil\n}", "func New(client *redis.Client, group, consumer string, options ...Option) *Consumer {\n\tcfg := &config{\n\t\tgroup: group,\n\t\tconsumer: consumer,\n\t}\n\tfor _, opt := range options {\n\t\topt(cfg)\n\t}\n\tlastIDs := make(map[string]string)\n\tfor _, stream := range cfg.streams {\n\t\tlastIDs[stream] = \"0-0\"\n\t}\n\n\treturn &Consumer{\n\t\tclient: client,\n\t\tcfg: cfg,\n\t\tlastIDs: lastIDs,\n\t}\n}", "func SetReader(r io.Reader) {\n\treader = r\n}", "func WithRandomReader(reader io.Reader) wrapping.Option {\n\treturn func() interface{} {\n\t\treturn OptionFunc(func(o *options) error {\n\t\t\to.WithRandomReader = reader\n\t\t\treturn nil\n\t\t})\n\t}\n}", "func Consume(topicName string, groupID string) {\n\tfmt.Println(\"Consumer started.\")\n\n\t// make a new reader that consumes from topic-A\n\tr := kafka.NewReader(kafka.ReaderConfig{\n\t\tBrokers: []string{\"localhost:9092\"},\n\t\tGroupID: groupID,\n\t\tTopic: topicName,\n\t\tMinBytes: 10e3, // 10KB\n\t\tMaxBytes: 10e6, // 10MB\n\t})\n\n\tfor {\n\t\tm, err := r.ReadMessage(context.Background())\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t\tfmt.Printf(\"message at topic/partition/offset %v/%v/%v: %s = %s\\n\", m.Topic, m.Partition, m.Offset, string(m.Key), string(m.Value))\n\t}\n\n\tr.Close()\n\tfmt.Println(\"Consumer closed.\")\n}", "func WithAutoCommitConsumerOption(flag bool) ConsumerOption {\n\treturn func(c *Consumer) { c.autocommit = flag }\n}", "func NewReaderConfig(r io.Reader, config ReaderConfig) *Reader {\n\th, _ := NewDefaultHandle()\n\n\tif config.OutputFormat != nil {\n\t\th.SetOutputFormat(*config.OutputFormat)\n\t}\n\n\tif config.BufferSize == 0 {\n\t\tconfig.BufferSize = DefaultConfig.BufferSize\n\t}\n\n\th.OpenFeed()\n\n\treturn 
&Reader{\n\t\tinput: r,\n\t\th: h,\n\t\tmaxBadBytes: 4096,\n\t\tfeedBuf: make([]byte, config.BufferSize),\n\t}\n\n}", "func NewConsumer(c *aws.Config, stream string, shard string, optionFns ...ConsumerOptionsFn) (*Consumer, error) {\n\tconsumer := &Consumer{consumerOptions: defaultConsumerOptions()}\n\tfor _, optionFn := range optionFns {\n\t\toptionFn(consumer)\n\t}\n\n\tif consumer.reader == nil {\n\t\tr, err := NewKinesisReader(c, stream, shard)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tconsumer.reader = r\n\t}\n\n\tconsumer.LogHelper = &LogHelper{\n\t\tLogLevel: consumer.logLevel,\n\t\tLogger: c.Logger,\n\t}\n\n\treturn consumer, nil\n}", "func withKubeClientProvider(kcp kube.ClientProvider) option {\n\treturn func(p *kubernetesprocessor) error {\n\t\treturn p.initKubeClient(p.logger, kcp)\n\t}\n}", "func NewReader(opts Opts, outgoing chan<- shared.Representation, service *Service) *Reader {\n\tc := Reader{\n\t\topts: opts,\n\t\toutgoing: outgoing,\n\t\tservice: service,\n\t\tdeviceAwayTime: map[string]time.Time{},\n\t}\n\n\tservice.SetOpts(opts)\n\n\treturn &c\n}", "func NewReaderWithOptions(opts *ReaderOptions) (r *Reader, err error) {\n\tif err := opts.Validate(); err != nil {\n\t\treturn nil, fmt.Errorf(\"validating reader options: %w\", err)\n\t}\n\tr = &Reader{\n\t\tOptions: opts,\n\t}\n\n\tif err := r.SetImplementation(&ReaderDefaultImpl{}); err != nil {\n\t\treturn nil, fmt.Errorf(\"setting the reader implementation: %w\", err)\n\t}\n\n\treturn r, nil\n}", "func (s *MockQueueService) NewReader(topic string) queue.Reader {\n\treturn &mockReader{\n\t\tService: s,\n\t\tTopic: topic,\n\t}\n}", "func NewReader(ctx context.Context, client pb.RoutedJournalClient, req pb.ReadRequest) *Reader {\n\tvar r = &Reader{\n\t\tRequest: req,\n\t\tctx: ctx,\n\t\tclient: client,\n\t}\n\treturn r\n}", "func WithFileReader(r FileReader) LoadOption {\n\tif r == nil {\n\t\tpanic(\"WithFileReader: nil reader\")\n\t}\n\treturn fnLoadOption(func(opts *loadOptions) {\n\t\topts.fileReader = r\n\t})\n}", "func TestConsumerOptions(tb testing.TB, options ...Option) []Option {\n\ttb.Helper()\n\n\tvar defaults []Option\n\n\tconfig := ConsumerOptions(func(c *Consumer) {\n\t\tc.Kafka = kafkaconfig.TestConsumer(tb)\n\t})\n\n\tdefaults = append(defaults, config)\n\n\treturn append(defaults, options...)\n}", "func initConsumer() sarama.Consumer {\n\tsarama.Logger = log.New(os.Stdout, \"\", log.Ltime)\n\n\tconfig := sarama.NewConfig()\n\tconfig.ClientID = CLIENTID\n\tconfig.Consumer.Return.Errors = true\n\n\tbrokers := []string{BROKERS}\n\n\tmaster, err := sarama.NewConsumer(brokers, config)\n\tif err != nil {\n\t\tfmt.Println(\"error create master consumer: \")\n\t\tpanic(err)\n\t}\n\n\treturn master\n}", "func InitConsumer(broker, group string) {\n\tvar err error\n\tutils.ConsumerObject, err = kafka.NewConsumer(&kafka.ConfigMap{\n\t\t\"bootstrap.servers\": broker,\n\t\t\"group.id\": group,\n\t\t\"session.timeout.ms\": 6000,\n\t\t\"go.events.channel.enable\": true,\n\t\t\"go.application.rebalance.enable\": true,\n\t\t// Enable generation of PartitionEOF when the\n\t\t// end of a partition is reached.\n\t\t\"enable.partition.eof\": true,\n\t\t\"auto.offset.reset\": \"earliest\"})\n\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"Failed to create consumer: %s\\n\", err)\n\t\tos.Exit(1)\n\t}\n}", "func serialReaderConsumer(consumer chan interface{}, conn net.Conn, top *topic.Topic) {\n\tdefer conn.Close()\n\tdefer top.Unregister(consumer)\n\n\tfor {\n\t\tselect {\n\t\t// A new message was received from 
this Topic consumer\n\t\tcase msg, ok := <-consumer:\n\t\t\tif ok {\n\t\t\t\ti, err := conn.Write(msg.([]byte))\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Printf(\"Error writing %v bytes to %v: %v\\n\", i, conn.RemoteAddr(), err)\n\t\t\t\t\tlog.Println(\"Client hung up. Closing connection.\")\n\t\t\t\t\tconn.Close()\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tlog.Printf(\"Channel closed (%v)\", conn.RemoteAddr())\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n}", "func NewConsumer(addrs, zookeepers []string, group, topic string, config *Config) (*Consumer, error) {\n\tif config == nil {\n\t\tconfig = new(Config)\n\t}\n\n\tclient, err := sarama.NewClient(addrs, config.Config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tc, err := NewConsumerFromClient(client, zookeepers, group, topic, config)\n\tif err != nil {\n\t\tclient.Close()\n\t\treturn nil, err\n\t}\n\tc.ownClient = true\n\treturn c, nil\n}", "func WithGroupIDConsumerOption(groupID string) ConsumerOption {\n\treturn func(c *Consumer) {\n\t\tc.config.GroupID = groupID\n\t}\n}", "func (s *scanner) setReader(r io.Reader) *scanner {\n\ts.r = r\n\treturn s\n}", "func FromReader(r io.Reader, f Unmarshaler) Option {\n\treturn func(c Config, m ...OptionMeta) error {\n\t\tbuf, err := ioutil.ReadAll(r)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn f(buf, c)\n\t}\n}", "func WithReadHandler(handler ReadHandler) Opt {\n\treturn func(options *Options) {\n\t\toptions.HandleRead = handler\n\t}\n}", "func NewConsumer(ctx context.Context) (*Consumer, error) {\n\t// TODO support filter in downstream sink\n\ttz, err := util.GetTimezone(timezone)\n\tif err != nil {\n\t\treturn nil, errors.Annotate(err, \"can not load timezone\")\n\t}\n\tctx = util.PutTimezoneInCtx(ctx, tz)\n\tfilter, err := cdcfilter.NewFilter(config.GetDefaultReplicaConfig())\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\tc := new(Consumer)\n\tc.fakeTableIDGenerator = &fakeTableIDGenerator{\n\t\ttableIDs: make(map[string]int64),\n\t}\n\tc.sinks = make([]*struct {\n\t\tsink.Sink\n\t\tresolvedTs uint64\n\t}, kafkaPartitionNum)\n\tctx, cancel := context.WithCancel(ctx)\n\terrCh := make(chan error, 1)\n\topts := map[string]string{}\n\tfor i := 0; i < int(kafkaPartitionNum); i++ {\n\t\ts, err := sink.NewSink(ctx, \"kafka-consumer\", downstreamURIStr, filter, config.GetDefaultReplicaConfig(), opts, errCh)\n\t\tif err != nil {\n\t\t\tcancel()\n\t\t\treturn nil, errors.Trace(err)\n\t\t}\n\t\tc.sinks[i] = &struct {\n\t\t\tsink.Sink\n\t\t\tresolvedTs uint64\n\t\t}{Sink: s}\n\t}\n\tsink, err := sink.NewSink(ctx, \"kafka-consumer\", downstreamURIStr, filter, config.GetDefaultReplicaConfig(), opts, errCh)\n\tif err != nil {\n\t\tcancel()\n\t\treturn nil, errors.Trace(err)\n\t}\n\tgo func() {\n\t\terr := <-errCh\n\t\tif errors.Cause(err) != context.Canceled {\n\t\t\tlog.Error(\"error on running consumer\", zap.Error(err))\n\t\t} else {\n\t\t\tlog.Info(\"consumer exited\")\n\t\t}\n\t\tcancel()\n\t}()\n\tc.ddlSink = sink\n\tc.ready = make(chan bool)\n\treturn c, nil\n}", "func NewReader(r io.Reader) (Reader, error) {\n\treturn NewReaderOpt(r, nil)\n}", "func ProbeReader(ctx context.Context, reader io.Reader, extraFFProbeOptions ...string) (data *ProbeData, err error) {\n\targs := append([]string{\n\t\t\"-loglevel\", \"fatal\",\n\t\t\"-print_format\", \"json\",\n\t\t\"-show_format\",\n\t\t\"-show_streams\",\n\t}, extraFFProbeOptions...)\n\n\t// Add the file from stdin argument\n\targs = append(args, \"-\")\n\n\tcmd := exec.CommandContext(ctx, binPath, args...)\n\tcmd.Stdin 
= reader\n\tcmd.SysProcAttr = procAttributes()\n\n\treturn runProbe(cmd)\n}", "func WithCounter(counter Counter) Option {\n\treturn func(c *Consumer) error {\n\t\tc.counter = counter\n\t\treturn nil\n\t}\n}", "func NewReader(config string, dataset interface{}) (reader *Reader, e error) {\n\treader = &Reader{\n\t\tInput: \"\",\n\t\tSkip: 0,\n\t\tTrimSpace: true,\n\t\tRejected: DefaultRejected,\n\t\tInputMetadata: nil,\n\t\tMaxRows: DefaultMaxRows,\n\t\tDatasetMode: DefDatasetMode,\n\t\tdataset: dataset,\n\t\tfRead: nil,\n\t\tfReject: nil,\n\t\tbufRead: nil,\n\t\tbufReject: nil,\n\t}\n\n\te = reader.Init(config, dataset)\n\tif e != nil {\n\t\treturn nil, e\n\t}\n\n\treturn\n}", "func NewReader(r io.Reader) *Reader {\n\treturn NewReaderConfig(r, ReaderConfig{})\n\n}", "func NewConsumerClient(brokers, group, topics string, oldest, verbose bool) *consumerClient {\n\tc := &consumerClient{\n\t\tbrokers: brokers,\n\t\tgroup: group,\n\t\ttopics: topics,\n\t\toldest: oldest,\n\t\tverbose: verbose,\n\t\tversion: \"0.10.2.0\", //连云端ckafka版本必须是这个,没事别乱改\n\t}\n\treturn c\n}", "func NewWithReader(r io.Reader) (*Session, error) {\n\tv := viper.New()\n\tv.SetConfigType(\"yaml\")\n\tif err := v.ReadConfig(r); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn NewWithViper(v), nil\n}", "func NewReader(log log.Logger, cfg config.SQSReader, maxTimeNoRead *time.Duration, processor queue.MessageProcessor) (*Reader, error) {\n\tdelta := cfg.VisibilityTimeout - cfg.ProcessQuantum\n\tif delta < MaxQuantumDelta {\n\t\terr := errors.New(\"difference between visibility timeout and quantum is too short\")\n\t\treturn nil, err\n\t}\n\tvar consumer *Reader\n\tsess, err := session.NewSession()\n\tif err != nil {\n\t\terr = fmt.Errorf(\"error creating AWSSSession, %w\", err)\n\t\treturn consumer, err\n\t}\n\tarn, err := arn.Parse(cfg.ARN)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error parsing SQS queue ARN: %v\", err)\n\t}\n\n\tawsCfg := aws.NewConfig()\n\tif arn.Region != \"\" {\n\t\tawsCfg = awsCfg.WithRegion(arn.Region)\n\t}\n\tif cfg.Endpoint != \"\" {\n\t\tawsCfg = awsCfg.WithEndpoint(cfg.Endpoint)\n\t}\n\n\tparams := &sqs.GetQueueUrlInput{\n\t\tQueueName: aws.String(arn.Resource),\n\t}\n\tif arn.AccountID != \"\" {\n\t\tparams.SetQueueOwnerAWSAccountId(arn.AccountID)\n\t}\n\n\tsrv := sqs.New(sess, awsCfg)\n\tresp, err := srv.GetQueueUrl(params)\n\tif err != nil {\n\t\treturn consumer, fmt.Errorf(\"error retrieving SQS queue URL: %v\", err)\n\t}\n\n\treceiveParams := sqs.ReceiveMessageInput{\n\t\tQueueUrl: aws.String(*resp.QueueUrl),\n\t\tMaxNumberOfMessages: aws.Int64(1),\n\t\tWaitTimeSeconds: aws.Int64(0),\n\t\tVisibilityTimeout: aws.Int64(int64(cfg.VisibilityTimeout)),\n\t\tAttributeNames: []*string{aws.String(\"ApproximateReceiveCount\")},\n\t}\n\treturn &Reader{\n\t\tRWMutex: &sync.RWMutex{},\n\t\tProcessor: processor,\n\t\tvisibilityTimeout: cfg.VisibilityTimeout,\n\t\tprocessMessageQuantum: cfg.ProcessQuantum,\n\t\tpoolingInterval: cfg.PollingInterval,\n\t\tlog: log,\n\t\twg: &sync.WaitGroup{},\n\t\treceiveParams: receiveParams,\n\t\tsqs: srv,\n\t\tmaxTimeNoRead: maxTimeNoRead,\n\t\tlastMessageReceived: nil,\n\t\tnProcessingMessages: 0,\n\t}, nil\n}", "func defaultConsumerOptions() *consumerOptions {\n\treturn &consumerOptions{\n\t\tqueueDepth: 10000,\n\t\tconcurrency: 10,\n\t\tStats: &NilConsumerStatsCollector{},\n\t}\n}", "func (c *Client) WithReadOptions(ro ...ReadOption) *Client {\n\tfor _, r := range ro {\n\t\tr.apply(c.readSettings)\n\t}\n\treturn c\n}", "func (serv *Server) pollReader() {\n\tvar 
(\n\t\tlogp = `pollReader`\n\n\t\tlistConn []int\n\t\terr error\n\t\tnumReader int32\n\t\tconn int\n\t)\n\n\tfor {\n\t\tlistConn, err = serv.poll.WaitRead()\n\t\tif err != nil {\n\t\t\tlog.Printf(`%s: %s`, logp, err)\n\t\t\tbreak\n\t\t}\n\n\t\tfor _, conn = range listConn {\n\t\t\tselect {\n\t\t\tcase serv.qreader <- conn:\n\t\t\tdefault:\n\t\t\t\tnumReader = serv.numGoReader.Load()\n\t\t\t\tif numReader < serv.Options.maxGoroutineReader {\n\t\t\t\t\tgo serv.reader()\n\t\t\t\t\tserv.numGoReader.Add(1)\n\t\t\t\t\tserv.qreader <- conn\n\t\t\t\t} else {\n\t\t\t\t\tgo serv.delayReader(conn)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}", "func NewReader(logger *zap.Logger, r io.Reader) *Reader {\n\treturn &Reader{logger.Named(\"r\"), r, nil}\n}", "func (ci *ConnectionInfo) ReaderConfig() ReaderConfig {\n\treturn ci.serveconn.reader\n}", "func ConsumerOverrideReplicas(r int) ConsumerOption {\n\treturn func(o *api.ConsumerConfig) error {\n\t\to.Replicas = r\n\t\treturn nil\n\t}\n}", "func NewConsumer(cfg *ConsumerConfig, handler MessageHanlder) (*Consumer, error) {\n\tclusterConfig := cluster.NewConfig()\n\tclusterConfig.Metadata.RefreshFrequency = 1 * time.Minute\n\tclusterConfig.Group.Mode = cluster.ConsumerModePartitions\n\tclusterConfig.Group.Return.Notifications = true\n\tclusterConfig.Consumer.Offsets.Initial = sarama.OffsetNewest\n\tclusterConfig.Consumer.Return.Errors = true\n\tclientName := generateClientID(cfg.GroupID)\n\tclusterConfig.ClientID = clientName\n\n\tc, err := cluster.NewConsumer(cfg.Brokers, cfg.GroupID, cfg.Topic, clusterConfig)\n\tif err != nil {\n\t\tlog.Printf(\"Kafka Consumer: [%s] init fail, %v\", clientName, err)\n\t\treturn nil, err\n\t}\n\n\tvalidConfigValue(cfg)\n\tconsumer := &Consumer{\n\t\tclientName: clientName,\n\t\tcfg: cfg,\n\t\tconsumer: c,\n\t\tmsgHanlder: handler,\n\t\tpartitionWorkers: make([]*partitionConsumerWorker, 0),\n\t}\n\tlog.Printf(\"Kafka Consumer: [%s] init success\", clientName)\n\n\treturn consumer, nil\n}", "func NewReader(reader io.Reader) (*Reader, error) {\n\treturn NewReaderMode(reader, DefaultMode)\n}", "func WithReadConfig(c Config) Option {\n\treturn func(ro *repoOptions) {\n\t\tro.readConfigs = append(ro.readConfigs, c)\n\t}\n}", "func (c *DChSectionReader) ProvideSectionReader(dat *io.SectionReader) {\n\t<-c.req\n\tc.dat <- dat\n}", "func WithCheckpoint(checkpoint Checkpoint) Option {\n\treturn func(c *Consumer) error {\n\t\tc.checkpoint = checkpoint\n\t\treturn nil\n\t}\n}", "func New(configReader *reader.Config) *cobra.Command {\n\n\treturn &cobra.Command{\n\t\tUse: \"reader\",\n\t\tShort: \"Start reader mode\",\n\t\tLong: \"\",\n\t\tRun: func(cmd *cobra.Command, args []string) {\n\n\t\t\tin := configReader.InputStream\n\t\t\tout := configReader.OutputStream\n\n\t\t\tif err := reader.Run(in, out); err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t},\n\t}\n}", "func OverwriteConsumer(overwrite runtime.Consumer) Option {\n\treturn OverwriteConsumerForStatus(overwrite, ForAllStatusCodes)\n}", "func (c *KafkaCommitter) SetReadLevel(l CommitterLevel) {\n\tc.readLevel = l\n}", "func WithRunner(r Runner) Option {\n\treturn func(c *Client) {\n\t\tc.runner = r\n\t}\n}", "func newKafkaConsumer() sarama.Consumer {\n\n\tkafkaBroker := os.Getenv(\"KAFKA_BROKER\")\n\n\tif runtime.GOOS == \"darwin\" {\n\t\tbrokers = []string{\"localhost:9092\"}\n\t} else {\n\t\tif kafkaBroker == \"\" {\n\t\t\tfmt.Printf(\"$KAFKA_BROKER must be set\")\n\t\t\tos.Exit(-1)\n\t\t}\n\t\tbrokers = []string{kafkaBroker}\n\t}\n\n\tconsumer, err := sarama.NewConsumer(brokers, 
newKafkaConfiguration())\n\n\tfmt.Print(\"Creating new Kafka Consumer \\n\")\n\n\tif err != nil {\n\t\tfmt.Printf(\"Kafka error: %s\\n\", err)\n\t\tos.Exit(-1)\n\t}\n\n\treturn consumer\n}", "func RetrierOption(retrier *retry.Retrier) ClientOption {\n\treturn func(client *Client) {\n\t\tclient.Retrier = retrier\n\t}\n}", "func OverwriteConsumerForStatus(overwrite runtime.Consumer, forStatusCode int) Option {\n\treturn func(rt *runtime.ClientOperation) {\n\t\trt.Reader = &overwriteConsumerReader{\n\t\t\trequestReader: rt.Reader,\n\t\t\tconsumer: overwrite,\n\t\t\tforStatusCode: forStatusCode,\n\t\t}\n\t}\n}", "func NewConsumerFromClient(client sarama.Client, zookeepers []string, group, topic string, config *Config) (*Consumer, error) {\n\tif config == nil {\n\t\tconfig = new(Config)\n\t}\n\tconfig.normalize()\n\n\t// Validate configuration\n\tif err := config.Validate(); err != nil {\n\t\treturn nil, err\n\t} else if topic == \"\" {\n\t\treturn nil, sarama.ConfigurationError(\"Empty topic\")\n\t} else if group == \"\" {\n\t\treturn nil, sarama.ConfigurationError(\"Empty group\")\n\t}\n\n\t// Generate unique consumer ID\n\tid := config.customID\n\tif id == \"\" {\n\t\tprefix := config.IDPrefix\n\t\tif prefix == \"\" {\n\t\t\tprefix = group\n\t\t}\n\t\tid = newGUID(prefix)\n\t}\n\n\t// Create sarama consumer instance\n\tscsmr, err := sarama.NewConsumerFromClient(client)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Connect to zookeeper\n\tzoo, err := NewZK(zookeepers, config.ZKSessionTimeout)\n\tif err != nil {\n\t\tscsmr.Close()\n\t\treturn nil, err\n\t}\n\n\t// Initialize consumer\n\tconsumer := &Consumer{\n\t\tid: id,\n\t\tgroup: group,\n\t\ttopic: topic,\n\n\t\tzoo: zoo,\n\t\tconfig: config,\n\t\tclient: client,\n\t\tconsumer: scsmr,\n\n\t\tread: make(map[int32]int64),\n\t\tacked: make(map[int32]int64),\n\t\tpartIDs: make([]int32, 0),\n\n\t\tmessages: make(chan *sarama.ConsumerMessage),\n\t\terrors: make(chan *sarama.ConsumerError),\n\t}\n\n\t// Register consumer group and consumer itself\n\tif err := consumer.register(); err != nil {\n\t\tconsumer.closeAll()\n\t\treturn nil, err\n\t}\n\n\tconsumer.closer.Go(consumer.signalLoop)\n\tif config.CommitEvery > 0 {\n\t\tconsumer.closer.Go(consumer.commitLoop)\n\t}\n\treturn consumer, nil\n}", "func openReader(\n\tctx context.Context,\n\tdb *sql.DB,\n\tstoreID uint64,\n\taddr gospel.Address,\n\tlimit *rate.Limiter,\n\tlogger twelf.Logger,\n\topts *options.ReaderOptions,\n) (*Reader, error) {\n\t// Note that runCtx is NOT derived from ctx, which is only used for the\n\t// opening of the reader itself.\n\trunCtx, cancel := context.WithCancel(context.Background())\n\n\taccetableLatency := getAcceptableLatency(opts)\n\n\tr := &Reader{\n\t\tlogger: logger,\n\t\tfacts: make(chan gospel.Fact, getReadBufferSize(opts)),\n\t\tend: make(chan struct{}),\n\t\tdone: make(chan error, 1),\n\t\tctx: runCtx,\n\t\tcancel: cancel,\n\t\taddr: addr,\n\t\tglobalLimit: limit,\n\t\tadaptiveLimit: rate.NewLimiter(rate.Every(accetableLatency), 1),\n\t\tacceptableLatency: accetableLatency,\n\t\tstarvationLatency: getStarvationLatency(opts),\n\t\taverageLatency: ewma.NewMovingAverage(averageLatencyAge),\n\t}\n\n\tif logger.IsDebug() {\n\t\tr.debug = &readerDebug{\n\t\t\topts: opts,\n\t\t\taveragePollRate: metrics.NewRateCounter(),\n\t\t\taverageFactRate: metrics.NewRateCounter(),\n\t\t}\n\t}\n\n\tif err := r.prepareStatement(ctx, db, storeID, opts); err != nil {\n\t\treturn nil, err\n\t}\n\n\tr.logInitialization()\n\n\tgo r.run()\n\n\treturn r, nil\n}", "func 
WrapConsumerForStatusCode(wrapper WrapperConsumer, forStatusCode int) Option {\n\treturn func(rt *runtime.ClientOperation) {\n\t\trt.Reader = &wrapConsumerReader{\n\t\t\trequestReader: rt.Reader,\n\t\t\twrapper: wrapper,\n\t\t\tforStatusCode: forStatusCode,\n\t\t}\n\t}\n}", "func WithConsumerGroupID(groupID string) ConfigOpt {\n\treturn func(c *kafkalib.ConfigMap) {\n\t\t_ = c.SetKey(\"group.id\", groupID)\n\t}\n}", "func InitSetKafkaConfig(config KafkaConfig) model.Option {\n\treturn model.FuncOption(func(d *model.Dispatcher) { d.KafkaConfig = core.KafkaConfig(config) })\n}", "func (c *Connection) ConsumerWithConfig(done chan bool, config *Config, callback func(msgs <-chan amqp.Delivery)) error {\n\tmsgs, err := c.Channel.Consume(\n\t\tconfig.Queue,\n\t\tconfig.ConsumerTag,\n\t\tconfig.Options.Consume.AutoAck,\n\t\tconfig.Options.Consume.Exclusive,\n\t\tconfig.Options.Consume.NoLocal,\n\t\tconfig.Options.Consume.NoWait,\n\t\tconfig.Options.Consume.Args,\n\t)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tgo callback(msgs)\n\n\tlog.Println(\"Waiting for messages...\")\n\n\tfor {\n\t\tselect {\n\t\tcase <-done:\n\t\t\tc.Channel.Close()\n\t\t\tc.Conn.Close()\n\n\t\t\treturn nil\n\t\t}\n\t}\n}", "func NewReader(r io.Reader) *Reader {\n\treturn &Reader{\n\t\tr: bufio.NewReader(r),\n\t\tcurrentSection: \"default\",\n\t}\n}", "func (d *KrakenStorageDriver) Reader(ctx context.Context, path string, offset int64) (io.ReadCloser, error) {\n\tlog.Debugf(\"(*KrakenStorageDriver).Reader %s\", path)\n\tpathType, pathSubType, err := ParsePath(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar reader io.ReadCloser\n\tswitch pathType {\n\tcase _uploads:\n\t\treader, err = d.uploads.reader(path, pathSubType, offset)\n\tcase _blobs:\n\t\treader, err = d.blobs.reader(ctx, path, offset)\n\tdefault:\n\t\treturn nil, InvalidRequestError{path}\n\t}\n\tif err != nil {\n\t\treturn nil, toDriverError(err, path)\n\t}\n\treturn reader, nil\n}", "func initClientReader(player *models.Player, host string) {\n\taddr := net.JoinHostPort(host, sendPort)\n\tlog.Printf(\"[Reader] Client is making a connection to %s\", addr)\n\n\tconn, err := net.Dial(\"tcp\", addr)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tlog.Printf(\"[Reader] Client is connected\")\n\n\tgo addNewPlayer(player.Scene, conn)\n}", "func WithEndpointConsumerOption(end endpoint.Endpoint) ConsumerOption {\n\treturn func(c *Consumer) { c.end = end }\n}", "func (o *ApplyTKGConfigForDockerReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewApplyTKGConfigForDockerOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 400:\n\t\tresult := NewApplyTKGConfigForDockerBadRequest()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 500:\n\t\tresult := NewApplyTKGConfigForDockerInternalServerError()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func NewConsumer(stream string, sess *session.Session, opts ...Option) (*Consumer, error) {\n\tif stream == \"\" {\n\t\treturn nil, fmt.Errorf(\"must provide stream name\")\n\t}\n\n\tc := &Consumer{\n\t\tstreamName: stream,\n\t\tcheckpoint: 
&noopCheckpoint{},\n\t\tcounter: &noopCounter{},\n\t\tlogger: log.New(os.Stderr, \"kinesis-consumer: \", log.LstdFlags),\n\t}\n\n\t// set options\n\tfor _, opt := range opts {\n\t\tif err := opt(c); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t// provide a default kinesis client\n\tif c.client == nil {\n\t\tc.client = kinesis.New(sess)\n\t}\n\n\treturn c, nil\n}", "func New(c *kafka.ConfigMap, topic string, clientID string) (*Consumer, error) {\n\tif err := c.SetKey(\"group.id\", clientID); err != nil {\n\t\treturn nil, err\n\t}\n\tconsumer, err := kafka.NewConsumer(c)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &Consumer{topic, consumer}, nil\n}", "func WithLogger(logger *log.Logger) Option {\n\treturn func(c *Consumer) error {\n\t\tc.logger = logger\n\t\treturn nil\n\t}\n}", "func TestNewConsumer(tb testing.TB, defaults bool, options ...Option) Consumer {\n\tc, err := NewConsumer()\n\trequire.NoError(tb, err)\n\n\tif !defaults {\n\t\tc.Inmem = inmemconfig.Consumer{Store: nil}\n\t\tc.Kafka = kafkaconfig.Consumer{}\n\t\tc.Pubsub = pubsubconfig.Consumer{}\n\t\tc.Standardstream = standardstreamconfig.Consumer{}\n\t\tc.Logger = nil\n\t\tc.HandleInterrupt = false\n\t\tc.HandleErrors = false\n\t\tc.Name = \"\"\n\t\tc.AllowEnvironmentBasedConfiguration = false\n\t}\n\n\tfor _, option := range options {\n\t\toption.apply(&c, nil)\n\t}\n\n\terr = envconfig.Process(c.Name, &c)\n\trequire.NoError(tb, err)\n\n\treturn c\n}", "func WithReadBuffer(buffSize int) RangeOption {\n\treturn func(o rangeOptions) rangeOptions {\n\t\to.buffSize = buffSize\n\t\treturn o\n\t}\n}", "func ClientOptReadLimit(limit uint32) ClientOpt {\n\treturn func(t Client) error {\n\t\tif t == nil {\n\t\t\treturn errors.New(errors.KsiInvalidArgumentError).AppendMessage(\"Missing network client base object.\")\n\t\t}\n\n\t\tc, ok := t.(ReadLimiter)\n\t\tif !ok {\n\t\t\treturn errors.New(errors.KsiNotImplemented).AppendMessage(\n\t\t\t\tfmt.Sprintf(\"Newtwork client %s does not implement ReadLimiter interface.\", reflect.TypeOf(t)))\n\t\t}\n\t\tif err := c.SetReadLimit(limit); err != nil {\n\t\t\treturn errors.KsiErr(err).AppendMessage(\"Unable to set read limit.\")\n\t\t}\n\n\t\treturn nil\n\t}\n}", "func WithBeforeFuncsConsumerOption(fns ...BeforeFunc) ConsumerOption {\n\treturn func(c *Consumer) { c.befores = append(c.befores, fns...) 
}\n}", "func (lm *SimpleManager) NewReader(r io.Reader) *Reader {\n\tlr := NewReader(r)\n\tlm.Manage(lr)\n\treturn lr\n}", "func NewReader(dev *Device, readerName string) *Reader {\n\tr := &Reader{\n\t\tdev: dev,\n\t\treaderName: readerName,\n\t}\n\treturn r\n}", "func NewReader(store EntryStore) Reader {\n\treturn defaultReader{\n\t\tstore: store,\n\t}\n}", "func NewLimiterReaderWithConfig(rd io.Reader, rate infounit.BitRate, conf *LimiterConfig) (*LimiterReader, error) {\n\tif conf == nil {\n\t\tconf = DefaultLimiterConfig\n\t}\n\tr := &LimiterReader{\n\t\trd: rd,\n\t\trate: rate,\n\t\tresolution: conf.Resolution,\n\t\tmaxWait: conf.MaxWait,\n\t\tclosedChan: make(chan struct{}),\n\t}\n\tlim, err := newLimiter(r.rate, r.resolution, r.maxWait)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tr.lim = lim\n\treturn r, nil\n}", "func NewConsumer() (*cluster.Consumer, error) {\n\n\tconfig := cluster.NewConfig()\n\tconfig.Consumer.Return.Errors = true\n\tconfig.Group.Return.Notifications = true\n\tconfig.Config.Net.TLS.Enable = true\n\tconfig.Config.Net.SASL.Enable = true\n\tconfig.Config.Net.SASL.User = viper.GetString(\"kafka.user\")\n\tconfig.Config.Net.SASL.Password = viper.GetString(\"kafka.password\")\n\tconfig.ClientID = \"poke.ssl-checker\"\n\tconfig.Consumer.Offsets.Initial = sarama.OffsetOldest\n\tconfig.Consumer.Offsets.CommitInterval = 10 * time.Second\n\n\tconsumerGroup := config.Config.Net.SASL.User + \".\" + viper.GetString(\"host\")\n\tbrokers := viper.GetStringSlice(\"kafka.brokers\")\n\ttopics := viper.GetStringSlice(\"kafka.topics\")\n\n\tconsumer, err := cluster.NewConsumer(brokers, consumerGroup, topics, config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn consumer, nil\n}", "func Reader(r io.Reader) pod.Reader {\n\treturn &reader{reader: r}\n}", "func NewReader() (*Reader, error) {\n\treturn NewReaderWithOptions(DefaultReaderOptions)\n}", "func WithReadTimeout(timeout time.Duration) OptsFunc {\n\treturn func(o *redis.Options) {\n\t\to.ReadTimeout = timeout\n\t}\n}", "func NewLimiterReader(rd io.Reader, rate infounit.BitRate) (*LimiterReader, error) {\n\treturn NewLimiterReaderWithConfig(rd, rate, nil)\n}", "func NewReader(r io.Reader) *Reader {\n\treturn &Reader{\n\t\treader: r,\n\t\tDefault: DefaultSectionName,\n\t\tStrict: DefaultStrictMode,\n\t}\n}", "func (o *GetLeaderboardConfigurationPublicV3Reader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewGetLeaderboardConfigurationPublicV3OK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 400:\n\t\tresult := NewGetLeaderboardConfigurationPublicV3BadRequest()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 401:\n\t\tresult := NewGetLeaderboardConfigurationPublicV3Unauthorized()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 403:\n\t\tresult := NewGetLeaderboardConfigurationPublicV3Forbidden()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 404:\n\t\tresult := NewGetLeaderboardConfigurationPublicV3NotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, 
nil\n\tcase 500:\n\t\tresult := NewGetLeaderboardConfigurationPublicV3InternalServerError()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tdefault:\n\t\tdata, err := ioutil.ReadAll(response.Body())\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\treturn nil, fmt.Errorf(\"Requested GET /leaderboard/v3/public/namespaces/{namespace}/leaderboards/{leaderboardCode} returns an error %d: %s\", response.Code(), string(data))\n\t}\n}", "func WithKafkaMode(broker []string, topic string) *kafkaW {\n\treturn &kafkaW{\n\t\tbrokers: broker,\n\t\ttopic: topic,\n\t}\n}", "func Consumer(config *kafka.ConfigMap, topic string, partition int32, group string) *kafka.Consumer {\n\n\t// return cached consumer, if any\n\tif consumer, ok := consumers[group]; ok {\n\t\tlog.Printf(\"retrieved consumer %s\", group)\n\t\treturn consumer\n\t}\n\n\t// not found in cache,\n\t// create a consumer and return it\n\tconfig.SetKey(\"group.id\", group)\n\tlog.Printf(\"config %v\", config)\n\tconsumer, err := kafka.NewConsumer(config)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn nil\n\t}\n\n\t// assign to a specific topic and partition\n\tassignment := []kafka.TopicPartition{{\n\t\tTopic: &topic,\n\t\tPartition: partition}}\n\tconsumer.Assign(assignment)\n\tconsumers[group] = consumer\n\n\t// cache values and subscribe\n\tlog.Printf(\"created consumer %s for %s:%d \", group, topic, partition)\n\treturn consumer\n}", "func KafkaCommonCobraInit(cmd *cobra.Command, kconf *KafkaCommonConf) {\n\tdefBrokerList := strings.Split(os.Getenv(\"KAFKA_BROKERS\"), \",\")\n\tif len(defBrokerList) == 1 && defBrokerList[0] == \"\" {\n\t\tdefBrokerList = []string{}\n\t}\n\tdefTLSenabled, _ := strconv.ParseBool(os.Getenv(\"KAFKA_TLS_ENABLED\"))\n\tdefTLSinsecure, _ := strconv.ParseBool(os.Getenv(\"KAFKA_TLS_INSECURE\"))\n\tcmd.Flags().StringArrayVarP(&kconf.Brokers, \"brokers\", \"b\", defBrokerList, \"Comma-separated list of bootstrap brokers\")\n\tcmd.Flags().StringVarP(&kconf.ClientID, \"clientid\", \"i\", os.Getenv(\"KAFKA_CLIENT_ID\"), \"Client ID (or generated UUID)\")\n\tcmd.Flags().StringVarP(&kconf.ConsumerGroup, \"consumer-group\", \"g\", os.Getenv(\"KAFKA_CONSUMER_GROUP\"), \"Client ID (or generated UUID)\")\n\tcmd.Flags().StringVarP(&kconf.TopicIn, \"topic-in\", \"t\", os.Getenv(\"KAFKA_TOPIC_IN\"), \"Topic to listen to\")\n\tcmd.Flags().StringVarP(&kconf.TopicOut, \"topic-out\", \"T\", os.Getenv(\"KAFKA_TOPIC_OUT\"), \"Topic to send events to\")\n\tcmd.Flags().StringVarP(&kconf.TLS.ClientCertsFile, \"tls-clientcerts\", \"c\", os.Getenv(\"KAFKA_TLS_CLIENT_CERT\"), \"A client certificate file, for mutual TLS auth\")\n\tcmd.Flags().StringVarP(&kconf.TLS.ClientKeyFile, \"tls-clientkey\", \"k\", os.Getenv(\"KAFKA_TLS_CLIENT_KEY\"), \"A client private key file, for mutual TLS auth\")\n\tcmd.Flags().StringVarP(&kconf.TLS.CACertsFile, \"tls-cacerts\", \"C\", os.Getenv(\"KAFKA_TLS_CA_CERTS\"), \"CA certificates file (or host CAs will be used)\")\n\tcmd.Flags().BoolVarP(&kconf.TLS.Enabled, \"tls-enabled\", \"e\", defTLSenabled, \"Encrypt network connection with TLS (SSL)\")\n\tcmd.Flags().BoolVarP(&kconf.TLS.InsecureSkipVerify, \"tls-insecure\", \"z\", defTLSinsecure, \"Disable verification of TLS certificate chain\")\n\tcmd.Flags().StringVarP(&kconf.SASL.Username, \"sasl-username\", \"u\", os.Getenv(\"KAFKA_SASL_USERNAME\"), \"Username for SASL authentication\")\n\tcmd.Flags().StringVarP(&kconf.SASL.Password, \"sasl-password\", \"p\", 
os.Getenv(\"KAFKA_SASL_PASSWORD\"), \"Password for SASL authentication\")\n\treturn\n}", "func WithSomeName(handler ConsumerHandler) ConsumerOption {\n\treturn func(consumer Consumer) error {\n\t\treturn consumer.Consume(\"some-name\", handler)\n\t}\n}", "func GetReader(cl *http.Client, cmkarn arn.ARN) (Reader, error) {\n\tcfg, err := loadAWSConfig(cl, cmkarn)\n\tif err != nil {\n\t\treturn Reader{}, err\n\t}\n\treturn Reader{\n\t\tKMSsrv: kms.New(cfg),\n\t}, nil\n}", "func WithCommander(r commanders.Reader) func(*fiber.Ctx) error {\n\treturn func(ctx *fiber.Ctx) error {\n\t\tid, err := uuid.FromString(ctx.Params(\"commanderID\"))\n\t\tif err != nil {\n\t\t\treturn newErrBadRequest(\"Invalid CommanderID\")\n\t\t}\n\t\tcommander, err := r.FindOne(commanders.FindOneQuery{ID: id})\n\t\tif err != nil {\n\t\t\treturn handleFindOneError(err, \"Commander\")\n\t\t}\n\t\tctx.Locals(\"commander\", commander)\n\t\treturn ctx.Next()\n\t}\n}", "func newPrivateOrPartnerConsumer(provider *Provider, authURL string) *oauth.Consumer {\n\tprivateKeyFileContents, err := ioutil.ReadFile(privateKeyFilePath)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tblock, _ := pem.Decode(privateKeyFileContents)\n\n\tprivateKey, err := x509.ParsePKCS1PrivateKey(block.Bytes)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tc := oauth.NewRSAConsumer(\n\t\tprovider.ClientKey,\n\t\tprivateKey,\n\t\toauth.ServiceProvider{\n\t\t\tRequestTokenUrl: requestURL,\n\t\t\tAuthorizeTokenUrl: authURL,\n\t\t\tAccessTokenUrl: tokenURL},\n\t)\n\n\tc.Debug(provider.debug)\n\n\taccepttype := []string{\"application/json\"}\n\tuseragent := []string{userAgentString}\n\tc.AdditionalHeaders = map[string][]string{\n\t\t\"Accept\": accepttype,\n\t\t\"User-Agent\": useragent,\n\t}\n\n\treturn c\n}", "func (o *AddConsumerReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewAddConsumerOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"response status code does not match any response statuses defined for this endpoint in the swagger spec\", response, response.Code())\n\t}\n}" ]
[ "0.721365", "0.63954514", "0.612182", "0.60554236", "0.5910244", "0.58986294", "0.56332886", "0.5478815", "0.54349446", "0.5386139", "0.52661705", "0.52229625", "0.52031034", "0.5185453", "0.51728", "0.5157714", "0.51267815", "0.51030433", "0.5096483", "0.50594753", "0.50166637", "0.4938628", "0.49383843", "0.49267343", "0.4855722", "0.48462942", "0.4826329", "0.48240507", "0.48130253", "0.4773379", "0.47658545", "0.47609836", "0.47584864", "0.47580934", "0.47540534", "0.4749815", "0.47287604", "0.47188878", "0.47180042", "0.4712055", "0.4701531", "0.46792653", "0.46753034", "0.4658295", "0.46464288", "0.4645672", "0.46198848", "0.46131203", "0.46023285", "0.46008217", "0.45873532", "0.4585457", "0.45815432", "0.4578921", "0.45771083", "0.45422596", "0.45412734", "0.4534787", "0.45242256", "0.45241094", "0.45111534", "0.450626", "0.44990268", "0.44843996", "0.44802433", "0.4477714", "0.44733146", "0.44726878", "0.44646573", "0.44614238", "0.44525012", "0.44467732", "0.4441351", "0.44399065", "0.44379345", "0.44364372", "0.4433515", "0.44319534", "0.44240317", "0.44201252", "0.44142774", "0.44136044", "0.44079933", "0.43988508", "0.43963313", "0.43922392", "0.4383668", "0.43828988", "0.43745813", "0.43726406", "0.437251", "0.43724158", "0.4371812", "0.43698183", "0.43596938", "0.4355738", "0.43498936", "0.4349752", "0.43256295", "0.43231902" ]
0.84777284
0
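The record that ends here pairs an unseen constructor query with a document built on the functional-options pattern: safe defaults are set first, each option may override them, and a default kinesis client is created only when none was supplied. A minimal self-contained Go sketch of that pattern follows; the Checkpoint/Counter interfaces, their method sets, and the field names are assumptions reconstructed from the visible fragment, not the library's real definitions.

package consumer

import (
	"log"
	"os"
)

// Checkpoint and Counter stand in for the library's interfaces; their
// method sets here are assumptions, since the record only shows the
// constructor tail.
type Checkpoint interface{ Get(shardID string) (string, error) }

type Counter interface{ Add(name string, delta int64) }

type noopCheckpoint struct{}

func (noopCheckpoint) Get(string) (string, error) { return "", nil }

type noopCounter struct{}

func (noopCounter) Add(string, int64) {}

// Option mutates the Consumer during construction and may fail.
type Option func(*Consumer) error

type Consumer struct {
	checkpoint Checkpoint
	counter    Counter
	logger     *log.Logger
}

// New sets no-op defaults first, then lets each Option override them,
// mirroring the constructor tail visible in the record above.
func New(opts ...Option) (*Consumer, error) {
	c := &Consumer{
		checkpoint: noopCheckpoint{},
		counter:    noopCounter{},
		logger:     log.New(os.Stderr, "kinesis-consumer: ", log.LstdFlags),
	}
	for _, opt := range opts {
		if err := opt(c); err != nil {
			return nil, err
		}
	}
	return c, nil
}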
WithOffsetConsumerOption lets you set the kafka offset to read from
func WithOffsetConsumerOption(offset int64) ConsumerOption { return func(c *Consumer) { switch offset { case LastOffset: c.config.StartOffset = LastOffset case FirstOffset: c.config.StartOffset = FirstOffset default: c.config.StartOffset = FirstOffset } } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (_options *ListTopicsOptions) SetOffset(offset int64) *ListTopicsOptions {\n\t_options.Offset = core.Int64Ptr(offset)\n\treturn _options\n}", "func WithOffset(offset int) eventsource.QueryOption {\n\treturn func(i interface{}) {\n\t\tif o, ok := i.(*options); ok {\n\t\t\to.offset = &offset\n\t\t}\n\t}\n}", "func WithReaderConsumerOption(reader *kafgo.Reader) ConsumerOption {\n\treturn func(c *Consumer) { c.reader = reader }\n}", "func WithOffset(offset int64) AppendOption {\n\treturn func(pw *pendingWrite) {\n\t\tpw.request.Offset = &wrapperspb.Int64Value{\n\t\t\tValue: offset,\n\t\t}\n\t}\n}", "func WithTopicConsumerOption(topic string) ConsumerOption {\n\treturn func(c *Consumer) {\n\t\tc.config.Topic = topic\n\t}\n}", "func (c *offsetCoordinator) Offset(\n\ttopic string, partition int32) (\n\toffset int64, metadata string, resErr error) {\n\n\tretry := &backoff.Backoff{Min: c.conf.RetryErrWait, Jitter: true}\n\tfor try := 0; try < c.conf.RetryErrLimit; try++ {\n\t\tif try != 0 {\n\t\t\ttime.Sleep(retry.Duration())\n\t\t}\n\n\t\t// get a copy of our connection with the lock, this might establish a new\n\t\t// connection so can take a bit\n\t\tconn, err := c.broker.coordinatorConnection(c.conf.ConsumerGroup)\n\t\tif conn == nil {\n\t\t\tresErr = err\n\t\t\tcontinue\n\t\t}\n\t\tdefer func(lconn *connection) { go c.broker.conns.Idle(lconn) }(conn)\n\n\t\tresp, err := conn.OffsetFetch(&proto.OffsetFetchReq{\n\t\t\tConsumerGroup: c.conf.ConsumerGroup,\n\t\t\tTopics: []proto.OffsetFetchReqTopic{\n\t\t\t\t{\n\t\t\t\t\tName: topic,\n\t\t\t\t\tPartitions: []int32{partition},\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tresErr = err\n\n\t\tswitch err {\n\t\tcase io.EOF, syscall.EPIPE:\n\t\t\tlog.Debugf(\"connection died while fetching offsets on %s:%d for %s: %s\",\n\t\t\t\ttopic, partition, c.conf.ConsumerGroup, err)\n\t\t\t_ = conn.Close()\n\n\t\tcase nil:\n\t\t\tfor _, t := range resp.Topics {\n\t\t\t\tfor _, p := range t.Partitions {\n\t\t\t\t\tif t.Name != topic || p.ID != partition {\n\t\t\t\t\t\tlog.Warningf(\"offset response with unexpected data for %s:%d\",\n\t\t\t\t\t\t\tt.Name, p.ID)\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\n\t\t\t\t\tif p.Err != nil {\n\t\t\t\t\t\treturn 0, \"\", p.Err\n\t\t\t\t\t}\n\t\t\t\t\t// This is expected in and only in the case where the consumer group, topic\n\t\t\t\t\t// pair is brand new. However, it appears there may be race conditions\n\t\t\t\t\t// where Kafka returns -1 erroneously. 
Not sure how to handle this yet,\n\t\t\t\t\t// but adding debugging in the meantime.\n\t\t\t\t\tif p.Offset < 0 {\n\t\t\t\t\t\tlog.Errorf(\"negative offset response %d for %s:%d\",\n\t\t\t\t\t\t\tp.Offset, t.Name, p.ID)\n\t\t\t\t\t}\n\t\t\t\t\treturn p.Offset, p.Metadata, nil\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn 0, \"\", errors.New(\"response does not contain offset information\")\n\t\t}\n\t}\n\n\treturn 0, \"\", resErr\n}", "func (kc *KClient) ConsumeOffsetMsg(topic string, partition int32, offset int64) (message *Message, err error) {\n\tconsumer, err := sarama.NewConsumerFromClient(kc.cl)\n\tif err != nil {\n\t\treturn\n\t}\n\tpartitionConsumer, err := consumer.ConsumePartition(topic, partition, offset)\n\tif err != nil {\n\t\treturn\n\t}\n\tmsg := <-partitionConsumer.Messages()\n\tmessage = convertMsg(msg)\n\terr = partitionConsumer.Close()\n\tif err != nil {\n\t\treturn\n\t}\n\terr = consumer.Close()\n\tif err != nil {\n\t\treturn\n\t}\n\treturn\n}", "func AutoOffsetNone() ConsumerOption {\n\treturn func(o *ConsumerOptions) error {\n\t\to.AutoOffset = autoOffsetNone\n\t\treturn nil\n\t}\n}", "func (c *Consumer) Offset(partitionID int32) (int64, error) {\n\treturn c.zoo.Offset(c.group, c.topic, partitionID)\n}", "func (_options *ListSubscriptionsOptions) SetOffset(offset int64) *ListSubscriptionsOptions {\n\t_options.Offset = core.Int64Ptr(offset)\n\treturn _options\n}", "func (cc *ConfluentConsumer) Seek(offset int64) error {\n\ttp := kafkalib.TopicPartition{Topic: &cc.conf.Topic, Offset: kafkalib.Offset(offset)}\n\tif cc.conf.Consumer.Partition != nil {\n\t\ttp.Partition = *cc.conf.Consumer.Partition\n\t}\n\n\terr := cc.c.Seek(tp, int(cc.conf.RequestTimeout.Milliseconds()))\n\tif err, ok := err.(kafkalib.Error); ok && err.Code() == kafkalib.ErrTimedOut {\n\t\treturn ErrSeekTimedOut\n\t}\n\n\treturn nil\n}", "func WithOffset(offset int) requestOptionalParam {\n\treturn func(params requestOptionalParams) {\n\t\tparams.Add(paramOffset, strconv.Itoa(offset))\n\t}\n}", "func (_options *ListTagsSubscriptionOptions) SetOffset(offset int64) *ListTagsSubscriptionOptions {\n\t_options.Offset = core.Int64Ptr(offset)\n\treturn _options\n}", "func (_options *ListConfigurationsOptions) SetOffset(offset int64) *ListConfigurationsOptions {\n\t_options.Offset = core.Int64Ptr(offset)\n\treturn _options\n}", "func WithOffset(v int64) (p Pair) {\n\treturn Pair{Key: \"offset\", Value: v}\n}", "func (_options *ListSecretsOptions) SetOffset(offset int64) *ListSecretsOptions {\n\t_options.Offset = core.Int64Ptr(offset)\n\treturn _options\n}", "func WithCheckpoint(checkpoint Checkpoint) Option {\n\treturn func(c *Consumer) error {\n\t\tc.checkpoint = checkpoint\n\t\treturn nil\n\t}\n}", "func NewConsumer(log logrus.FieldLogger, conf Config, opts ...ConfigOpt) (Consumer, error) {\n\t// See Reference at https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md\n\tkafkaConf := conf.baseKafkaConfig()\n\t_ = kafkaConf.SetKey(\"enable.auto.offset.store\", false) // manually StoreOffset after processing a message. 
Otherwise races may happen.)\n\n\t// In case we try to assign an offset out of range (greater than log-end-offset), consumer will use start consuming from offset zero.\n\t_ = kafkaConf.SetKey(\"auto.offset.reset\", \"earliest\")\n\n\tconf.Consumer.Apply(kafkaConf)\n\tfor _, opt := range opts {\n\t\topt(kafkaConf)\n\t}\n\n\tif err := conf.configureAuth(kafkaConf); err != nil {\n\t\treturn nil, errors.Wrap(err, \"error configuring auth for the Kafka consumer\")\n\t}\n\n\tconsumer, err := kafkalib.NewConsumer(kafkaConf)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif conf.RequestTimeout == 0 {\n\t\tconf.RequestTimeout = DefaultTimeout\n\t}\n\n\tcc := &ConfluentConsumer{\n\t\tc: consumer,\n\t\tconf: conf,\n\t\tlog: log,\n\t}\n\n\tlogFields := logrus.Fields{\"kafka_topic\": cc.conf.Topic}\n\n\tif cc.conf.Consumer.Partition != nil || cc.conf.Consumer.PartitionKey != \"\" {\n\t\t// set the default partitioner algorithm\n\t\tif cc.conf.Consumer.PartitionerAlgorithm == \"\" {\n\t\t\tcc.conf.Consumer.PartitionerAlgorithm = PartitionerMurMur2\n\t\t}\n\t\t// Set the partition if a key is set to determine the partition\n\t\tif cc.conf.Consumer.PartitionKey != \"\" && cc.conf.Consumer.PartitionerAlgorithm != \"\" {\n\t\t\tcc.AssignPartitionByKey(cc.conf.Consumer.PartitionKey, cc.conf.Consumer.PartitionerAlgorithm)\n\t\t}\n\n\t\tlogFields[\"kafka_partition_key\"] = cc.conf.Consumer.PartitionKey\n\t\tlogFields[\"kafka_partition\"] = *cc.conf.Consumer.Partition\n\t}\n\n\tcc.setupRebalanceHandler()\n\tcc.log.WithFields(logFields).Debug(\"Subscribing to Kafka topic\")\n\tif serr := cc.c.Subscribe(cc.conf.Topic, cc.rebalanceHandler); serr != nil {\n\t\terr = errors.Wrap(serr, \"error subscribing to topic\")\n\t}\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn cc, nil\n}", "func (s *Select) SetOffset(n int) *Select {\n\ts.offset = n\n\treturn s\n}", "func WithOffset(offset int64) ListOption {\n\treturn func(args *listOptions) {\n\t\targs.offset = offset\n\t}\n}", "func (b *Broker) offset(topic string, partition int32, timems int64) (int64, error) {\n\treq := &proto.OffsetReq{\n\t\tClientID: b.conf.ClientID,\n\t\tReplicaID: -1, // any client\n\t\tTopics: []proto.OffsetReqTopic{\n\t\t\t{\n\t\t\t\tName: topic,\n\t\t\t\tPartitions: []proto.OffsetReqPartition{\n\t\t\t\t\t{\n\t\t\t\t\t\tID: partition,\n\t\t\t\t\t\tTimeMs: timems,\n\t\t\t\t\t\tMaxOffsets: 2,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tvar resErr error\n\tretry := &backoff.Backoff{Min: b.conf.LeaderRetryWait, Jitter: true}\noffsetRetryLoop:\n\tfor try := 0; try < b.conf.LeaderRetryLimit; try++ {\n\t\tif try != 0 {\n\t\t\ttime.Sleep(retry.Duration())\n\t\t}\n\n\t\tconn, err := b.leaderConnection(topic, partition)\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t\tdefer func(lconn *connection) { go b.conns.Idle(lconn) }(conn)\n\n\t\tresp, err := conn.Offset(req)\n\t\tif err != nil {\n\t\t\tif _, ok := err.(*net.OpError); ok || err == io.EOF || err == syscall.EPIPE {\n\t\t\t\tlog.Debugf(\"connection died while sending message to %s:%d: %s\",\n\t\t\t\t\ttopic, partition, err)\n\t\t\t\t_ = conn.Close()\n\t\t\t\tresErr = err\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\treturn 0, err\n\t\t}\n\n\t\tfor _, t := range resp.Topics {\n\t\t\tfor _, p := range t.Partitions {\n\t\t\t\tif t.Name != topic || p.ID != partition {\n\t\t\t\t\tlog.Warningf(\"offset response with unexpected data for %s:%d\",\n\t\t\t\t\t\tt.Name, p.ID)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tresErr = p.Err\n\n\t\t\t\tswitch p.Err {\n\t\t\t\tcase proto.ErrLeaderNotAvailable, 
proto.ErrNotLeaderForPartition,\n\t\t\t\t\tproto.ErrBrokerNotAvailable, proto.ErrUnknownTopicOrPartition:\n\t\t\t\t\t// Failover happened, so we probably need to talk to a different broker. Let's\n\t\t\t\t\t// kick off a metadata refresh.\n\t\t\t\t\tlog.Warningf(\"cannot fetch offset: %s\", p.Err)\n\t\t\t\t\tif err := b.cluster.RefreshMetadata(); err != nil {\n\t\t\t\t\t\tlog.Warningf(\"cannot refresh metadata: %s\", err)\n\t\t\t\t\t}\n\t\t\t\t\tcontinue offsetRetryLoop\n\t\t\t\t}\n\n\t\t\t\t// Happens when there are no messages in the partition\n\t\t\t\tif len(p.Offsets) == 0 {\n\t\t\t\t\treturn 0, p.Err\n\t\t\t\t} else {\n\t\t\t\t\treturn p.Offsets[0], p.Err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tif resErr == nil {\n\t\treturn 0, errors.New(\"incomplete fetch response\")\n\t}\n\treturn 0, resErr\n}", "func (_options *ListSecretVersionLocksOptions) SetOffset(offset int64) *ListSecretVersionLocksOptions {\n\t_options.Offset = core.Int64Ptr(offset)\n\treturn _options\n}", "func (b *Kafka) Consume(ctx context.Context, topic string, offset int64, imm bool, max int64) ([]string, error) {\n\n\tb.lockForTopic(topic)\n\n\tdefer b.unlockForTopic(topic)\n\t// Fetch offsets\n\tnewOff, err := b.Client.GetOffset(topic, 0, sarama.OffsetNewest)\n\n\tif err != nil {\n\t\tlog.Error(err.Error())\n\t}\n\n\toldOff, err := b.Client.GetOffset(topic, 0, sarama.OffsetOldest)\n\tif err != nil {\n\t\tlog.Error(err.Error())\n\t}\n\n\tlog.Infof(\"Consuming topic: %v, min_offset: %v, max_offset: %v, current_offset: %v\", topic, oldOff, newOff, offset)\n\n\t// If tracked offset is equal or bigger than topic offset means no new messages\n\tif offset >= newOff {\n\t\treturn []string{}, nil\n\t}\n\n\t// If tracked offset is left behind increment it to topic's min. offset\n\tif offset < oldOff {\n\t\tlog.Infof(\"Tracked offset is off for topic: %v, broker_offset %v, tracked_offset: %v\", topic, offset, oldOff)\n\t\treturn []string{}, errors.New(\"offset is off\")\n\t}\n\n\tpartitionConsumer, err := b.Consumer.ConsumePartition(topic, 0, offset)\n\n\tif err != nil {\n\t\tlog.Errorf(\"Unable to consume topic %v, %v, min_offset: %v, max_offset: %v, current_offset: %v\", topic, err.Error(), newOff, oldOff, offset)\n\t\treturn []string{}, err\n\n\t}\n\n\tdefer func() {\n\t\tif err := partitionConsumer.Close(); err != nil {\n\t\t\tlog.Error(err)\n\t\t}\n\t}()\n\n\tmessages := make([]string, 0)\n\tvar consumed int64\n\ttimeout := time.After(300 * time.Second)\n\n\tif imm {\n\t\ttimeout = time.After(100 * time.Millisecond)\n\t}\n\nConsumerLoop:\n\tfor {\n\t\tselect {\n\t\t// If the http client cancels the http request break consume loop\n\t\tcase <-ctx.Done():\n\t\t\t{\n\t\t\t\tbreak ConsumerLoop\n\t\t\t}\n\t\tcase <-timeout:\n\t\t\t{\n\t\t\t\tbreak ConsumerLoop\n\t\t\t}\n\t\tcase msg := <-partitionConsumer.Messages():\n\n\t\t\tmessages = append(messages, string(msg.Value[:]))\n\n\t\t\tconsumed++\n\n\t\t\tlog.Infof(\"Consumed: %v, Max: %v, Latest Message: %v\", consumed, max, string(msg.Value[:]))\n\n\t\t\t// if we pass over the available messages and still want more\n\t\t\tif consumed >= max {\n\t\t\t\tbreak ConsumerLoop\n\t\t\t}\n\n\t\t\tif offset+consumed > newOff-1 {\n\t\t\t\t// if returnImmediately is set don't wait for more\n\t\t\t\tif imm {\n\t\t\t\t\tbreak ConsumerLoop\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t}\n\t}\n\n\treturn messages, nil\n}", "func WithAutoCommitConsumerOption(flag bool) ConsumerOption {\n\treturn func(c *Consumer) { c.autocommit = flag }\n}", "func (o *ListScansParams) SetOffset(offset *int64) {\n\to.Offset = 
offset\n}", "func (_options *ListSecretLocksOptions) SetOffset(offset int64) *ListSecretLocksOptions {\n\t_options.Offset = core.Int64Ptr(offset)\n\treturn _options\n}", "func (o *GetSearchClinicsParams) SetOffset(offset *int64) {\n\to.Offset = offset\n}", "func (_options *ListSecretsLocksOptions) SetOffset(offset int64) *ListSecretsLocksOptions {\n\t_options.Offset = core.Int64Ptr(offset)\n\treturn _options\n}", "func (c *GroupClient) FetchOffset(topic string, partition int32) (int64, error) {\n\treq := OffsetFetch.NewRequest(c.GroupId, topic, partition)\n\tresp := &OffsetFetch.Response{}\n\tif err := c.request(req, resp); err != nil {\n\t\treturn -1, fmt.Errorf(\"error making fetch offsets call: %w\", err)\n\t}\n\treturn parseOffsetFetchResponse(resp)\n}", "func (options *ListActionsOptions) SetOffset(offset int64) *ListActionsOptions {\n\toptions.Offset = core.Int64Ptr(offset)\n\treturn options\n}", "func (o *SearchKeywordChunkedParams) SetOffset(offset *int64) {\n\to.Offset = offset\n}", "func (b *MessagesGetConversationsBuilder) Offset(v int) *MessagesGetConversationsBuilder {\n\tb.Params[\"offset\"] = v\n\treturn b\n}", "func (_options *ListSourcesOptions) SetOffset(offset int64) *ListSourcesOptions {\n\t_options.Offset = core.Int64Ptr(offset)\n\treturn _options\n}", "func (options *ListJobsOptions) SetOffset(offset int64) *ListJobsOptions {\n\toptions.Offset = core.Int64Ptr(offset)\n\treturn options\n}", "func AutoOffsetEarliest() ConsumerOption {\n\treturn func(o *ConsumerOptions) error {\n\t\to.AutoOffset = autoOffsetEarliest\n\t\treturn nil\n\t}\n}", "func (b *MessagesGetHistoryBuilder) Offset(v int) *MessagesGetHistoryBuilder {\n\tb.Params[\"offset\"] = v\n\treturn b\n}", "func AutoOffsetLatest() ConsumerOption {\n\treturn func(o *ConsumerOptions) error {\n\t\to.AutoOffset = autoOffsetLatest\n\t\treturn nil\n\t}\n}", "func (_options *ListDestinationsOptions) SetOffset(offset int64) *ListDestinationsOptions {\n\t_options.Offset = core.Int64Ptr(offset)\n\treturn _options\n}", "func (options *ListWorkspaceActivitiesOptions) SetOffset(offset int64) *ListWorkspaceActivitiesOptions {\n\toptions.Offset = core.Int64Ptr(offset)\n\treturn options\n}", "func (o *GetModerationRulesParams) SetOffset(offset *int64) {\n\to.Offset = offset\n}", "func Offset(n int) QueryOption {\n\treturn newFuncQueryOption(func(wrapper *QueryWrapper) {\n\t\twrapper.offset = n\n\t\twrapper.queryLen += 2\n\t})\n}", "func (b *MessagesSearchBuilder) Offset(v int) *MessagesSearchBuilder {\n\tb.Params[\"offset\"] = v\n\treturn b\n}", "func (o *GetContactsParams) SetOffset(offset *int64) {\n\to.Offset = offset\n}", "func (o *InvestmentsTransactionsGetRequestOptions) SetOffset(v int32) {\n\to.Offset = &v\n}", "func ExampleZSelectBuilder_Offset() {\n\tuser := q.T(\"user\")\n\tfmt.Println(\"int: \", q.Select().From(user).Limit(10).Offset(10))\n\tfmt.Println(\"q.Unsafe:\", q.Select().From(user).Limit(10).Offset(q.Unsafe(10, \"*\", 20)))\n\t// Output:\n\t// int: SELECT * FROM \"user\" LIMIT ? OFFSET ? [10 10]\n\t// q.Unsafe: SELECT * FROM \"user\" LIMIT ? 
OFFSET 10*20 [10]\n}", "func (o *GetContactsParams) SetOffset(offset *int32) {\n\to.Offset = offset\n}", "func (p ReadParams) WithOffset(offset int64) *ReadParams {\n\tp.Offset = offset\n\treturn &p\n}", "func (osq *OfflineSessionQuery) Offset(offset int) *OfflineSessionQuery {\n\tosq.ctx.Offset = &offset\n\treturn osq\n}", "func (in *ActionUserClusterResourceIndexInput) SetOffset(value int64) *ActionUserClusterResourceIndexInput {\n\tin.Offset = value\n\n\tif in._selectedParameters == nil {\n\t\tin._selectedParameters = make(map[string]interface{})\n\t}\n\n\tin._selectedParameters[\"Offset\"] = nil\n\treturn in\n}", "func TestDecodeConsumerPartitionOffsetV3(t *testing.T) {\n\tkey := bytes.NewBuffer([]byte(\"\\x00\\nmy-group-5\\x00\\x06orders\\x00\\x00\\x00\\x00\"))\n\tvalue := bytes.NewBuffer([]byte(\"\\x00\\x03\\x00\\x00\\x00\\x00\\x00\\x00@\\x96\\xff\\xff\\xff\\xff\\x00\\x00\\x00\\x00\\x01j\\xfb\\x8a\\xb6\\x16\"))\n\tlogger := log.WithFields(log.Fields{})\n\n\texpected := &ConsumerPartitionOffset{\n\t\tGroup: \"my-group-5\",\n\t\tTopic: \"orders\",\n\t\tPartition: 0,\n\t\tOffset: 16534,\n\t\tTimestamp: 1558998332950,\n\t}\n\n\toffset, err := newConsumerPartitionOffset(key, value, logger)\n\tassert.Nil(t, err, \"Expected newConsumerPartitionOffset to return no error\")\n\tassert.Equal(t, expected, offset, \"Decoded offset does not equal expected value\")\n}", "func (c *Client) ConsumerOffsets(ctx context.Context, tg TopicAndGroup) (map[int]int64, error) {\n\tmetadata, err := c.Metadata(ctx, &MetadataRequest{\n\t\tTopics: []string{tg.Topic},\n\t})\n\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to get topic metadata :%w\", err)\n\t}\n\n\ttopic := metadata.Topics[0]\n\tpartitions := make([]int, len(topic.Partitions))\n\n\tfor i := range topic.Partitions {\n\t\tpartitions[i] = topic.Partitions[i].ID\n\t}\n\n\toffsets, err := c.OffsetFetch(ctx, &OffsetFetchRequest{\n\t\tGroupID: tg.GroupId,\n\t\tTopics: map[string][]int{\n\t\t\ttg.Topic: partitions,\n\t\t},\n\t})\n\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to get offsets: %w\", err)\n\t}\n\n\ttopicOffsets := offsets.Topics[topic.Name]\n\tpartitionOffsets := make(map[int]int64, len(topicOffsets))\n\n\tfor _, off := range topicOffsets {\n\t\tpartitionOffsets[off.Partition] = off.CommittedOffset\n\t}\n\n\treturn partitionOffsets, nil\n}", "func (o *QueryFirewallFieldsParams) SetOffset(offset *string) {\n\to.Offset = offset\n}", "func (options *ListWorkspacesOptions) SetOffset(offset int64) *ListWorkspacesOptions {\n\toptions.Offset = core.Int64Ptr(offset)\n\treturn options\n}", "func (c *Coordinator) CommitOffset(topic string, partition int32, offset int64) error {\n\tb, err := c.client.Coordinator(c.cfg.GroupID)\n\tif err != nil {\n\t\treturn err\n\t}\n\t// OffsetCommitRequest retention time should be -1 to signify to use the broker default.\n\tvar rt int64 = -1\n\tif c.cfg.RetentionTime.Nanoseconds() != 0 {\n\t\trt = c.cfg.RetentionTime.Nanoseconds() / int64(time.Millisecond)\n\t}\n\treq := &sarama.OffsetCommitRequest{\n\t\tConsumerGroup: c.cfg.GroupID,\n\t\tConsumerGroupGeneration: c.gid,\n\t\tConsumerID: c.mid,\n\t\tRetentionTime: rt,\n\t\tVersion: offsetCommitRequestVersion,\n\t}\n\treq.AddBlock(topic, partition, offset, 0, \"\")\n\tresp, err := b.CommitOffset(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\t// return first error we happen to iterate into (if any).\n\tfor _, topicErrs := range resp.Errors {\n\t\tfor _, partitionErr := range topicErrs {\n\t\t\tif partitionErr == sarama.ErrNoError 
{\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\treturn partitionErr\n\t\t}\n\t}\n\treturn nil\n}", "func (o *QueryChangesParams) SetOffset(offset *int64) {\n\to.Offset = offset\n}", "func WithDecoderConsumerOption(fn Decoder) ConsumerOption {\n\treturn func(c *Consumer) { c.dec = fn }\n}", "func TestConsumerQueryWatermarkOffsets(t *testing.T) {\n\tif !testconfRead() {\n\t\tt.Skipf(\"Missing testconf.json\")\n\t}\n\n\t// getMessageCountInTopic() uses consumer QueryWatermarkOffsets() API to\n\t// get the number of messages in a topic\n\tmsgcnt, err := getMessageCountInTopic(testconf.Topic)\n\tif err != nil {\n\t\tt.Errorf(\"Cannot get message size. Error: %s\\n\", err)\n\t}\n\n\t// Prime topic with test messages\n\tcreateTestMessages()\n\tproducerTest(t, \"Priming producer\", p0TestMsgs, producerCtrl{silent: true},\n\t\tfunc(p *Producer, m *Message, drChan chan Event) {\n\t\t\tp.ProduceChannel() <- m\n\t\t})\n\n\t// getMessageCountInTopic() uses consumer QueryWatermarkOffsets() API to\n\t// get the number of messages in a topic\n\tnewmsgcnt, err := getMessageCountInTopic(testconf.Topic)\n\tif err != nil {\n\t\tt.Errorf(\"Cannot get message size. Error: %s\\n\", err)\n\t}\n\n\tif newmsgcnt-msgcnt != len(p0TestMsgs) {\n\t\tt.Errorf(\"Incorrect offsets. Expected message count %d, got %d\\n\", len(p0TestMsgs), newmsgcnt-msgcnt)\n\t}\n\n}", "func (o *NarrowSearchRecipeParams) SetOffset(offset *int64) {\n\to.Offset = offset\n}", "func (o *QueryRolesParams) SetOffset(offset *int64) {\n\to.Offset = offset\n}", "func (b *GroupsGetBuilder) Offset(v int) *GroupsGetBuilder {\n\tb.Params[\"offset\"] = v\n\treturn b\n}", "func NewOffsetCoordinatorConf(consumerGroup string) OffsetCoordinatorConf {\n\treturn OffsetCoordinatorConf{\n\t\tConsumerGroup: consumerGroup,\n\t\tRetryErrLimit: 10,\n\t\tRetryErrWait: time.Millisecond * 500,\n\t}\n}", "func (s *stream) setReadOffset(offset uint64) {\n\tassert(s.id == 0)\n\ts.recv.offset = offset\n}", "func (q *BasicQuery) SetOffset(n uint64) Query {\n\tq.Offset = n\n\treturn q\n}", "func (o *GetDevicesAllParams) SetOffset(offset *string) {\n\to.Offset = offset\n}", "func (_options *ListIntegrationsOptions) SetOffset(offset int64) *ListIntegrationsOptions {\n\t_options.Offset = core.Int64Ptr(offset)\n\treturn _options\n}", "func (osq *OfflineSessionQuery) Offset(offset int) *OfflineSessionQuery {\n\tosq.offset = &offset\n\treturn osq\n}", "func (os *Offsets) AddOffset(t string, p int32, o int64) {\n\tos.Add(Offset{\n\t\tTopic: t,\n\t\tPartition: p,\n\t\tOffset: o,\n\t\tLeaderEpoch: -1,\n\t})\n}", "func (o *QueryEntitlementsParams) SetOffset(offset *int32) {\n\to.Offset = offset\n}", "func (in *ActionUserSessionIndexInput) SetOffset(value int64) *ActionUserSessionIndexInput {\n\tin.Offset = value\n\n\tif in._selectedParameters == nil {\n\t\tin._selectedParameters = make(map[string]interface{})\n\t}\n\n\tin._selectedParameters[\"Offset\"] = nil\n\treturn in\n}", "func (o *AdminGetBannedDevicesV4Params) SetOffset(offset *int64) {\n\to.Offset = offset\n}", "func (s *TransferService) FromOffset(ctx context.Context, partition int32, offset int64) (<-chan *wallet.Transfer, <-chan error) {\n\ttransfers := make(chan *wallet.Transfer)\n\terrc := make(chan error, 1)\n\n\tgo func() {\n\t\t// Close the transfers channel after messages returns.\n\t\tdefer close(transfers)\n\n\t\terr := s.messages(ctx, s.client.copts.transferTopic, partition, offset, func(m *sarama.ConsumerMessage) error {\n\t\t\tt := wallet.Transfer{}\n\t\t\tif err := json.Unmarshal(m.Value, &t); err != nil {\n\t\t\t\treturn 
err\n\t\t\t}\n\t\t\tt.Partition = m.Partition\n\t\t\tt.SequenceID = m.Offset\n\n\t\t\tselect {\n\t\t\tcase transfers <- &t:\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn ctx.Err()\n\t\t\t}\n\t\t\treturn nil\n\t\t})\n\n\t\t// No select needed for this send, since errc is buffered.\n\t\terrc <- err\n\t}()\n\n\treturn transfers, errc\n}", "func WithEndpointConsumerOption(end endpoint.Endpoint) ConsumerOption {\n\treturn func(c *Consumer) { c.end = end }\n}", "func (o *ExtractionListV1Params) SetOffset(offset *string) {\n\to.Offset = offset\n}", "func (o *QuerySensorUpdateKernelsDistinctParams) SetOffset(offset *int64) {\n\to.Offset = offset\n}", "func Offset(n int64) Option {\n\treturn func(q Query) Query {\n\t\tq.clauses = append(q.clauses, offsetClause(n))\n\t\treturn q\n\t}\n}", "func (b *GroupsGetRequestsBuilder) Offset(v int) *GroupsGetRequestsBuilder {\n\tb.Params[\"offset\"] = v\n\treturn b\n}", "func (driver) LoadOffset(\n\tctx context.Context,\n\tdb *sql.DB,\n\tak, sk string,\n) (uint64, error) {\n\trow := db.QueryRowContext(\n\t\tctx,\n\t\t`SELECT\n\t\t\tnext_offset\n\t\tFROM stream_offset\n\t\tWHERE app_key = ?\n\t\tAND source_app_key = ?`,\n\t\tak,\n\t\tsk,\n\t)\n\n\tvar o uint64\n\terr := row.Scan(&o)\n\tif err == sql.ErrNoRows {\n\t\terr = nil\n\t}\n\n\treturn o, err\n}", "func (o *AdminSearchUserV3Params) SetOffset(offset *string) {\n\to.Offset = offset\n}", "func (b *GroupsGetAddressesBuilder) Offset(v int) *GroupsGetAddressesBuilder {\n\tb.Params[\"offset\"] = v\n\treturn b\n}", "func (c *KafkaClient) GetOffset(group string, topic string, partition int32) (int64, error) {\n\tlog.Infof(\"Getting offset for group %s, topic %s, partition %d\", group, topic, partition)\n\tcoordinator, err := c.metadata.OffsetCoordinator(group)\n\tif err != nil {\n\t\treturn InvalidOffset, err\n\t}\n\n\trequest := NewOffsetFetchRequest(group)\n\trequest.AddOffset(topic, partition)\n\tbytes, err := c.syncSendAndReceive(coordinator, request)\n\tif err != nil {\n\t\treturn InvalidOffset, err\n\t}\n\tresponse := new(OffsetFetchResponse)\n\tdecodingErr := c.decode(bytes, response)\n\tif decodingErr != nil {\n\t\tlog.Errorf(\"Could not decode an OffsetFetchResponse. 
Reason: %s\", decodingErr.Reason())\n\t\treturn InvalidOffset, decodingErr.Error()\n\t}\n\n\ttopicOffsets, exist := response.Offsets[topic]\n\tif !exist {\n\t\treturn InvalidOffset, fmt.Errorf(\"OffsetFetchResponse does not contain information about requested topic\")\n\t}\n\n\tif offset, exists := topicOffsets[partition]; !exists {\n\t\treturn InvalidOffset, fmt.Errorf(\"OffsetFetchResponse does not contain information about requested partition\")\n\t} else if offset.Error != ErrNoError {\n\t\treturn InvalidOffset, offset.Error\n\t} else {\n\t\treturn offset.Offset, nil\n\t}\n}", "func (in *ActionDatasetSnapshotIndexInput) SetOffset(value int64) *ActionDatasetSnapshotIndexInput {\n\tin.Offset = value\n\n\tif in._selectedParameters == nil {\n\t\tin._selectedParameters = make(map[string]interface{})\n\t}\n\n\tin._selectedParameters[\"Offset\"] = nil\n\treturn in\n}", "func (tdq *TCPDetectorQuery) Offset(offset int) *TCPDetectorQuery {\n\ttdq.ctx.Offset = &offset\n\treturn tdq\n}", "func (a *AdminClient) ListConsumerGroupOffsets(\n\tctx context.Context, groupsPartitions []ConsumerGroupTopicPartitions,\n\toptions ...ListConsumerGroupOffsetsAdminOption) (lcgor ListConsumerGroupOffsetsResult, err error) {\n\terr = a.verifyClient()\n\tif err != nil {\n\t\treturn lcgor, err\n\t}\n\n\tlcgor.ConsumerGroupsTopicPartitions = nil\n\n\t// For now, we only support one group at a time given as a single element of\n\t// groupsPartitions.\n\t// Code has been written so that only this if-guard needs to be removed when\n\t// we add support for multiple ConsumerGroupTopicPartitions.\n\tif len(groupsPartitions) != 1 {\n\t\treturn lcgor, fmt.Errorf(\n\t\t\t\"expected length of groupsPartitions is 1, got %d\", len(groupsPartitions))\n\t}\n\n\tcGroupsPartitions := make([]*C.rd_kafka_ListConsumerGroupOffsets_t,\n\t\tlen(groupsPartitions))\n\n\t// Convert Go ConsumerGroupTopicPartitions to C ListConsumerGroupOffsets.\n\tfor i, groupPartitions := range groupsPartitions {\n\t\t// We need to destroy this list because rd_kafka_ListConsumerGroupOffsets_new\n\t\t// creates a copy of it.\n\t\tvar cPartitions *C.rd_kafka_topic_partition_list_t = nil\n\n\t\tif groupPartitions.Partitions != nil {\n\t\t\tcPartitions = newCPartsFromTopicPartitions(groupPartitions.Partitions)\n\t\t\tdefer C.rd_kafka_topic_partition_list_destroy(cPartitions)\n\t\t}\n\n\t\tcGroupID := C.CString(groupPartitions.Group)\n\t\tdefer C.free(unsafe.Pointer(cGroupID))\n\n\t\tcGroupsPartitions[i] =\n\t\t\tC.rd_kafka_ListConsumerGroupOffsets_new(cGroupID, cPartitions)\n\t\tdefer C.rd_kafka_ListConsumerGroupOffsets_destroy(cGroupsPartitions[i])\n\t}\n\n\t// Convert Go AdminOptions (if any) to C AdminOptions.\n\tgenericOptions := make([]AdminOption, len(options))\n\tfor i := range options {\n\t\tgenericOptions[i] = options[i]\n\t}\n\tcOptions, err := adminOptionsSetup(\n\t\ta.handle, C.RD_KAFKA_ADMIN_OP_LISTCONSUMERGROUPOFFSETS, genericOptions)\n\tif err != nil {\n\t\treturn lcgor, err\n\t}\n\tdefer C.rd_kafka_AdminOptions_destroy(cOptions)\n\n\t// Create temporary queue for async operation.\n\tcQueue := C.rd_kafka_queue_new(a.handle.rk)\n\tdefer C.rd_kafka_queue_destroy(cQueue)\n\n\t// Call rd_kafka_ListConsumerGroupOffsets (asynchronous).\n\tC.rd_kafka_ListConsumerGroupOffsets(\n\t\ta.handle.rk,\n\t\t(**C.rd_kafka_ListConsumerGroupOffsets_t)(&cGroupsPartitions[0]),\n\t\tC.size_t(len(cGroupsPartitions)),\n\t\tcOptions,\n\t\tcQueue)\n\n\t// Wait for result, error or context timeout.\n\trkev, err := a.waitResult(\n\t\tctx, cQueue, 
C.RD_KAFKA_EVENT_LISTCONSUMERGROUPOFFSETS_RESULT)\n\tif err != nil {\n\t\treturn lcgor, err\n\t}\n\tdefer C.rd_kafka_event_destroy(rkev)\n\n\tcRes := C.rd_kafka_event_ListConsumerGroupOffsets_result(rkev)\n\n\t// Convert result from C to Go.\n\tvar cGroupCount C.size_t\n\tcGroups := C.rd_kafka_ListConsumerGroupOffsets_result_groups(cRes, &cGroupCount)\n\tlcgor.ConsumerGroupsTopicPartitions = a.cToConsumerGroupTopicPartitions(cGroups, cGroupCount)\n\n\treturn lcgor, nil\n}", "func (b *GroupsGetBannedBuilder) Offset(v int) *GroupsGetBannedBuilder {\n\tb.Params[\"offset\"] = v\n\treturn b\n}", "func (cr *ClientResponse) SetOffset(off uint64) {\n\tcr.offset = off\n}", "func Offset(offset int) QueryOptions {\n\treturn func(query *Query) (*Query, error) {\n\t\tif offset < 0 {\n\t\t\treturn nil, errors.New(\"offset can't be negative\")\n\t\t}\n\t\tresMap, err := mergeQueryMaps(query.content,\n\t\t\tmap[string]interface{}{operations[OFFSET]: offset})\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tquery.content = resMap.(map[string]interface{})\n\t\treturn query, nil\n\t}\n}", "func (b *PhotosGetCommentsBuilder) Offset(v int) *PhotosGetCommentsBuilder {\n\tb.Params[\"offset\"] = v\n\treturn b\n}", "func (a *ReadArgs) SetOffset(offset int) *ReadArgs {\n\ta.Offset = &offset\n\treturn a\n}", "func (tq *TweetQuery) Offset(offset int) *TweetQuery {\n\ttq.ctx.Offset = &offset\n\treturn tq\n}", "func (c *Consumer) CommitOffset(msg *sarama.ConsumerMessage) {\n\tc.consumer.MarkOffset(msg, \"\")\n}", "func consumeStart() {\n\tif *c.Zookeeper == \"\" {\n\t\tlogger.Fatalln(\"Zookeeper Config is invalid.\")\n\t\tos.Exit(1)\n\t}\n\n\tconfig := consumergroup.NewConfig()\n\tconfig.Offsets.Initial = sarama.OffsetNewest\n\n\tif c.OffsetStart > 0 {\n\t\tconfig.Offsets.Initial = c.OffsetStart\n\t}\n\n\tconfig.Offsets.ProcessingTimeout = 10 * time.Second\n\n\tzookeeperNodes, config.Zookeeper.Chroot = kazoo.ParseConnectionString(*c.Zookeeper)\n\n\tkafkaTopics := strings.Split(*c.Topics, \",\")\n\n\tconsumer, consumerErr := consumergroup.JoinConsumerGroup(c.ConsumerGroup, kafkaTopics, zookeeperNodes, config)\n\tif consumerErr != nil {\n\t\tlogger.Fatalln(consumerErr)\n\t}\n\n\tsig := make(chan os.Signal, 1)\n\tsignal.Notify(sig, os.Interrupt)\n\tgo func() {\n\t\t<-sig\n\t\tif err := consumer.Close(); err != nil {\n\t\t\tsarama.Logger.Println(\"Error closing the consumer\", err)\n\t\t}\n\t}()\n\n\tgo func() {\n\t\tfor err := range consumer.Errors() {\n\t\t\tlogger.Fatalln(err)\n\t\t}\n\t}()\n\n\teventCount := 0\n\toffsets := make(map[string]map[int32]int64)\n\n\tfor message := range consumer.Messages() {\n\t\t// if offset beyond the offsetEnd number , exit.\n\t\tif c.OffsetEnd > 0 && message.Offset > c.OffsetEnd {\n\t\t\tlogger.Println(\"Offset beyond the end offset point.\")\n\t\t\tos.Exit(1)\n\t\t}\n\t\tif offsets[message.Topic] == nil {\n\t\t\toffsets[message.Topic] = make(map[int32]int64)\n\t\t}\n\n\t\teventCount += 1\n\t\tif offsets[message.Topic][message.Partition] != 0 && offsets[message.Topic][message.Partition] != message.Offset-1 {\n\t\t\tlogger.Printf(\"Unexpected offset on %s:%d. 
Expected %d, found %d, diff %d.\\n\", message.Topic, message.Partition, offsets[message.Topic][message.Partition]+1, message.Offset, message.Offset-offsets[message.Topic][message.Partition]+1)\n\t\t}\n\n\t\toffsets[message.Topic][message.Partition] = message.Offset\n\t\tmsg <- string(message.Value)\n\t\tconsumer.CommitUpto(message)\n\t\tlogger.Printf(\"Value:%s,Partition:%v, Offset:%v\", message.Value, message.Partition, message.Offset)\n\t}\n\n\tlogger.Printf(\"Processed %d events.\", eventCount)\n}", "func Consume(topicName string, groupID string) {\n\tfmt.Println(\"Consumer started.\")\n\n\t// make a new reader that consumes from topic-A\n\tr := kafka.NewReader(kafka.ReaderConfig{\n\t\tBrokers: []string{\"localhost:9092\"},\n\t\tGroupID: groupID,\n\t\tTopic: topicName,\n\t\tMinBytes: 10e3, // 10KB\n\t\tMaxBytes: 10e6, // 10MB\n\t})\n\n\tfor {\n\t\tm, err := r.ReadMessage(context.Background())\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t\tfmt.Printf(\"message at topic/partition/offset %v/%v/%v: %s = %s\\n\", m.Topic, m.Partition, m.Offset, string(m.Key), string(m.Value))\n\t}\n\n\tr.Close()\n\tfmt.Println(\"Consumer closed.\")\n}", "func (o *ExtrasSavedFiltersListParams) SetOffset(offset *int64) {\n\to.Offset = offset\n}", "func (o *GetSearchEmployeesParams) SetOffset(offset *int64) {\n\to.Offset = offset\n}", "func (b *SelectBuilder) Offset(offset interface{}) *SelectBuilder {\n\tif b.Select.Limit == nil {\n\t\tpanic(\"offset without limit\")\n\t}\n\tb.Select.Limit.Offset = makeValExpr(offset)\n\treturn b\n}", "func NewConsumer(topics []string, valueFactory ValueFactory, opts ...ConsumerOption) (*Consumer, error) {\n\tc := &Consumer{\n\t\tvalueFactory: valueFactory,\n\t\tavroAPI: avro.DefaultConfig,\n\t\tensureTopics: true,\n\t}\n\t// Loop through each option\n\tfor _, opt := range opts {\n\t\t// apply option\n\t\topt.applyC(c)\n\t}\n\n\tvar err error\n\n\t// if consumer not provided - make one\n\tif c.KafkaConsumer == nil {\n\t\t// if kafka config not provided - build default one\n\t\tif c.kafkaCfg == nil {\n\t\t\tvar envCfg struct {\n\t\t\t\tBroker string `env:\"KAFKA_BROKER\" envDefault:\"localhost:9092\"`\n\t\t\t\tCAFile string `env:\"KAFKA_CA_FILE\"`\n\t\t\t\tKeyFile string `env:\"KAFKA_KEY_FILE\"`\n\t\t\t\tCertificateFile string `env:\"KAFKA_CERTIFICATE_FILE\"`\n\t\t\t\tGroupID string `env:\"KAFKA_GROUP_ID\"`\n\t\t\t}\n\t\t\tif err := env.Parse(&envCfg); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\t// default configuration\n\t\t\tc.kafkaCfg = &kafka.ConfigMap{\n\t\t\t\t\"bootstrap.servers\": envCfg.Broker,\n\t\t\t\t\"socket.keepalive.enable\": true,\n\t\t\t\t\"enable.auto.commit\": false,\n\t\t\t\t\"enable.partition.eof\": true,\n\t\t\t\t\"session.timeout.ms\": 6000,\n\t\t\t\t\"auto.offset.reset\": \"earliest\",\n\t\t\t\t\"group.id\": envCfg.GroupID,\n\t\t\t}\n\n\t\t\tif envCfg.CAFile != \"\" {\n\t\t\t\t// configure SSL\n\t\t\t\tc.kafkaCfg.SetKey(\"security.protocol\", \"ssl\")\n\t\t\t\tc.kafkaCfg.SetKey(\"ssl.ca.location\", envCfg.CAFile)\n\t\t\t\tc.kafkaCfg.SetKey(\"ssl.key.location\", envCfg.KeyFile)\n\t\t\t\tc.kafkaCfg.SetKey(\"ssl.certificate.location\", envCfg.CertificateFile)\n\t\t\t}\n\t\t}\n\n\t\tif c.KafkaConsumer, err = kafka.NewConsumer(c.kafkaCfg); err != nil {\n\t\t\treturn nil, errors.WithMessage(err, \"cannot initialize kafka consumer\")\n\t\t}\n\t}\n\n\tif c.srClient == nil {\n\t\tif c.srURL == nil {\n\t\t\tvar envCfg struct {\n\t\t\t\tSchemaRegistry *url.URL `env:\"KAFKA_SCHEMA_REGISTRY\" envDefault:\"http://localhost:8081\"`\n\t\t\t}\n\t\t\tif err := 
env.Parse(&envCfg); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tc.srURL = envCfg.SchemaRegistry\n\t\t}\n\n\t\tif c.srClient, err = NewCachedSchemaRegistryClient(c.srURL.String()); err != nil {\n\t\t\treturn nil, errors.WithMessage(err, \"cannot initialize schema registry client\")\n\t\t}\n\t}\n\n\tif c.eventHandler == nil {\n\t\tc.eventHandler = func(event kafka.Event) {\n\t\t\tlog.Println(event)\n\t\t}\n\t}\n\n\tif topics != nil {\n\t\tif err := c.KafkaConsumer.SubscribeTopics(topics, nil); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif c.ensureTopics {\n\t\t\tif err = c.EnsureTopics(topics); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn c, nil\n}", "func (r InfluxReporter) ReportConsumerOffsets(o *kage.ConsumerOffsets) {\n\tpts, err := client.NewBatchPoints(client.BatchPointsConfig{\n\t\tDatabase: r.database,\n\t\tPrecision: \"s\",\n\t})\n\tif err != nil {\n\t\tr.log.Error(err.Error())\n\n\t\treturn\n\t}\n\n\tfor group, topics := range *o {\n\t\tfor topic, partitions := range topics {\n\t\t\tfor partition, offset := range partitions {\n\t\t\t\ttags := map[string]string{\n\t\t\t\t\t\"type\": \"ConsumerOffset\",\n\t\t\t\t\t\"group\": group,\n\t\t\t\t\t\"topic\": topic,\n\t\t\t\t\t\"partition\": fmt.Sprint(partition),\n\t\t\t\t}\n\n\t\t\t\tfor key, value := range r.tags {\n\t\t\t\t\ttags[key] = value\n\t\t\t\t}\n\n\t\t\t\tpt, err := client.NewPoint(\n\t\t\t\t\tr.metric,\n\t\t\t\t\ttags,\n\t\t\t\t\tmap[string]interface{}{\n\t\t\t\t\t\t\"offset\": offset.Offset,\n\t\t\t\t\t\t\"lag\": offset.Lag,\n\t\t\t\t\t},\n\t\t\t\t\ttime.Now(),\n\t\t\t\t)\n\t\t\t\tif err != nil {\n\t\t\t\t\tr.log.Error(err.Error())\n\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tpts.AddPoint(pt)\n\t\t\t}\n\t\t}\n\t}\n\n\tif err := r.client.Write(pts); err != nil {\n\t\tr.log.Error(err.Error())\n\t}\n}", "func (o *GetSingleBeadSimulationsParams) SetOffset(offset *int32) {\n\to.Offset = offset\n}", "func (c *cursor) setOffset(o cursorOffset) {\n\tc.cursorOffset = o\n}", "func (s *cryptoStreamImpl) setReadOffset(offset protocol.ByteCount) {\n\ts.receiveStream.readOffset = offset\n\ts.receiveStream.frameQueue.readPos = offset\n}" ]
[ "0.65632766", "0.63940036", "0.6103379", "0.6010719", "0.5940324", "0.59126174", "0.59043145", "0.5897389", "0.58670217", "0.5842533", "0.5818599", "0.5817295", "0.58023596", "0.5781974", "0.5693429", "0.5672001", "0.5663549", "0.565853", "0.5648319", "0.56464297", "0.5642492", "0.5625509", "0.5620801", "0.56202585", "0.5613766", "0.56060284", "0.5597694", "0.55845237", "0.5569826", "0.55527467", "0.55510587", "0.5544347", "0.55174696", "0.54941505", "0.5485965", "0.547392", "0.54697806", "0.54597086", "0.54576296", "0.5456798", "0.54417455", "0.54011214", "0.5400859", "0.539568", "0.53893495", "0.5381571", "0.5378882", "0.5377509", "0.53409964", "0.5337666", "0.5334435", "0.5289621", "0.52884763", "0.52854836", "0.5278601", "0.52677536", "0.52674496", "0.5261389", "0.5261253", "0.52487534", "0.52477854", "0.5247455", "0.5235995", "0.52247095", "0.5223586", "0.5223513", "0.52047217", "0.5201383", "0.5197311", "0.5188397", "0.51624465", "0.516078", "0.5148998", "0.5146166", "0.51424116", "0.51341754", "0.5133082", "0.513129", "0.51298004", "0.512472", "0.51210755", "0.5119349", "0.51076996", "0.5095863", "0.50940275", "0.50899875", "0.50815475", "0.50802743", "0.50696343", "0.5068071", "0.5061455", "0.5058045", "0.5057095", "0.50566953", "0.5047067", "0.5029198", "0.50289255", "0.5025755", "0.5001228", "0.49921855" ]
0.8170133
0
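Worth noting about the document above: despite taking an int64, WithOffsetConsumerOption never stores an arbitrary offset; the switch collapses every value onto the FirstOffset/LastOffset sentinels, and unknown values degrade to FirstOffset. A runnable sketch that makes this behaviour visible; the constant values -2 and -1 mirror segmentio/kafka-go's FirstOffset and LastOffset and are an assumption here if the record's package defines its own.

package main

import "fmt"

// Sentinel start positions, assumed to match segmentio/kafka-go's
// FirstOffset (-2) and LastOffset (-1).
const (
	FirstOffset int64 = -2
	LastOffset  int64 = -1
)

// config stands in for kafka-go's ReaderConfig.
type config struct{ StartOffset int64 }

type Consumer struct{ config *config }

type ConsumerOption func(*Consumer)

// WithOffsetConsumerOption reproduces the option from the document:
// it only ever selects one of the two sentinel positions.
func WithOffsetConsumerOption(offset int64) ConsumerOption {
	return func(c *Consumer) {
		switch offset {
		case LastOffset:
			c.config.StartOffset = LastOffset
		case FirstOffset:
			c.config.StartOffset = FirstOffset
		default:
			// Arbitrary offsets are not honoured; they degrade to
			// reading from the beginning.
			c.config.StartOffset = FirstOffset
		}
	}
}

func main() {
	c := &Consumer{config: &config{}}
	WithOffsetConsumerOption(42)(c)
	fmt.Println(c.config.StartOffset) // prints -2 (FirstOffset)
}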
Open actually handles the subscriber messages
func (c *Consumer) Open() error { if c.reader == nil { c.reader = kafgo.NewReader(*c.config) } for { // start a new context var ( ctx = context.Background() msg kafgo.Message err error ) if c.autocommit { msg, err = c.reader.ReadMessage(ctx) } else { msg, err = c.reader.FetchMessage(ctx) } if err != nil { c.errFn(ctx, msg, errors.Wrap( err, "read message from kafka failed", )) c.errHandler.Handle(ctx, err) continue } // before endpoint for _, fn := range c.befores { ctx = fn(ctx, msg) } rq, err := c.dec(ctx, msg) if err != nil { c.errFn(ctx, msg, err) c.errHandler.Handle(ctx, err) continue } // execute endpoint rs, err := c.end(ctx, rq) if err != nil { c.errFn(ctx, msg, err) c.errHandler.Handle(ctx, err) continue } for _, fn := range c.afters { ctx = fn(ctx, msg, rs) } if !c.autocommit { err = c.reader.CommitMessages(ctx, msg) if err != nil { c.errFn(ctx, msg, err) c.errHandler.Handle(ctx, err) continue } } } }
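The Open loop in this document rides on kafka-go's two read paths: ReadMessage both reads and commits when the consumer runs with autocommit, while FetchMessage defers the commit to an explicit CommitMessages call once the endpoint has succeeded, so a failed handler sees the message again. A minimal standalone sketch of that manual-commit path, assuming segmentio/kafka-go (which the kafgo alias in the document suggests) and placeholder broker, group, and topic names:

package main

import (
	"context"
	"log"

	kafgo "github.com/segmentio/kafka-go"
)

func main() {
	r := kafgo.NewReader(kafgo.ReaderConfig{
		Brokers: []string{"localhost:9092"}, // placeholder broker
		GroupID: "example-group",            // placeholder consumer group
		Topic:   "example-topic",            // placeholder topic
	})
	defer r.Close()

	ctx := context.Background()
	for {
		// FetchMessage reads without committing, unlike ReadMessage,
		// which commits automatically when a GroupID is set.
		msg, err := r.FetchMessage(ctx)
		if err != nil {
			log.Printf("fetch failed: %v", err)
			return
		}

		// ...decode and handle msg here, as Open's endpoint step does...

		// Commit only after the handler succeeded, so a failure makes
		// the message visible again on the next fetch.
		if err := r.CommitMessages(ctx, msg); err != nil {
			log.Printf("commit failed: %v", err)
			return
		}
	}
}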
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (core *Core) handleOpen(query *MessageQuery) {\n\n\tif verbose {\n\t\tlog.Println(\"Opening connection\")\n\t}\n\tcore.locks.AddClient(query.clt)\n}", "func (w *BaseWebsocketClient) OnOpen() {}", "func (s *BasevhdlListener) EnterFile_open_information(ctx *File_open_informationContext) {}", "func (controller *UIController) Open(ctx context.Context) error {\n\treturn nil\n}", "func (c *subContext) openStream(ctx context.Context, epID epapi.ID, indCh chan<- indication.Indication) error {\n\tresponse, err := c.epClient.Get(ctx, epID)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tconn, err := c.conns.Connect(fmt.Sprintf(\"%s:%d\", response.IP, response.Port))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tclient := termination.NewClient(conn)\n\tresponseCh := make(chan e2tapi.StreamResponse)\n\trequestCh, err := client.Stream(ctx, responseCh)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\trequestCh <- e2tapi.StreamRequest{\n\t\tAppID: e2tapi.AppID(c.config.AppID),\n\t\tInstanceID: e2tapi.InstanceID(c.config.InstanceID),\n\t\tSubscriptionID: e2tapi.SubscriptionID(c.sub.ID),\n\t}\n\n\tfor response := range responseCh {\n\t\tindCh <- indication.Indication{\n\t\t\tEncodingType: encoding.Type(response.Header.EncodingType),\n\t\t\tPayload: indication.Payload{\n\t\t\t\tHeader: response.IndicationHeader,\n\t\t\t\tMessage: response.IndicationMessage,\n\t\t\t},\n\t\t}\n\t}\n\treturn nil\n}", "func (t *Tunnel) open() error {\n\tif t.nc == nil {\n\t\treturn nats.ErrInvalidConnection\n\t}\n\tt.mon.timer = globalTimerPool.Get(t.mon.readTimeout)\n\tt.inbox = nats.NewInbox()\n\t// TODO: we could use this directly below in subscribe instead of wildcard, but will consider later.\n\tt.reply = fmt.Sprintf(\"%s.%s\", t.inbox, t.randSuffix.Get())\n\tt.mux = sync.Mutex{}\n\n\tsub, err := t.nc.QueueSubscribe(fmt.Sprintf(\"%s.*\", t.inbox), emptyQ, t.recv)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tt.sub = sub\n\treturn nil\n}", "func (jobs *Jobs) Open() {\n\tjobs.ch = channels.NewInfiniteChannel()\n}", "func (t *Transport) Open() error {\n\treturn errors.New(\"TODO\")\n}", "func (s *Service) Open(ctx context.Context) {\n\tctx, cancel := context.WithCancel(ctx)\n\ts.closer = cancel\n\n\ts.queue = queue.NewLocalLimitedSize(2, 256)\n\tgrip.CatchAlert(s.queue.Start(ctx))\n\n\ts.addRoutes()\n}", "func Open() {\n\tsystem = Connect()\n}", "func (c *Command) open() {\n\tif len(c.parsed) <= 1 {\n\t\treturn\n\t}\n\tc.openFile(c.parsed[1:])\n\tc.done()\n}", "func (ml *ManagedListener) Open() {\n\tif ml != nil {\n\t\tdefer trace.Tracer.ScopedTrace()()\n\t\tgo ml.Listening()\n\t\tgo ml.PipeMapHandler()\n\t\tml.SetExternalIP()\n\t}\n}", "func Open(id int)(*Window,error){\n\n\n/*8:*/\n\n\n//line goacme.w:147\n\n{\nvar err error\nonce.Do(func(){fsys,err= client.MountService(\"acme\")})\nif err!=nil{\nreturn nil,err\n}\n}\n\n\n\n/*:8*/\n\n\n//line goacme.w:177\n\nif err:=fsys.Access(fmt.Sprintf(\"%d\",id),plan9.OREAD);err!=nil{\nreturn nil,err\n}\nthis:=&Window{id:id}\n\n\n/*22:*/\n\n\n//line goacme.w:308\n\nthis.files= make(map[string]*client.Fid)\n\n\n\n/*:22*/\n\n\n\n/*30:*/\n\n\n//line goacme.w:384\n\nthis.prev= lwin\nthis.next= nil\nif fwin==nil{\nfwin= this\n}\nif lwin!=nil{\nlwin.next= this\n}\nlwin= this\n\n\n\n/*:30*/\n\n\n\n/*64:*/\n\n\n//line goacme.w:868\n\nif _,err:=this.File(\"addr\");err!=nil{\nreturn nil,err\n}\n\n\n\n\n/*:64*/\n\n\n//line goacme.w:182\n\nreturn this,nil\n}", "func (notifee *Notifee) OpenedStream(network.Network, network.Stream) {}", "func (d *Discord) Open() (<-chan Message, error) {\n\tshards := 
d.Shards\n\tif shards < 1 {\n\t\tshards = 1\n\t}\n\n\td.Sessions = make([]*discordgo.Session, shards)\n\n\tfor i := 0; i < shards; i++ {\n\t\tsession, err := discordgo.New(d.args...)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tsession.State.TrackPresences = false\n\t\tsession.ShardCount = shards\n\t\tsession.ShardID = i\n\t\tsession.AddHandler(d.onMessageCreate)\n\t\tsession.AddHandler(d.onMessageUpdate)\n\t\tsession.AddHandler(d.onMessageDelete)\n\n\t\td.Sessions[i] = session\n\t}\n\n\td.Session = d.Sessions[0]\n\n\tfor i := 0; i < len(d.Sessions); i++ {\n\t\td.Sessions[i].Open()\n\t}\n\n\treturn d.messageChan, nil\n}", "func (c *Client) Open() error {\n\theader := http.Header{}\n\tc.authorize(header)\n\theader.Set(\"Num-Shards\", strconv.FormatInt(int64(c.totalShards), 10))\n\theader.Set(\"User-Id\", c.userID)\n\n\tvar err error\n\tc.conn, _, err = websocket.DefaultDialer.Dial(c.wsHost, header)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (d *Discord) Open() (<-chan Message, error) {\n\tgateway, err := discordgo.New(d.args[0].(string))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ts, err := gateway.GatewayBot()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\td.Sessions = make([]*discordgo.Session, s.Shards)\n\n\tlog.Printf(\"%s opening with %d shards\\n\", d.Name(), s.Shards)\n\twg := sync.WaitGroup{}\n\tfor i := 0; i < s.Shards; i++ {\n\t\tlog.Printf(\"%s opening shard %d\\n\", d.Name(), i+1)\n\t\tsession, err := discordgo.New(d.args[0].(string))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif d.Session == nil {\n\t\t\td.Session = session\n\t\t}\n\t\td.Sessions[i] = session\n\t\tsession.ShardCount = s.Shards\n\t\tsession.ShardID = i\n\t\tsession.State.TrackPresences = false\n\t\twg.Add(1)\n\t\tgo func(session *discordgo.Session) {\n\t\t\tdefer wg.Done()\n\t\t\terr := session.Open()\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"error opening shard %s\", err)\n\t\t\t}\n\t\t}(d.Sessions[i])\n\t}\n\twg.Wait()\n\n\tfor _, session := range d.Sessions {\n\t\tsession.AddHandler(d.onMessageCreate)\n\t\tsession.AddHandler(d.onMessageUpdate)\n\t\tsession.AddHandler(d.onMessageDelete)\n\t}\n\n\treturn d.messageChan, nil\n}", "func (ferry *Ferry) Open() {\n\tfor {\n\t\tcar := <-ferry.enterChannel\n\n\t\tferry.loadedCars = append(ferry.loadedCars, car)\n\t\tferry.totalRevenue = ferry.totalRevenue + car.GetTicket().Price\n\n\t\tfmt.Print(\"\\nRute yang telah dilewati:\\n\")\n\t\tcar.PrintRoutes()\n\t}\n}", "func (c *Client) Sub(name string, args ...interface{}) (chan string, error) {\n\n\tif args == nil {\n\t\tlog.Println(\"no args passed\")\n\t\tif err := c.ddp.Sub(name); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t} else {\n\t\tif err := c.ddp.Sub(name, args[0], false); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tmsgChannel := make(chan string, default_buffer_size)\n\tc.ddp.CollectionByName(\"stream-room-messages\").AddUpdateListener(genericExtractor{msgChannel, \"update\"})\n\n\treturn msgChannel, nil\n}", "func (tr *Transport) Open() error {\n\n\tif tr.metricer != nil {\n\t\tif hn := tr.metricer.Handler(); hn != nil {\n\t\t\ttr.mux.Handler(net_http.MethodGet, \"/metrics\", hn)\n\t\t}\n\t}\n\n\tfor _, mon := range tr.monitors {\n\t\ttr.mux.Handler(net_http.MethodGet, mon, net_http.HandlerFunc(\n\t\t\tfunc(rw net_http.ResponseWriter, req *net_http.Request) {\n\t\t\t\trw.WriteHeader(net_http.StatusOK)\n\t\t\t\trw.Write([]byte(\"alive\")) //nolint:errcheck\n\t\t\t},\n\t\t))\n\t}\n\n\treturn tr.ListenAndServe()\n}", "func (r 
*ReceiveFuncState) Open() {\n\tmu.Lock()\n\tdefer mu.Unlock()\n\tr.started = true\n\tr.running = true\n\tselfCheckLocked()\n}", "func (cb *callbacks) OnStreamOpen(ctx context.Context, id int64, typ string) error {\n\tglog.V(2).Infof(\"OnStreamOpen %d open for %s\", id, typ)\n\treturn nil\n}", "func (s *Session) open(ctx context.Context) error {\n\terr := s.doSession(ctx, func(ctx context.Context, conn *grpc.ClientConn, header *headers.RequestHeader) (*headers.ResponseHeader, interface{}, error) {\n\t\trequest := &api.OpenSessionRequest{\n\t\t\tHeader: header,\n\t\t\tTimeout: &s.Timeout,\n\t\t}\n\t\tclient := api.NewSessionServiceClient(conn)\n\t\tresponse, err := client.OpenSession(ctx, request)\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\t\treturn response.Header, response, nil\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tgo func() {\n\t\tfor range s.ticker.C {\n\t\t\t_ = s.keepAlive(context.TODO())\n\t\t}\n\t}()\n\treturn nil\n}", "func (me *MessagerEngine) Open(dbconfigs dbconfigs.DBConfigs) error {\n\tif me.isOpen {\n\t\treturn nil\n\t}\n\tme.conns.Open(&dbconfigs.App, &dbconfigs.Dba)\n\tme.tsv.se.RegisterNotifier(\"messages\", me.schemaChanged)\n\tme.isOpen = true\n\treturn nil\n}", "func (c *Connection) Open(conn net.Conn) (err error) {\n\tc.handler = newHandler(c)\n\tc.pump, err = event.NewPump(conn,\n\t\tevent.NewMessagingDelegator(c.handler),\n\t)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif c.Server {\n\t\tc.pump.Server()\n\t}\n\tgo c.pump.Run()\n\treturn nil\n}", "func (p *streamingPuller) open() error {\n\tp.c.L.Lock()\n\tdefer p.c.L.Unlock()\n\tp.openLocked()\n\treturn p.err\n}", "func (n *SQSNotify) Open() (err error) {\n\tawsSQS := sqs.New(n.auth, n.region)\n\tn.queue, err = awsSQS.GetQueue(n.name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tn.dqID = list.New()\n\tn.dqMsg = make(map[string]sqs.Message)\n\treturn nil\n}", "func (ch *Channel) Open() {\n ch.Clients = new(sync.Map)\n}", "func New()(*Window,error){\n\n\n/*8:*/\n\n\n//line goacme.w:147\n\n{\nvar err error\nonce.Do(func(){fsys,err= client.MountService(\"acme\")})\nif err!=nil{\nreturn nil,err\n}\n}\n\n\n\n/*:8*/\n\n\n//line goacme.w:160\n\nf,err:=fsys.Open(\"new/ctl\",plan9.OREAD)\nif err!=nil{\nreturn nil,err\n}\ndefer f.Close()\nvar id int\nif _,err:=fmt.Fscan(f,&id);err!=nil{\nreturn nil,err\n}\nreturn Open(id)\n}", "func (cli *chatLogInteral) run() {\n\npumpLoop:\n\tfor {\n\t\tselect {\n\t\tcase deadSub, ok := <-cli.killSubs:\n\t\t\tif !ok {\n\t\t\t\tbreak pumpLoop\n\t\t\t}\n\n\t\t\tsubs := []subToChatPump{}\n\t\t\tfor _, sub := range cli.subbedToChat {\n\t\t\t\tif sub.C == deadSub.C {\n\t\t\t\t\tclose(deadSub.C)\n\t\t\t\t} else {\n\t\t\t\t\tsubs = append(subs, sub)\n\t\t\t\t}\n\t\t\t}\n\t\t\tcli.subbedToChat = subs\n\n\t\tcase newSub, ok := <-cli.newSubs:\n\t\t\tif !ok {\n\t\t\t\tbreak pumpLoop\n\t\t\t}\n\n\t\t\tcli.subbedToChat = append(cli.subbedToChat, newSub)\n\n\t\tcase llp := <-cli.newLines:\n\t\t\tcli.postInternal(llp)\n\t\t}\n\t}\n\n\t// Close all alert Channels\n\tfor _, sub := range cli.subbedToChat {\n\t\tclose(sub.C)\n\t}\n}", "func (m *Manager) Open() (err error) {\n\tres := new(events.GatewayBot)\n\tif err = m.Client.FetchJSON(\"GET\", \"/gateway/bot\", nil, &res); err != nil {\n\t\treturn\n\t}\n\tm.Gateway = res\n\n\t// _-**--__\n\t// _--* *--__ Sandwich Producer ...\n\t// _-** **-_\n\t// |_*--_ _-* _|\t Cluster: ... (...)\n\t// | *-_ *---_ _----* _-* | Shards: ... 
(...)\n\t// *-_ *--__ ***** __---* _*\t Sessions Remaining: ...\n\t// *--__ *-----** ___--* Concurrent Identifies: ... / ...\n\t// **-____-**\n\n\tfmt.Printf(\"\\n _-**--__\\n _--* *--__ Sandwich Producer %s\\n _-** **-_\\n|_*--_ _-* _| Cluster: %d (%d)\\n| *-_ *---_ _----* _-* | Shards: %d (%d)\\n *-_ *--__ ***** __---* _* Sessions Remaining: %d/%d\\n *--__ *-----** ___--* Concurrent Clients: %d / %d\\n **-____-**\\n\\n\",\n\t\tVERSION, m.Configuration.ClusterID, m.Configuration.ClusterCount, m.Configuration.ShardCount, res.Shards, res.SessionStartLimit.Remaining, res.SessionStartLimit.Total, m.Configuration.MaxConcurrentIdentifies, m.Gateway.SessionStartLimit.MaxConcurrency)\n\n\tif m.Configuration.ShardCount*2 >= res.SessionStartLimit.Remaining {\n\t\tm.log.Warn().Msgf(\"Current set shard count of %d is near the remaining session limit of %d\",\n\t\t\tm.Configuration.ShardCount, res.SessionStartLimit.Remaining)\n\t}\n\n\tvar shardCount int\n\n\tif m.Configuration.AutoSharded || m.Configuration.ShardCount < int(res.Shards)/2 {\n\t\tshardCount = res.Shards\n\t} else {\n\t\tshardCount = m.Configuration.ShardCount\n\t}\n\n\t// We will always round up the Shards to the nearest 16 if it uses more than 63 shards\n\t// just in order to support the majority of larger bots as we don't really know when\n\t// big bot sharding has occured and usually the determined devision is 16 or a multiple.\n\tif shardCount > 63 {\n\t\tshardCount = int(math.Ceil(float64(shardCount)/16)) * 16\n\t}\n\n\tm.log.Info().Msgf(\"Using %d shard(s)\", shardCount)\n\n\terr = m.Scale(m.CreateShardIDs(shardCount), shardCount)\n\treturn\n}", "func (d *device) Open() { d.os.openShell() }", "func (c *Client) Open() error {\n\t// TODO: auto-select transport based on BMC capabilities\n\treturn c.open()\n}", "func (c *ExampleClient) Open(info SessionInfo) (string, Error) {\n\tc.SessionInfo = info\n\t// Do what you need to do to open the session.\n\treturn \"Client has finished opening the session\", nil\n}", "func (c *Config) Open(id string, logger log.Logger) (connector.Connector, error) {\n\tuserHeader := c.UserHeader\n\tif userHeader == \"\" {\n\t\tuserHeader = \"X-Remote-User\"\n\t}\n\tgroupHeader := c.GroupHeader\n\tif groupHeader == \"\" {\n\t\tgroupHeader = \"X-Remote-Group\"\n\t}\n\n\treturn &callback{userHeader: userHeader, groupHeader: groupHeader, logger: logger, pathSuffix: \"/\" + id, groups: c.Groups}, nil\n}", "func open(path string) {\n\tif err := exec.Command(\"/usr/bin/open\", path).Run(); err != nil {\n\t\tfmt.Printf(\"report.Open err: %x\\n\", err.Error())\n\t}\n}", "func Open(config *Config) (*Tc, error) {\n\tvar tc Tc\n\n\tif config == nil {\n\t\tconfig = &Config{}\n\t}\n\n\tcon, err := netlink.Dial(unix.NETLINK_ROUTE, &netlink.Config{NetNS: config.NetNS})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ttc.con = con\n\n\treturn &tc, nil\n}", "func Open() {\n\t// router.RegisterByHandleFunc(\"/\", func(res http.ResponseWriter, req *http.Request) {\n\t// \tvar tpl = template.Must(template.ParseGlob(\"resources/*.gohtml\"))\n\t// \terr := tpl.ExecuteTemplate(res, \"home.gohtml\", nil)\n\t// \tif !recoder.Write(err) {\n\t// \t\tio.WriteString(res, err.Error())\n\t// \t}\n\t// })\n\trouter.RegisterByString(\"/\", \"HomeController\", \"Index\")\n\trouter.RegisterByString(\"/login\", \"LoginController\", \"Index\")\n\trouter.RegisterByString(\"/logincheck\", \"LoginController\", \"LoginCheck\")\n\trouter.RegisterByString(\"/logout\", \"LoginController\", \"Logout\")\n\trouter.RegisterByString(\"/register\", 
\"RegisterController\", \"Index\")\n\trouter.RegisterByString(\"/registercheck\", \"RegisterController\", \"Register\")\n\trouter.RegisterByString(\"/activity\", \"ActivityController\", \"Index\")\n\trouter.RegisterByString(\"/activity/create\", \"ActivityController\", \"Create\")\n\trouter.RegisterByString(\"/activity/store\", \"ActivityController\", \"Store\")\n\trouter.RegisterByString(\"/activity/delete\", \"ActivityController\", \"Delete\")\n\trouter.RegisterByString(\"/activity/join\", \"ActivityController\", \"Join\")\n\trouter.Start()\n}", "func (s *Server) Open() error {\n\n ln, err := net.Listen(\"tcp\", s.Addr)\n if err != nil {\n return err\n }\n s.ln = ln\n\n //start a go routine that starts an http server\n go func() {http.Serve(s.ln, s.Handler)} ()\n\n return nil\n}", "func (sc *TraceScope) Open(args ...interface{}) *TraceScope {\n\treturn sc.emitRecord(beginRecord, genericArgs(args))\n}", "func (dataChannel *DataChannel) Open(log log.T) (err error) {\n\tif err = dataChannel.wsChannel.Open(log); err != nil {\n\t\treturn fmt.Errorf(\"failed to open data channel with error: %v\", err)\n\t}\n\n\tif err = dataChannel.FinalizeDataChannelHandshake(log, dataChannel.wsChannel.GetChannelToken()); err != nil {\n\t\treturn fmt.Errorf(\"error sending token for handshake: %v\", err)\n\t}\n\treturn\n}", "func (c *service) OpenMultiCommandSession(config *SessionConfig) (MultiCommandSession, error) {\n\treturn newMultiCommandSession(c, config, c.replayCommands, c.recordSession)\n}", "func (ro *room) run() {\n\tlog.Println(\"Starting up the room with ID = \", ro.ID)\n\n\tfor {\n\t\tselect {\n\t\t//any new incomming messages to the room ?\n\t\tcase msg := <-ro.messages:\n\t\t\tfmt.Printf(\"room%v: %v\\n\", ro.ID, msg)\n\t\t//create a reference of the client in the room struct, and set its value to true\n\t\t//to indicate that this client is in the room\n\t\tcase c := <-ro.joining:\n\t\t\tro.clients[c] = true\n\t\t\tc.msg <- \"Welcome to the room !\"\n\n\t\t}\n\t}\n\n}", "func (config Service) Open(log gopi.Logger) (gopi.Driver, error) {\n\tlog.Debug(\"<grpc.service.mihome>Open{ server=%v mihome=%v }\", config.Server, config.MiHome)\n\n\tthis := new(service)\n\tthis.log = log\n\tthis.mihome = config.MiHome\n\tthis.pubsub = nil\n\n\t// Register service with server\n\tconfig.Server.Register(this)\n\n\t// Reset the radio\n\tif err := this.mihome.ResetRadio(); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Start goroutine for capturing events from mihome\n\tthis.startCapture()\n\n\t// Success\n\treturn this, nil\n}", "func (c *connection) run(msg Message) error {\n\tchannel, requests, err := c.ssh.OpenChannel(\"session\", nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tsession := &session{\n\t\tconnection: c,\n\t\tchannel: channel,\n\t\trequests: requests,\n\t\texit: make(chan status),\n\t}\n\tgo session.serviceRequests()\n\n\tif err = session.start(string(msg.payload)); err != nil {\n\t\treturn err\n\t}\n\n\treaper := make(chan int)\n\tc.reapers = append(c.reapers, reaper)\n\n\tsession.finish(msg.responses, reaper)\n\treturn nil\n}", "func (fs *OneFile) Open(id string) error {\n\treturn nil\n}", "func (lp *LP) Open() (err error) {\n\tgojs.CatchException(&err)\n\tlp.Component().Call(\"open\")\n\treturn err\n}", "func (c *Client) readRoutine() {\n\tdefer func() {\n\t\tc.ch.unregister <- c\n\t\tc.conn.Close()\n\t}()\n\n\t// Limit the size of the messages we read\n\tc.conn.SetReadLimit(maxMessageSize) // TODO: Move to creation of 
conn\n\tc.conn.SetReadDeadline(time.Now().Add(pongWait))\n\tc.conn.SetPongHandler(func(string) error {\n\t\tc.conn.SetReadDeadline(time.Now().Add(pongWait))\n\t\treturn nil\n\t})\n\n\tfor {\n\t\t_, msg, err := c.conn.ReadMessage()\n\t\tif err != nil {\n\t\t\tif ws.IsUnexpectedCloseError(err, ws.CloseGoingAway, ws.CloseAbnormalClosure) {\n\t\t\t\tlog.Printf(\"error: %v\", err)\n\t\t\t} // Else the Close was expected aka no error.\n\t\t\treturn\n\t\t}\n\n\t\tmessage := NewMessage(c.Name, strings.Trim(string(msg), \" \\n\\r\"))\n\t\tc.ch.broadcast <- message\n\t}\n}", "func (p *Session) OnOpen() {\n\tlevel.Debug(p.logger).Log(\"msg\", \"Session open\")\n\tp.open = true\n}", "func (r *Receiver) Close() error { return nil }", "func (p *Port) Open() {\n\tif !p.closed {\n\t\treturn\n\t}\n\n\tp.closed = false\n\n\tif p.buf != nil {\n\t\tp.buf = make(chan interface{}, CHANNEL_SIZE)\n\t}\n\n\tif p.sub != nil {\n\t\tp.sub.Open()\n\t}\n\n\tif len(p.subs) > 0 {\n\t\tfor _, sub := range p.subs {\n\t\t\tsub.Open()\n\t\t}\n\t}\n}", "func onFileOpen(filename string) {\n\t//\n}", "func (self *AbtabURL) OpenRead() error {\n\tswitch {\n\tcase \"\" == self.Url.Scheme:\n\t\tself.TabOpenRead()\n\t\treturn nil\n\t\tbreak\n\tcase \"tab\" == self.Url.Scheme:\n\t\tself.TabOpenRead()\n\t\treturn nil\n\t\tbreak\n\tcase \"fixed\" == self.Url.Scheme:\n\t\tself.FixedWidthOpenRead()\n\t\treturn nil\n\t\tbreak\n\tdefault:\n\t\treturn AbtabError{Message: fmt.Sprintf(\"Error: unrecognized scheme: '%s'\", self.Url.Scheme)}\n\t\tbreak\n\tcase \"csv\" == self.Url.Scheme:\n\t\tself.CsvOpenRead()\n\t\tbreak\n\t\t// case \"fixed\" == self.Url.Scheme:\n\t\t// self.FixedOpenRead()\n\t\t// break;\n\tcase \"pg\" == self.Url.Scheme:\n\t\tself.PgOpenRead()\n\t\tbreak\n\t\t// case \"mysql\" == self.Url.Scheme:\n\t\t// self.MysqlOpenRead()\n\t\t// break;\n\t\t// return nil\n\t}\n\treturn nil\n}", "func (c *Client) Open(bufferSize int) error {\n\tvar (\n\t\taddr *net.UDPAddr\n\t\tok bool\n\t\terr error\n\t)\n\n\tif addr, ok = c.LocalAddr.(*net.UDPAddr); !ok {\n\t\treturn fmt.Errorf(\"Invalid local address: %v not a net.UDPAddr\", c.LocalAddr)\n\t}\n\n\t// prepare the socket to listen on for replies\n\tc.connection, err = net.ListenUDP(\"udp6\", addr)\n\tif err != nil {\n\t\treturn err\n\t}\n\tc.stopping = new(sync.WaitGroup)\n\tc.sendQueue = make(chan dhcpv6.DHCPv6, bufferSize)\n\tc.receiveQueue = make(chan dhcpv6.DHCPv6, bufferSize)\n\tc.packets = make(map[dhcpv6.TransactionID]*promise.Promise)\n\tc.packetsLock = sync.Mutex{}\n\tc.errors = make(chan error)\n\n\tvar ctx context.Context\n\tctx, c.cancel = context.WithCancel(context.Background())\n\tgo c.receiverLoop(ctx)\n\tgo c.senderLoop(ctx)\n\n\treturn nil\n}", "func (s *instance) initialize(jsondata []byte) error {\r\n\ts.name = \"shout\"\r\n\tconfig, err := config.ShoutConfigFromJSON(jsondata)\r\n\tif err == nil {\r\n\t\ts.config = config\r\n\t\ts.client = slack.New(s.config.UserToken.String, slack.OptionDebug(true), slack.OptionAppLevelToken(s.config.AppToken.String))\r\n\r\n\t\t/// The below does work, we can receive messages :-)\r\n\r\n\t\t///\t\t// go-slack comes with a SocketMode package that we need to use that accepts a Slack client and outputs a Socket mode client instead\r\n\t\t///\t\ts.socket_client = socketmode.New(\r\n\t\t///\t\t\ts.client,\r\n\t\t///\t\t\tsocketmode.OptionDebug(true),\r\n\t\t///\t\t\t// Option to set a custom logger\r\n\t\t///\t\t\tsocketmode.OptionLog(log.New(os.Stdout, \"socketmode: \", 
log.Lshortfile|log.LstdFlags)),\r\n\t\t///\t\t)\r\n\t\t///\r\n\t\t///\t\t// Create a context that can be used to cancel goroutine\r\n\t\t///\t\tctx, cancel := context.WithCancel(context.Background())\r\n\t\t///\t\t// Make this cancel called properly in a real program , graceful shutdown etc\r\n\t\t///\t\tdefer cancel()\r\n\t\t///\r\n\t\t///\t\tgo func(ctx context.Context, client *slack.Client, socketClient *socketmode.Client) {\r\n\t\t///\t\t\t// Create a for loop that selects either the context cancellation or the events incomming\r\n\t\t///\t\t\tfor {\r\n\t\t///\t\t\t\tselect {\r\n\t\t///\t\t\t\t// inscase context cancel is called exit the goroutine\r\n\t\t///\t\t\t\tcase <-ctx.Done():\r\n\t\t///\t\t\t\t\tlog.Println(\"Shutting down socketmode listener\")\r\n\t\t///\t\t\t\t\treturn\r\n\t\t///\t\t\t\tcase event := <-socketClient.Events:\r\n\t\t///\t\t\t\t\t// We have a new Events, let's type switch the event\r\n\t\t///\t\t\t\t\t// Add more use cases here if you want to listen to other events.\r\n\t\t///\t\t\t\t\tswitch event.Type {\r\n\t\t///\t\t\t\t\t// handle EventAPI events\r\n\t\t///\t\t\t\t\tcase slackevents.Message:\r\n\t\t///\r\n\t\t///\t\t\t\t\t\t// The application has been mentioned since this Event is a Mention event\r\n\t\t///\t\t\t\t\t\tlog.Println(event)\r\n\t\t///\r\n\t\t///\t\t\t\t\tcase socketmode.EventTypeEventsAPI:\r\n\t\t///\t\t\t\t\t\t// The Event sent on the channel is not the same as the EventAPI events so we need to type cast it\r\n\t\t///\t\t\t\t\t\teventsAPIEvent, ok := event.Data.(slackevents.EventsAPIEvent)\r\n\t\t///\t\t\t\t\t\tif !ok {\r\n\t\t///\t\t\t\t\t\t\tlog.Printf(\"Could not type cast the event to the EventsAPIEvent: %v\\n\", event)\r\n\t\t///\t\t\t\t\t\t\tcontinue\r\n\t\t///\t\t\t\t\t\t}\r\n\t\t///\t\t\t\t\t\t// We need to send an Acknowledge to the slack server\r\n\t\t///\t\t\t\t\t\tsocketClient.Ack(*event.Request)\r\n\t\t///\t\t\t\t\t\t// Now we have an Events API event, but this event type can in turn be many types, so we actually need another type switch\r\n\t\t///\t\t\t\t\t\tlog.Println(eventsAPIEvent)\r\n\t\t///\t\t\t\t\t}\r\n\t\t///\r\n\t\t///\t\t\t\t}\r\n\t\t///\t\t\t}\r\n\t\t///\t\t}(ctx, s.client, s.socket_client)\r\n\t\t///\r\n\t\t///\t\ts.socket_client.Run()\r\n\t}\r\n\treturn err\r\n}", "func (stream *MAMWriteStream) Open() (trinary.Trytes, error) {\n\tchannelID, err := stream.m.ChannelCreate(5)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tstream.currentChannelID = channelID\n\treturn channelID, nil\n}", "func (c *JSONRPCSignalClient) Open(url string) (<-chan struct{}, error) {\n\tconn, _, err := websocket.DefaultDialer.Dial(url, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tc.jc = jsonrpc2.NewConn(c.context, websocketjsonrpc2.NewObjectStream(conn), c)\n\treturn c.jc.DisconnectNotify(), nil\n}", "func TestClient_Open(t *testing.T) {\n\tc := NewClient(1000)\n\tdefer c.Close()\n\n\t// Create replica on broker.\n\tc.Server.Handler.Broker().CreateReplica(1000, &url.URL{Host: \"localhost\"})\n\n\t// Open client to broker.\n\tf := NewTempFile()\n\tdefer os.Remove(f)\n\tu, _ := url.Parse(c.Server.URL)\n\tif err := c.Open(f, []*url.URL{u}); err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t}\n\n\t// Receive a message from the stream.\n\tif m := <-c.C(); m.Type != messaging.CreateReplicaMessageType {\n\t\tt.Fatalf(\"unexpected message type: %x\", m.Type)\n\t}\n\n\t// Close connection to the broker.\n\tif err := c.Client.Close(); err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t}\n}", "func init() {\n\tgo func(ch 
<-chan string, chClose <-chan struct{}) {\n\t\tfor true {\n\t\t\tselect {\n\t\t\tcase msg := <-ch:\n\t\t\t\tOutMessageToAll(msg)\n\t\t\tcase <-chClose:\n\t\t\t\tclose(models.CancelChOutMessageToAll)\n\t\t\t\tclose(models.ChOutMessageToAll)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}(models.ChOutMessageToAll, models.CancelChOutMessageToAll)\n}", "func (L *State) OpenBase() {\n\tC.clua_openbase(L.s)\n\tL.PushGoFunction(print)\n\tL.SetGlobal(\"print\")\n\tL.PushGoFunction(printex)\n\tL.SetGlobal(\"printex\")\n\tL.PushGoFunction(pcall)\n\tL.SetGlobal(\"pcall\")\n\tL.PushGoFunction(xpcall)\n\tL.SetGlobal(\"xpcall\")\n\n}", "func TestClient_Open_ErrClientOpen(t *testing.T) {\n\tc := NewClient(1000)\n\tdefer c.Close()\n\n\t// Open client to broker.\n\tf := NewTempFile()\n\tdefer os.Remove(f)\n\tu, _ := url.Parse(c.Server.URL)\n\tc.Open(f, []*url.URL{u})\n\tif err := c.Open(f, []*url.URL{u}); err != messaging.ErrClientOpen {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t}\n}", "func (nc *NetClient) handleMessage(m *arbor.ProtocolMessage) {\n\tswitch m.Type {\n\tcase arbor.NewMessageType:\n\t\tif !nc.Archive.Has(m.UUID) {\n\t\t\tif nc.receiveHandler != nil {\n\t\t\t\tnc.receiveHandler(m.ChatMessage)\n\t\t\t\t// ask Notifier to handle the message\n\t\t\t\tnc.Notifier.Handle(nc, m.ChatMessage)\n\t\t\t}\n\t\t\tif m.Parent != \"\" && !nc.Archive.Has(m.Parent) {\n\t\t\t\tnc.Query(m.Parent)\n\t\t\t}\n\t\t}\n\tcase arbor.WelcomeType:\n\t\tif !nc.Has(m.Root) {\n\t\t\tnc.Query(m.Root)\n\t\t}\n\t\tfor _, recent := range m.Recent {\n\t\t\tif !nc.Has(recent) {\n\t\t\t\tnc.Query(recent)\n\t\t\t}\n\t\t}\n\tcase arbor.MetaType:\n\t\tnc.HandleMeta(m.Meta)\n\t}\n}", "func (r *Reader) openBidi() error {\n\t// read READY frame\n\tready, err := r.readControl(ControlTypeReady)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// write ACCEPT frame w/ matching content types from READY frame\n\taf := &Control{\n\t\tType: ControlTypeAccept,\n\t}\n\tfor _, t := range r.contentTypes {\n\t\tif ready.MatchFieldContentType(t) {\n\t\t\taf.ContentTypes = append(af.ContentTypes, t)\n\t\t}\n\t}\n\terr = r.writeControlFrame(af)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn r.openUni()\n}", "func init() {\n\tgo chatroom()\n}", "func Open(msg *api.SecureEnvelope, opts ...Option) (payload *api.Payload, reject *api.Error, err error) {\n\tvar env *Envelope\n\tif env, err = Wrap(msg, opts...); err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\t// A rejection here would be related to a sealing key failure\n\tif reject, err = env.unsealEnvelope(); reject != nil || err != nil {\n\t\treturn nil, reject, err\n\t}\n\n\t// A rejection here is related to the decryption, verification, and parsing the payload\n\tif reject, err = env.decrypt(); reject != nil || err != nil {\n\t\treturn nil, reject, err\n\t}\n\n\tif payload, err = env.Payload(); err != nil {\n\t\treturn nil, nil, err\n\t}\n\treturn payload, nil, nil\n}", "func (c *client) read() {\n\t// Unregister if the node dies\n\tdefer func() {\n\t\tlog.Info(\"Server died\")\n\t\tc.isOpen = false\n\t\tc.conn.Close()\n\t}()\n\n\tfor {\n\t\tmsgType, msg, err := c.conn.ReadMessage()\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tif msgType != websocket.BinaryMessage {\n\t\t\tcontinue\n\t\t}\n\n\t\tpb := protoBlockchain.Envelope{}\n\t\terr = proto.Unmarshal(msg, &pb)\n\t\tif err != nil {\n\t\t\tlog.Error(err)\n\t\t\tcontinue\n\t\t}\n\n\t\tgo func() {\n\t\t\tswitch pb.GetType() {\n\n\t\t\tcase protoBlockchain.Envelope_UPLOAD:\n\t\t\t\tif len(pb.GetData()) > 0 {\n\t\t\t\t\tlog.Info(\"File 
Uploaded\")\n\t\t\t\t\tf, err := os.Create(\"tmp\")\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tlog.Error(err)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\t// f.WriteString(string(pb.GetData()))\n\t\t\t\t\tf.WriteString(\"yes\")\n\t\t\t\t} else {\n\t\t\t\t\tf, err := os.Create(\"tmp\")\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tlog.Error(err)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\tf.WriteString(\"no\")\n\t\t\t\t}\n\n\t\t\tcase protoBlockchain.Envelope_EXIST:\n\t\t\t\tif len(pb.GetData()) > 0 {\n\t\t\t\t\tlog.Info(\"File Exist\")\n\t\t\t\t\tf, err := os.Create(\"tmp\")\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tlog.Error(err)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\tf.WriteString(string(pb.GetData()))\n\t\t\t\t} else {\n\t\t\t\t\tlog.Info(\"File doesn't Exist\")\n\t\t\t\t\tf, err := os.Create(\"tmp\")\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tlog.Error(err)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\tf.WriteString(\"no\")\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\t}\n}", "func (c *DirectClient) Open(correlationId string) error {\n\tif c.Opened {\n\t\treturn nil\n\t}\n\n\tif c.Controller == nil {\n\t\terr := cerr.NewConnectionError(correlationId, \"NO_CONTROLLER\", \"Controller reference is missing\")\n\t\treturn err\n\t}\n\n\tc.Opened = true\n\n\tc.Logger.Info(correlationId, \"Opened direct client\")\n\treturn nil\n}", "func (p *TBufferedReadTransport) Open() error {\n\treturn nil\n}", "func (f *ClientFD) OpenAt(ctx context.Context, flags uint32) (FDID, int, error) {\n\treq := OpenAtReq{\n\t\tFD: f.fd,\n\t\tFlags: flags,\n\t}\n\tvar respFD [1]int\n\tvar resp OpenAtResp\n\tctx.UninterruptibleSleepStart(false)\n\terr := f.client.SndRcvMessage(OpenAt, uint32(req.SizeBytes()), req.MarshalUnsafe, resp.UnmarshalUnsafe, respFD[:])\n\tctx.UninterruptibleSleepFinish(false)\n\treturn resp.NewFD, respFD[0], err\n}", "func messageCreate(s *discordgo.Session, m *discordgo.MessageCreate) {\n\n\t// Ignore all messages created by the bot itself\n\t// This isn't required in this specific example but it's a good practice.\n\tif m.Author.ID == s.State.User.ID {\n\t\treturn\n\t}\n\n\tif m.Content == \"!laser backlight on\" {\n\t\ts.ChannelMessageSend(m.ChannelID, backlight(\"on\"))\n\t}\n\n\tif m.Content == \"!laser backlight off\" {\n\t\ts.ChannelMessageSend(m.ChannelID, backlight(\"off\"))\n\t}\n\n\tif m.Content == \"!laser fullstatus\" {\n\t\ts.ChannelMessageSend(m.ChannelID, fullStatus())\n\t}\n\n\tif m.Content == \"!laser help\" {\n\t\tvar printText string\n\t\tprintText += \"```\\n\"\n\t\tprintText += \"Available Commands:\\n\"\n\t\tprintText += \"-------------------------------\\n\"\n\t\tprintText += \" laser backlight [on|off]\\n\"\n\t\tprintText += \" laser fullstatus\\n\"\n\t\tprintText += \" laser help\\n\"\n\t\tprintText += \" laser maintenance [on|off]\\n\"\n\t\tprintText += \" laser scanwifi\\n\"\n\t\tprintText += \" laser status\\n\"\n\t\tprintText += \"```\\n\"\n\t\ts.ChannelMessageSend(m.ChannelID, printText)\n\t}\n\n\tif m.Content == \"!laser maintenance off\" {\n\t\ts.ChannelMessageSend(m.ChannelID, maintenancemode(\"disable\"))\n\t}\n\n\tif m.Content == \"!laser maintenance on\" {\n\t\ts.ChannelMessageSend(m.ChannelID, maintenancemode(\"enable\"))\n\t}\n\n\tif m.Content == \"!laser scanwifi\" {\n\t\ts.ChannelMessageSend(m.ChannelID, scanWifi())\n\t}\n\n\tif m.Content == \"!laser status\" {\n\t\ts.ChannelMessageSend(m.ChannelID, shortStatus())\n\t}\n\n\tif m.Content == \"!cat\" {\n\t\ttr := &http.Transport{DisableKeepAlives: true}\n\t\tclient := &http.Client{Transport: tr}\n\t\tresp, err := 
client.Get(\"https://images-na.ssl-images-amazon.com/images/I/71FcdrSeKlL._AC_SL1001_.jpg\")\n\t\tif resp != nil {\n\t\t\tdefer resp.Body.Close()\n\t\t}\n\t\tif err != nil {\n\t\t\ts.ChannelMessageSend(m.ChannelID, \"Unable to fetch cat!\")\n\t\t\tfmt.Println(\"[Warning] : Cat API Error\")\n\t\t} else {\n\t\t\ts.ChannelMessageSendEmbed(m.ChannelID, &discordgo.MessageEmbed{\n\t\t\t\tAuthor: &discordgo.MessageEmbedAuthor{Name: \"Cat Picture\", IconURL: Icons + \"/729726642758615151.png\"},\n\t\t\t\tColor: Color,\n\t\t\t\tImage: &discordgo.MessageEmbedImage{\n\t\t\t\t\tURL: resp.Request.URL.String(),\n\t\t\t\t},\n\t\t\t\tFooter: &discordgo.MessageEmbedFooter{Text: \"Cat pictures provided by TheCatApi\", IconURL: Icons + \"/729726642758615151.png\"},\n\t\t\t})\n\t\t\tfmt.Println(\"[Info] : Cat sent successfully to \" + m.Author.Username + \"(\" + m.Author.ID + \") in \" + m.ChannelID)\n\t\t}\n\t}\n}", "func (c *Client) Open() error {\n\tif c.closed() {\n\t\treturn ErrServiceUnavailable\n\t}\n\n\t// first load the meta data from disk\n\t// path := c.Path()\n\t// if path != \"\" {\n\t// \tc.Logger().Printf(\"using client state dir:%s\", path)\n\t// \tif err := c.loadMetaServers(path); err != nil {\n\t// \t\tc.Logger().Fatalf(\"failed to load meta sever data from %s\", path)\n\t// \t}\n\t// }\n\n\t// if metas := c.MetaServers(); len(metas) == 0 {\n\t// \treturn fmt.Errorf(\"MetaServers is empty. It shold at least contain itslef\")\n\t// }\n\n\t// c.changed = make(chan struct{})\n\t// c.closing = make(chan struct{})\n\n\t// c.cacheData = c.retryUntilSnapshot(0)\n\t// if c.cacheData == nil {\n\t// \treturn fmt.Errorf(\"failed to snapshot %v\", c.cacheData)\n\t// }\n\n\t// if err := c.updateMetaServers(); err != nil {\n\t// \tc.Logger().Println(\"failed to updated meta servers\")\n\t// }\n\n\tc.changed = make(chan struct{})\n\tc.closing = make(chan struct{})\n\tc.cacheData = c.retryUntilSnapshot(0)\n\n\tgo c.pollForUpdates()\n\n\treturn nil\n}", "func (c *Car) Open() {}", "func (c *Definition) Open() error {\n\treturn nil\n}", "func (s *store) open(raftln net.Listener) error {\n\ts.logger.Info(fmt.Sprintf(\"Using data dir: %v\", s.path))\n\n\tjoinPeers, err := s.filterAddr(s.config.JoinPeers, s.httpAddr)\n\tif err != nil {\n\t\treturn err\n\t}\n\tjoinPeers = s.config.JoinPeers\n\n\tvar initializePeers []string\n\tif len(joinPeers) > 0 {\n\t\tc := NewClient()\n\t\tc.SetMetaServers(joinPeers)\n\t\tc.SetTLS(s.config.HTTPSEnabled)\n\t\tfor {\n\t\t\tpeers := c.peers()\n\t\t\tif !Peers(peers).Contains(s.raftAddr) {\n\t\t\t\tpeers = append(peers, s.raftAddr)\n\t\t\t}\n\n\t\t\ts.logger.Info(fmt.Sprintf(\"len : %d, %d \\r\\n\" ,len(s.config.JoinPeers),len(peers)))\n\t\t\tif len(s.config.JoinPeers)-len(peers) == 0 {\n\t\t\t\tinitializePeers = peers\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tif len(peers) > len(s.config.JoinPeers) {\n\t\t\t\ts.logger.Info(fmt.Sprintf(\"waiting for join peers to match config specified. found %v, config specified %v\", peers, s.config.JoinPeers))\n\t\t\t} else {\n\t\t\t\ts.logger.Info(fmt.Sprintf(\"Waiting for %d join peers. Have %v. 
Asking nodes: %v\", len(s.config.JoinPeers)-len(peers), peers, joinPeers))\n\t\t\t}\n\t\t\ttime.Sleep(time.Second)\n\t\t}\n\t}\n\n\tif err := s.setOpen(); err != nil {\n\t\treturn err\n\t}\n\n\t// Create the root directory if it doesn't already exist.\n\tif err := os.MkdirAll(s.path, 0777); err != nil {\n\t\treturn fmt.Errorf(\"mkdir all: %s\", err)\n\t}\n\n\t// Open the raft store.\n\tif err := s.openRaft(initializePeers, raftln); err != nil {\n\t\treturn fmt.Errorf(\"raft: %s\", err)\n\t}\n\n\ts.logger.Info(fmt.Sprintf(\"open raft done. %d\", len(joinPeers)))\n\tif len(joinPeers) > 0 {\n\t\tc := NewClient()\n\t\tc.SetMetaServers(joinPeers)\n\t\tc.SetTLS(s.config.HTTPSEnabled)\n\t\tif err := c.Open(); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer c.Close()\n\n\t\t_, err := c.JoinMetaServer(s.httpAddr, s.raftAddr)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// Wait for a leader to be elected so we know the raft log is loaded\n\t// and up to date\n\tif err := s.waitForLeader(0); err != nil {\n\t\treturn err\n\t}\n\n\t// Make sure this server is in the list of metanodes\n\tpeers, err := s.raftState.peers()\n\tif err != nil {\n\t\treturn err\n\t}\n\ts.logger.Info(fmt.Sprintf(\"peers count= %d\", len(peers)))\n\tif len(peers) <= 1 {\n\t\t// we have to loop here because if the hostname has changed\n\t\t// raft will take a little bit to normalize so that this host\n\t\t// will be marked as the leader\n\t\tfor {\n\t\t\terr := s.setMetaNode(s.httpAddr, s.raftAddr)\n\t\t\tif err == nil {\n\t\t\t\tbreak\n\t\t\t}else{\n\t\t\t\ts.logger.Error(err.Error())\n\t\t\t}\n\t\t\ttime.Sleep(100 * time.Millisecond)\n\t\t}\n\t}\n\n\tgo func() { s.rebalance(s.closing) }()\n\ts.logger.Info(\"open store done\")\n\treturn nil\n}", "func (c *Chat) Sub(subName string, topics []LogCat) chan LogLineParsed {\n\tnewSub := make(chan LogLineParsed, 10)\n\tc.logger.newSubs <- subToChatPump{\n\t\tName: subName,\n\t\tSubbed: topics,\n\t\tC: newSub,\n\t}\n\n\treturn newSub\n}", "func (c *Conn) Open() error {\n\n\tc.commandC = make(chan Command, 0)\n\tresponseC := make(chan string, 10)\n\tresultC := make(chan Result, 10)\n\tnotificationC := make(chan Notification, 10)\n\texitC := make(chan bool, 0)\n\tvar err error\n\n\tconn, err := net.DialTimeout(\"tcp\", c.Device.LocationAddr(), time.Second*5)\n\n\tif err != nil {\n\t\tlg.Errorln(\"Failed to connect: %s\\n\", err)\n\t\treturn err\n\t}\n\tlg.Infoln(\"opened conn to \", c.Device.Name, c.Device.Location)\n\n\tgo func() {\n\t\t// TrackedCommand .\n\t\ttype TrackedCommand struct {\n\t\t\tCommand Command\n\t\t\tCreated time.Time\n\t\t}\n\t\tcommands := make(map[int]TrackedCommand, 0)\n\t\tcleanupTicker := time.NewTimer(30 * time.Second)\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-exitC:\n\t\t\t\tlg.Infoln(\"exiting...\")\n\t\t\t\tfor _, v := range commands {\n\t\t\t\t\tv.Command.resultC <- Result{\n\t\t\t\t\t\tErr: ErrClosing,\n\t\t\t\t\t}\n\t\t\t\t\tclose(v.Command.resultC)\n\t\t\t\t}\n\t\t\t\treturn\n\n\t\t\tcase command := <-c.commandC:\n\t\t\t\tcommands[command.ID] = TrackedCommand{command, time.Now()}\n\t\t\t\tdata, err := json.Marshal(&command)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlg.Fatal(err)\n\t\t\t\t}\n\n\t\t\t\tlg.V(10).Infoln(string(data))\n\n\t\t\t\tif err := conn.SetWriteDeadline(time.Now().Add(5 * time.Second)); err != nil {\n\t\t\t\t\tlg.Errorln(err)\n\t\t\t\t}\n\n\t\t\t\t_, err = fmt.Fprintf(conn, \"%s\\r\\n\", data)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlg.Fatal(err)\n\t\t\t\t}\n\n\t\t\tcase r := <-resultC:\n\t\t\t\tlg.V(10).Infoln(r)\n\t\t\t\tif v, ok := 
commands[r.ID]; ok {\n\t\t\t\t\tgo func(c Command, r Result) {\n\t\t\t\t\t\tc.resultC <- r\n\t\t\t\t\t\tclose(c.resultC)\n\t\t\t\t\t}(v.Command, r)\n\t\t\t\t\tdelete(commands, r.ID)\n\t\t\t\t} else {\n\t\t\t\t\tlg.Warningln(\"response from untracked command: %v\", r)\n\t\t\t\t}\n\n\t\t\tcase n := <-notificationC:\n\t\t\t\tlg.V(10).Infoln(n)\n\t\t\t\tif c.NotificationC != nil {\n\t\t\t\t\tc.NotificationC <- n\n\t\t\t\t}\n\n\t\t\tcase <-cleanupTicker.C:\n\t\t\t\tminuteAgo := time.Now().Add(-time.Minute)\n\t\t\t\tfor k, v := range commands {\n\t\t\t\t\tif v.Created.Before(minuteAgo) {\n\t\t\t\t\t\tv.Command.resultC <- Result{\n\t\t\t\t\t\t\tErr: ErrTimeout,\n\t\t\t\t\t\t}\n\t\t\t\t\t\tclose(v.Command.resultC)\n\t\t\t\t\t\tdelete(commands, k)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n\n\tgo func() {\n\tloop:\n\t\tfor str := range responseC {\n\t\t\tvar r resultOrNotification\n\t\t\terr := json.Unmarshal([]byte(str), &r)\n\t\t\tif err != nil {\n\t\t\t\tlg.Errorln(err, str)\n\t\t\t}\n\t\t\tif r.Notification == nil && r.Result == nil {\n\t\t\t\tlog.Printf(\"could not parse message from: %s\", str)\n\t\t\t\tcontinue loop\n\t\t\t}\n\t\t\tif r.Notification != nil {\n\t\t\t\tn := *r.Notification\n\t\t\t\tn.DeviceID = c.Device.ID\n\t\t\t\tnotificationC <- n\n\t\t\t}\n\t\t\tif r.Result != nil {\n\t\t\t\tresultC <- *r.Result\n\t\t\t}\n\t\t}\n\t}()\n\tscanner := bufio.NewScanner(conn)\n\n\tfor scanner.Scan() {\n\t\tresponse := scanner.Text()\n\t\tlg.V(10).Infoln(\"response\", response)\n\t\tresponseC <- response\n\t}\n\tclose(responseC)\n\n\tif err := scanner.Err(); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func BaseOpen(l *State) int {\n\tl.PushGlobalTable()\n\tl.PushGlobalTable()\n\tl.SetField(-2, \"_G\")\n\tSetFunctions(l, baseLibrary, 0)\n\tl.PushString(VersionString)\n\tl.SetField(-2, \"_VERSION\")\n\treturn 1\n}", "func (client *StatsdClient) Open() {\n\tconnectionString := fmt.Sprintf(\"%s:%d\", client.Host, client.Port)\n\tconn, err := net.Dial(\"udp\", connectionString)\n\tif err != nil {\n\t\tlog.Println(err)\n\t}\n\tclient.conn = conn\n}", "func (p *Printer) run() {\n\tdefer close(p.ch)\n\tconn, err := p.ln.Accept()\n\tif err != nil {\n\t\treturn\n\t}\n\tp.conn = conn\n\n\t// If Close() has been called, close the connection.\n\tif atomic.SwapInt32(&p.state, 2) == 1 {\n\t\tconn.Close()\n\t\treturn\n\t}\n\n\tdata, err := ioutil.ReadAll(conn)\n\tif err != nil {\n\t\treturn\n\t}\n\tp.ch <- data\n}", "func (s *replayService) OpenMultiCommandSession(config *SessionConfig) (MultiCommandSession, error) {\n\treturn NewReplayMultiCommandSession(s.shellPrompt, s.system, s.commands), nil\n}", "func openIncoming(w http.ResponseWriter, r *http.Request) {\n fmt.Fprintf(w, \"Hi there, I am openIncoming !\", r.URL.Path[1:])\n}", "func (o *handler) handle(client mqtt.Client, msg mqtt.Message) {\r\n\t// We extract the count and write that out first to simplify checking for missing values\r\n\tvar m Message\r\n\tvar resp Session\r\n\tif err := json.Unmarshal(msg.Payload(), &resp); err != nil {\r\n\t\tfmt.Printf(\"Message could not be parsed (%s): %s\", msg.Payload(), err)\r\n\t\treturn\r\n\t}\r\n\tfmt.Println(resp)\r\n\tswitch resp.Type {\r\n\tcase CMDMSG_OFFER:\r\n\t\tenc.Decode(resp.Data, &m)\r\n\t\tNotice(m)\r\n\tcase CMDMSG_DISC:\r\n\t\tvar devcmd DiscoveryCmd\r\n\t\tenc.Decode(resp.Data, &devcmd)\r\n\t\tDiscoveryDev(&devcmd)\r\n\tcase CMDMSG_WAKE:\r\n\t\tvar fing Fing\r\n\t\tenc.Decode(resp.Data, &fing)\r\n\t\twakemac(fing)\r\n\tcase CMDMSG_UPDATE:\r\n\t\tvar newver 
*versionUpdate\r\n\t\tGetUpdateMyself(newver)\r\n\tcase CMDMSG_MR2:\r\n\t\tvar mr2info Mr2Msg\r\n\t\tenc.Decode(resp.Data, &mr2info)\r\n\t\tMr2HostPort(&mr2info)\r\n\t}\r\n}", "func TestOpen(t *testing.T) {\n\tmockTr := new(mockTTransport)\n\ttr := NewTFramedTransport(mockTr)\n\tmockTr.On(\"Open\").Return(nil)\n\n\tassert.Nil(t, tr.Open())\n\tassert.Equal(t, uint32(defaultMaxLength), tr.maxLength)\n\tmockTr.AssertExpectations(t)\n}", "func (c *Context) Open(name string) {\n\t// we'll remove all old contents\n\tos.RemoveAll(c.StorageDir())\n\t// and recreate directories structure\n\tc.MediaDir()\n\n\tr, err := zip.OpenReader(path.Join(c.AppDir(), fmt.Sprintf(\"%v.%v\", name, fileExtension)))\n\tif err != nil {\n\t\tc.Log.Error(err.Error())\n\t}\n\tdefer r.Close()\n\n\t// Iterate through the files in the archive,\n\t// printing some of their contents.\n\tfor _, f := range r.File {\n\t\trc, err := f.Open()\n\t\tif err != nil {\n\t\t\tc.Log.Error(err.Error())\n\t\t}\n\t\tdst, _ := os.Create(path.Join(c.StorageDir(), f.Name))\n\t\t_, err = io.Copy(dst, rc)\n\t\tif err != nil {\n\t\t\tc.Log.Error(err.Error())\n\t\t}\n\t\trc.Close()\n\t\tdst.Close()\n\t}\n\n\tc.LoadCurrent()\n\tChannelTournament.Emit(EventTypeReload, \"new tournament opened\")\n}", "func (config Codec) Open(log gopi.Logger) (gopi.Driver, error) {\n\tlog.Debug(\"<remotes.Codec.RC5.Open>{ lirc=%v type=%v }\", config.LIRC, config.Type)\n\n\t// Check for LIRC\n\tif config.LIRC == nil {\n\t\treturn nil, gopi.ErrBadParameter\n\t}\n\n\tthis := new(codec)\n\n\t// Set log and lirc objects\n\tthis.log = log\n\tthis.lirc = config.LIRC\n\n\t// Set up channels\n\tthis.done = make(chan struct{})\n\tthis.events = this.lirc.Subscribe()\n\tthis.subscribers = evt.NewPubSub(0)\n\n\t// Set bit length to 14 bits\n\tthis.bit_length = 14\n\tthis.codec_type = remotes.CODEC_RC5\n\n\t// Reset\n\tthis.Reset(false)\n\n\t// Create background routine\n\tif ctx, cancel := context.WithCancel(context.Background()); ctx != nil {\n\t\tthis.cancel = cancel\n\t\tgo this.acceptEvents(ctx)\n\t}\n\n\t// Return success\n\treturn this, nil\n}", "func (s *Server) Open() error {\n\tln, err := net.Listen(\"tcp\", s.Addr)\n\tif err != nil {\n\t\treturn err\n\t}\n\ts.ln = ln\n\tfmt.Printf(\"server start at %s\\n\", s.Addr)\n\thandler := cors.Default().Handler(s.Handler)\n\thttp.Serve(s.ln, handler)\n\treturn nil\n}", "func (i *in) Open() (err error) {\n\tif i.IsOpen() {\n\t\treturn nil\n\t}\n\n\t//i.Lock()\n\n\ti.midiIn, err = rtmidi.NewMIDIInDefault()\n\tif err != nil {\n\t\ti.midiIn = nil\n\t\t//i.Unlock()\n\t\treturn fmt.Errorf(\"can't open default MIDI in: %v\", err)\n\t}\n\n\terr = i.midiIn.OpenPort(i.number, \"\")\n\t//i.Unlock()\n\n\tif err != nil {\n\t\ti.Close()\n\t\treturn fmt.Errorf(\"can't open MIDI in port %v (%s): %v\", i.number, i, err)\n\t}\n\n\t//i.driver.Lock()\n\t//i.midiIn.IgnoreTypes(i.driver.ignoreSysex, i.driver.ignoreTimeCode, i.driver.ignoreActiveSense)\n\ti.driver.opened = append(i.driver.opened, i)\n\t//i.driver.Unlock()\n\n\treturn nil\n}", "func newFSConn(token, infoPath string) (conn *FSConn, err error) {\n\tvar info slack.Info\n\tconn = new(FSConn)\n\n\tif infoPath != \"\" {\n\t\tbuf, err := ioutil.ReadFile(infoPath)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"ReadFile(%s): %s\", infoPath, err)\n\t\t}\n\t\terr = json.Unmarshal(buf, &info)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Unmarshal: %s\", err)\n\t\t}\n\t} else {\n\t\tconn.api = slack.New(token)\n\t\t//conn.api.SetDebug(true)\n\t\tconn.ws, err = conn.api.StartRTM(\"\", 
\"https://slack.com\")\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"StartRTM(): %s\\n\", err)\n\t\t}\n\t\tinfo = conn.api.GetInfo()\n\t}\n\n\tconn.in = make(chan slack.SlackEvent)\n\tconn.sinks = make([]EventHandler, 0, 5)\n\tconn.Super = NewSuper()\n\n\tusers := make([]*User, 0, len(info.Users))\n\tfor _, u := range info.Users {\n\t\tusers = append(users, NewUser(u, conn))\n\t}\n\tconn.users, err = NewUserSet(\"users\", conn, NewUserDir, users)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"NewUserSet: %s\", err)\n\t}\n\n\tconn.self, err = NewSelf(conn, info.User, info.Team)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"NewSelf: %s\", err)\n\t}\n\n\tchans := make([]Room, 0, len(info.Channels))\n\tfor _, c := range info.Channels {\n\t\tchans = append(chans, NewChannel(c, conn))\n\t}\n\tconn.channels, err = NewRoomSet(\"channels\", conn, NewChannelDir, chans)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"NewRoomSet: %s\", err)\n\t}\n\n\tgroups := make([]Room, 0, len(info.Groups))\n\tfor _, g := range info.Groups {\n\t\tgroups = append(groups, NewGroup(g, conn))\n\t}\n\tconn.groups, err = NewRoomSet(\"groups\", conn, NewGroupDir, groups)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"NewRoomSet: %s\", err)\n\t}\n\n\tims := make([]Room, 0, len(info.IMs))\n\tfor _, im := range info.IMs {\n\t\tims = append(ims, NewIM(im, conn))\n\t}\n\tconn.ims, err = NewRoomSet(\"ims\", conn, NewIMDir, ims)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"NewRoomSet: %s\", err)\n\t}\n\n\t// simplify dispatch code by keeping track of event handlers\n\t// in a slice. We (FSConn) are an event sink too - add\n\t// ourselves to the list first, so that we can separate\n\t// routing logic from connection-level handling logic.\n\tconn.sinks = append(conn.sinks, conn,\n\t\tconn.users, conn.channels, conn.groups, conn.ims)\n\n\t// only spawn goroutines in online mode\n\tif infoPath == \"\" {\n\t\tgo conn.ws.HandleIncomingEvents(conn.in)\n\t\tgo conn.ws.Keepalive(10 * time.Second)\n\t\tgo conn.consumeEvents()\n\t}\n\n\treturn conn, nil\n}", "func (a *agent) openChannel() error {\n\tch, err := a.conn.Channel()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ta.mu.Lock()\n\ta.ch = ch\n\ta.mu.Unlock()\n\n\t_, err = ch.QueueDeclare(\n\t\ta.queueName, // name\n\t\ttrue, // durable\n\t\tfalse, // delete when unused\n\t\tfalse, // exclusive\n\t\tfalse, // no-wait\n\t\tnil, // arguments\n\t)\n\treturn err\n}", "func (report *Report) open() {\n\n\t// open the report in the default browser\n\terr := open.Start(reportName + \".html\")\n\n\tif err != nil {\n\t\tlog.Println(\"Could not open report\")\n\t}\n\n}", "func (fb *FileBase) Open(ID string) (io.ReadCloser, error) {\n\treturn fb.fs.Open(filepath.Join(fb.root, ID))\n}", "func messageCreate(s *discordgo.Session, m *discordgo.MessageCreate) {\n\tchannel := \"private\"\n\tif c, _ := s.State.Channel(m.ChannelID); c != nil {\n\t\tchannel = c.Name\n\t}\n\tfmt.Printf(\"%s (%s):: %s\\n\", m.Author.Username, channel, m.Content)\n}", "func (hs *Handshake) OpenedStream(s p2p.Stream) {\n\n}", "func (h *Harvester) open() (encoding.Encoding, error) {\n\t// Special handling that \"-\" means to read from standard input\n\tif h.Path == \"-\" {\n\t\treturn h.openStdin()\n\t}\n\treturn h.openFile()\n}", "func Open(URL ...string) (pubsub.Hub, error) {\n\tif len(URL) == 0 {\n\t\tURL = []string{nats.DefaultURL}\n\t}\n\n\tlog.Info(\"connecting to nats hub: %v\", URL)\n\n\tconn, err := nats.Connect(URL[0], func(options *nats.Options) error {\n\t\toptions.Name = 
\"pandora-pubsub\"\n\t\toptions.AllowReconnect = true\n\t\toptions.Verbose = true\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &hub{\n\t\tconn: conn,\n\t\tsubs: make(map[*sub]struct{}),\n\t}, nil\n}", "func (this *IoHandlerImp) ConnOpened(*IoFilter) {\n}", "func AvIOOpen(pb **AvIOContext, fi string, flags int) int {\n\tcfi := C.CString(fi)\n\tdefer C.free(unsafe.Pointer(cfi))\n\treturn int(C.avio_open((**C.struct_AVIOContext)(unsafe.Pointer(pb)), cfi, C.int(flags)))\n}", "func (cfg *ErrorOnlyConfig) Open(*zap.Logger, *certs.ServiceCerts) (users.Adapter, error) {\n\treturn &state{errors.New(cfg.Msg)}, nil\n}", "func (m *Manager) openSupermarket() {\n\t// Create a Supermarket\n\tm.supermarket = newSupermarket()\n\n\tgo m.customerStatusChangeListener()\n\tgo m.openCloseCheckoutListener()\n\n\tgo m.statPrint()\n}", "func (c *Client) Open(path string) error {\n\treturn c.Browser.Open(fmt.Sprintf(\"%s%s\", c.base, path))\n}" ]
[ "0.6114414", "0.5814772", "0.56604224", "0.5624466", "0.55345947", "0.55344206", "0.5481519", "0.5478093", "0.54761076", "0.5423344", "0.53903896", "0.53574616", "0.5348757", "0.53430444", "0.5336", "0.5325706", "0.5297685", "0.5275618", "0.52629817", "0.52551985", "0.52356577", "0.52347445", "0.52196753", "0.5203781", "0.52029884", "0.51717854", "0.51670486", "0.51527506", "0.5150952", "0.5126494", "0.50872904", "0.50765014", "0.50550544", "0.5054157", "0.50278956", "0.49949265", "0.4991296", "0.49846134", "0.49550137", "0.49484715", "0.4944672", "0.49295714", "0.4926833", "0.49143997", "0.4912138", "0.4911167", "0.49040082", "0.48902336", "0.4878044", "0.4876475", "0.4875747", "0.48734444", "0.48562947", "0.4855471", "0.48536834", "0.48532417", "0.48524773", "0.4841999", "0.48320836", "0.4824957", "0.479792", "0.4797882", "0.4795386", "0.47918215", "0.47872776", "0.47859952", "0.47836778", "0.47785038", "0.477522", "0.477488", "0.47728533", "0.47724444", "0.4755034", "0.47475868", "0.47473192", "0.4736273", "0.47340882", "0.4727991", "0.4727594", "0.4721415", "0.47199506", "0.47196692", "0.47146347", "0.47049728", "0.47048277", "0.46949103", "0.46882278", "0.46859714", "0.46857935", "0.46813998", "0.4681137", "0.46787426", "0.4675479", "0.46720257", "0.46699473", "0.4667784", "0.46642056", "0.4660383", "0.4657588", "0.4653033" ]
0.48950005
47
NewConsumer returns a Kafka consumer for the given brokers
func NewConsumer( brokers []string, logger log.Logger, options ...ConsumerOption, ) (*Consumer, error) { // default values cfg := kafgo.ReaderConfig{ Brokers: brokers, GroupID: defaultConsumerGroupID, Topic: defaultTopic, Logger: kafka.LoggerFunc(logger.Debugf), } cs := &Consumer{ reader: nil, config: &cfg, } for _, o := range options { o(cs) } if cs.end == nil { return nil, errors.Wrap( ErrCreatingConsumer, "missing endpoint", ) } if cs.dec == nil { return nil, errors.Wrap( ErrCreatingConsumer, "missing decoder", ) } if cs.errFn == nil { cs.errFn = defaultErrorFunc } if cs.errHandler == nil { cs.errHandler = transport.NewLogErrorHandler(logger) } return cs, nil }
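The constructor above wires a segmentio/kafka-go reader (imported as kafgo) into a go-kit style transport: functional options install the endpoint and decoder, and construction fails fast when either required option is missing. As a rough, self-contained sketch of the read–decode–endpoint loop such a Consumer would drive, the snippet below goes against kafka-go directly; the broker address, topic, group ID, and the inline "decode" stand-in are illustrative assumptions, not part of this record.

package main

import (
	"context"
	"fmt"
	"log"

	kafgo "github.com/segmentio/kafka-go"
)

func main() {
	// Broker address, topic, and group ID are assumed values for the sketch.
	reader := kafgo.NewReader(kafgo.ReaderConfig{
		Brokers: []string{"localhost:9092"},
		GroupID: "example-group",
		Topic:   "example-topic",
	})
	defer reader.Close()

	ctx := context.Background()
	for {
		// Blocks until a message arrives; with GroupID set, kafka-go
		// commits the offset after ReadMessage returns successfully.
		msg, err := reader.ReadMessage(ctx)
		if err != nil {
			log.Printf("read error: %v", err) // stand-in for the errFn/errHandler pair
			return
		}

		// Stand-ins for the decoder and endpoint that the ConsumerOptions
		// would normally install on the Consumer.
		req := string(msg.Value)
		fmt.Printf("partition=%d offset=%d req=%q\n", msg.Partition, msg.Offset, req)
	}
}

In the constructor shown in this record, the same loop would sit behind cs.reader, with cs.dec and cs.end supplying the decode and endpoint steps and cs.errHandler absorbing transport errors.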
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func NewConsumer(addrs, zookeepers []string, group, topic string, config *Config) (*Consumer, error) {\n\tif config == nil {\n\t\tconfig = new(Config)\n\t}\n\n\tclient, err := sarama.NewClient(addrs, config.Config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tc, err := NewConsumerFromClient(client, zookeepers, group, topic, config)\n\tif err != nil {\n\t\tclient.Close()\n\t\treturn nil, err\n\t}\n\tc.ownClient = true\n\treturn c, nil\n}", "func newKafkaConsumer() sarama.Consumer {\n\n\tkafkaBroker := os.Getenv(\"KAFKA_BROKER\")\n\n\tif runtime.GOOS == \"darwin\" {\n\t\tbrokers = []string{\"localhost:9092\"}\n\t} else {\n\t\tif kafkaBroker == \"\" {\n\t\t\tfmt.Printf(\"$KAFKA_BROKER must be set\")\n\t\t\tos.Exit(-1)\n\t\t}\n\t\tbrokers = []string{kafkaBroker}\n\t}\n\n\tconsumer, err := sarama.NewConsumer(brokers, newKafkaConfiguration())\n\n\tfmt.Print(\"Creating new Kafka Consumer \\n\")\n\n\tif err != nil {\n\t\tfmt.Printf(\"Kafka error: %s\\n\", err)\n\t\tos.Exit(-1)\n\t}\n\n\treturn consumer\n}", "func New_kafka_consumer(brokers []string) (sarama.Consumer, error) {\n\tif len(brokers) == 0 {\n\t\terr := errors.New(\"Invalid broker information provided\")\n\t\tlog.Error(err)\n\t\treturn nil, err\n\t}\n\tlog.Infof(\"RequestProcessor: new_kafka_consumer: start\")\n\tconfig := sarama.NewConfig()\n\tlog.Infof(\"Consumer Config: %v\\n\", config)\n\tconsumer, err := sarama.NewConsumer(brokers, config)\n\tif err != nil {\n\t\tlog.Errorf(\"Failed to start Sarama consumer: %s\", err)\n\t\treturn nil, err\n\t}\n\tlog.Infof(\"RequestProcessor: new_kafka_consumer: end\")\n\treturn consumer, nil\n}", "func NewConsumer(topics []string, valueFactory ValueFactory, opts ...ConsumerOption) (*Consumer, error) {\n\tc := &Consumer{\n\t\tvalueFactory: valueFactory,\n\t\tavroAPI: avro.DefaultConfig,\n\t\tensureTopics: true,\n\t}\n\t// Loop through each option\n\tfor _, opt := range opts {\n\t\t// apply option\n\t\topt.applyC(c)\n\t}\n\n\tvar err error\n\n\t// if consumer not provided - make one\n\tif c.KafkaConsumer == nil {\n\t\t// if kafka config not provided - build default one\n\t\tif c.kafkaCfg == nil {\n\t\t\tvar envCfg struct {\n\t\t\t\tBroker string `env:\"KAFKA_BROKER\" envDefault:\"localhost:9092\"`\n\t\t\t\tCAFile string `env:\"KAFKA_CA_FILE\"`\n\t\t\t\tKeyFile string `env:\"KAFKA_KEY_FILE\"`\n\t\t\t\tCertificateFile string `env:\"KAFKA_CERTIFICATE_FILE\"`\n\t\t\t\tGroupID string `env:\"KAFKA_GROUP_ID\"`\n\t\t\t}\n\t\t\tif err := env.Parse(&envCfg); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\t// default configuration\n\t\t\tc.kafkaCfg = &kafka.ConfigMap{\n\t\t\t\t\"bootstrap.servers\": envCfg.Broker,\n\t\t\t\t\"socket.keepalive.enable\": true,\n\t\t\t\t\"enable.auto.commit\": false,\n\t\t\t\t\"enable.partition.eof\": true,\n\t\t\t\t\"session.timeout.ms\": 6000,\n\t\t\t\t\"auto.offset.reset\": \"earliest\",\n\t\t\t\t\"group.id\": envCfg.GroupID,\n\t\t\t}\n\n\t\t\tif envCfg.CAFile != \"\" {\n\t\t\t\t// configure SSL\n\t\t\t\tc.kafkaCfg.SetKey(\"security.protocol\", \"ssl\")\n\t\t\t\tc.kafkaCfg.SetKey(\"ssl.ca.location\", envCfg.CAFile)\n\t\t\t\tc.kafkaCfg.SetKey(\"ssl.key.location\", envCfg.KeyFile)\n\t\t\t\tc.kafkaCfg.SetKey(\"ssl.certificate.location\", envCfg.CertificateFile)\n\t\t\t}\n\t\t}\n\n\t\tif c.KafkaConsumer, err = kafka.NewConsumer(c.kafkaCfg); err != nil {\n\t\t\treturn nil, errors.WithMessage(err, \"cannot initialize kafka consumer\")\n\t\t}\n\t}\n\n\tif c.srClient == nil {\n\t\tif c.srURL == nil {\n\t\t\tvar envCfg struct {\n\t\t\t\tSchemaRegistry *url.URL 
`env:\"KAFKA_SCHEMA_REGISTRY\" envDefault:\"http://localhost:8081\"`\n\t\t\t}\n\t\t\tif err := env.Parse(&envCfg); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tc.srURL = envCfg.SchemaRegistry\n\t\t}\n\n\t\tif c.srClient, err = NewCachedSchemaRegistryClient(c.srURL.String()); err != nil {\n\t\t\treturn nil, errors.WithMessage(err, \"cannot initialize schema registry client\")\n\t\t}\n\t}\n\n\tif c.eventHandler == nil {\n\t\tc.eventHandler = func(event kafka.Event) {\n\t\t\tlog.Println(event)\n\t\t}\n\t}\n\n\tif topics != nil {\n\t\tif err := c.KafkaConsumer.SubscribeTopics(topics, nil); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif c.ensureTopics {\n\t\t\tif err = c.EnsureTopics(topics); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn c, nil\n}", "func NewConsumerClient(brokers, group, topics string, oldest, verbose bool) *consumerClient {\n\tc := &consumerClient{\n\t\tbrokers: brokers,\n\t\tgroup: group,\n\t\ttopics: topics,\n\t\toldest: oldest,\n\t\tverbose: verbose,\n\t\tversion: \"0.10.2.0\", //连云端ckafka版本必须是这个,没事别乱改\n\t}\n\treturn c\n}", "func NewConsumer(ctx context.Context) (*Consumer, error) {\n\t// TODO support filter in downstream sink\n\ttz, err := util.GetTimezone(timezone)\n\tif err != nil {\n\t\treturn nil, errors.Annotate(err, \"can not load timezone\")\n\t}\n\tctx = util.PutTimezoneInCtx(ctx, tz)\n\tfilter, err := cdcfilter.NewFilter(config.GetDefaultReplicaConfig())\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\tc := new(Consumer)\n\tc.fakeTableIDGenerator = &fakeTableIDGenerator{\n\t\ttableIDs: make(map[string]int64),\n\t}\n\tc.sinks = make([]*struct {\n\t\tsink.Sink\n\t\tresolvedTs uint64\n\t}, kafkaPartitionNum)\n\tctx, cancel := context.WithCancel(ctx)\n\terrCh := make(chan error, 1)\n\topts := map[string]string{}\n\tfor i := 0; i < int(kafkaPartitionNum); i++ {\n\t\ts, err := sink.NewSink(ctx, \"kafka-consumer\", downstreamURIStr, filter, config.GetDefaultReplicaConfig(), opts, errCh)\n\t\tif err != nil {\n\t\t\tcancel()\n\t\t\treturn nil, errors.Trace(err)\n\t\t}\n\t\tc.sinks[i] = &struct {\n\t\t\tsink.Sink\n\t\t\tresolvedTs uint64\n\t\t}{Sink: s}\n\t}\n\tsink, err := sink.NewSink(ctx, \"kafka-consumer\", downstreamURIStr, filter, config.GetDefaultReplicaConfig(), opts, errCh)\n\tif err != nil {\n\t\tcancel()\n\t\treturn nil, errors.Trace(err)\n\t}\n\tgo func() {\n\t\terr := <-errCh\n\t\tif errors.Cause(err) != context.Canceled {\n\t\t\tlog.Error(\"error on running consumer\", zap.Error(err))\n\t\t} else {\n\t\t\tlog.Info(\"consumer exited\")\n\t\t}\n\t\tcancel()\n\t}()\n\tc.ddlSink = sink\n\tc.ready = make(chan bool)\n\treturn c, nil\n}", "func NewConsumer(cfg *ConsumerConfig, handler MessageHanlder) (*Consumer, error) {\n\tclusterConfig := cluster.NewConfig()\n\tclusterConfig.Metadata.RefreshFrequency = 1 * time.Minute\n\tclusterConfig.Group.Mode = cluster.ConsumerModePartitions\n\tclusterConfig.Group.Return.Notifications = true\n\tclusterConfig.Consumer.Offsets.Initial = sarama.OffsetNewest\n\tclusterConfig.Consumer.Return.Errors = true\n\tclientName := generateClientID(cfg.GroupID)\n\tclusterConfig.ClientID = clientName\n\n\tc, err := cluster.NewConsumer(cfg.Brokers, cfg.GroupID, cfg.Topic, clusterConfig)\n\tif err != nil {\n\t\tlog.Printf(\"Kafka Consumer: [%s] init fail, %v\", clientName, err)\n\t\treturn nil, err\n\t}\n\n\tvalidConfigValue(cfg)\n\tconsumer := &Consumer{\n\t\tclientName: clientName,\n\t\tcfg: cfg,\n\t\tconsumer: c,\n\t\tmsgHanlder: handler,\n\t\tpartitionWorkers: 
make([]*partitionConsumerWorker, 0),\n\t}\n\tlog.Printf(\"Kafka Consumer: [%s] init success\", clientName)\n\n\treturn consumer, nil\n}", "func New(c *kafka.ConfigMap, topic string, clientID string) (*Consumer, error) {\n\tif err := c.SetKey(\"group.id\", clientID); err != nil {\n\t\treturn nil, err\n\t}\n\tconsumer, err := kafka.NewConsumer(c)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &Consumer{topic, consumer}, nil\n}", "func New(bootstrapServers, topic, consumerGroup, offsetResetType string) (c *Consumer) {\n\n\tkc, err := kafka.NewConsumer(&kafka.ConfigMap{\n\t\t\"bootstrap.servers\": bootstrapServers,\n\t\t\"group.id\": consumerGroup,\n\t\t\"auto.offset.reset\": offsetResetType,\n\t})\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tkc.SubscribeTopics([]string{topic, \"^aRegex.*[Tt]opic\"}, nil)\n\n\tc = &Consumer{kc: kc}\n\n\treturn c\n}", "func NewConsumer(newsService services.NewsService, elasticService services.ElasticService, consumer sarama.Consumer) *KafkaConsumer {\n\treturn &KafkaConsumer{\n\t\tNewsService: newsService,\n\t\tElasticSearch: elasticService,\n\t\tConsumer: consumer,\n\t}\n}", "func (m *ManagedConsumer) newConsumer(ctx context.Context) (*Consumer, error) {\n\tmc, err := m.clientPool.ForTopic(ctx, m.cfg.ManagedClientConfig, m.cfg.Topic)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tclient, err := mc.Get(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Create the topic consumer. A non-blank consumer name is required.\n\tif m.cfg.Exclusive {\n\t\treturn client.NewExclusiveConsumer(ctx, m.cfg.Topic, m.cfg.Name, m.cfg.Earliest, m.queue)\n\t}\n\treturn client.NewSharedConsumer(ctx, m.cfg.Topic, m.cfg.Name, m.queue)\n}", "func newConsumer(s cmdSender, dispatcher *frameDispatcher, topic string, reqID *monotonicID, consumerID uint64, queue chan Message) *Consumer {\n\treturn &Consumer{\n\t\ts: s,\n\t\ttopic: topic,\n\t\tconsumerID: consumerID,\n\t\treqID: reqID,\n\t\tdispatcher: dispatcher,\n\t\tqueue: queue,\n\t\tclosedc: make(chan struct{}),\n\t\tendOfTopicc: make(chan struct{}),\n\t}\n}", "func (c *Client) NewConsumer(ctx context.Context, config ConsumerConfig) (Consumer, error) {\n\t// TODO check connected state\n\n\tif config.TopicPattern != \"\" {\n\t\tif config.TopicPatternDiscoveryInterval <= 0 {\n\t\t\tconfig.TopicPatternDiscoveryInterval = 30000\n\t\t}\n\n\t\tb := c.newBrokerConnection()\n\t\tmulti, err := newMultiTopicConsumer(c, b, config)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tgo c.nameSpaceTopicLookup(multi, config)\n\t\treturn multi, nil\n\t}\n\n\tcons, err := c.createNewConsumer(config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tc.topicLookup(cons.topic, cons.topicLookupFinished)\n\n\tselect {\n\tcase <-ctx.Done():\n\t\treturn nil, ctx.Err()\n\tcase err := <-cons.connected:\n\t\treturn cons, err\n\t}\n}", "func NewConsumer() (*cluster.Consumer, error) {\n\n\tconfig := cluster.NewConfig()\n\tconfig.Consumer.Return.Errors = true\n\tconfig.Group.Return.Notifications = true\n\tconfig.Config.Net.TLS.Enable = true\n\tconfig.Config.Net.SASL.Enable = true\n\tconfig.Config.Net.SASL.User = viper.GetString(\"kafka.user\")\n\tconfig.Config.Net.SASL.Password = viper.GetString(\"kafka.password\")\n\tconfig.ClientID = \"poke.ssl-checker\"\n\tconfig.Consumer.Offsets.Initial = sarama.OffsetOldest\n\tconfig.Consumer.Offsets.CommitInterval = 10 * time.Second\n\n\tconsumerGroup := config.Config.Net.SASL.User + \".\" + viper.GetString(\"host\")\n\tbrokers := viper.GetStringSlice(\"kafka.brokers\")\n\ttopics := 
viper.GetStringSlice(\"kafka.topics\")\n\n\tconsumer, err := cluster.NewConsumer(brokers, consumerGroup, topics, config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn consumer, nil\n}", "func NewConsumer(log *zap.Logger, tlsCfg *TLSConfig, lookupds ...string) (*Consumer, error) {\n\tcfg := CreateNSQConfig(tlsCfg)\n\tcfg.LookupdPollInterval = time.Second * 5\n\tcfg.HeartbeatInterval = time.Second * 5\n\tcfg.DefaultRequeueDelay = time.Second * 5\n\tcfg.MaxInFlight = 10\n\n\treturn &Consumer{\n\t\tconfig: cfg,\n\t\tlookupds: lookupds,\n\t\tlog: log,\n\t\tlogLevel: nsq.LogLevelInfo,\n\t}, nil\n}", "func NewConsumer(addrs []string, config *sarama.Config, sensor instana.TracerLogger) (sarama.Consumer, error) {\n\tc, err := sarama.NewConsumer(addrs, config)\n\tif err != nil {\n\t\treturn c, err\n\t}\n\n\treturn WrapConsumer(c, sensor), nil\n}", "func New(brokerCfg broker.Configuration, storage storage.Storage) (*KafkaConsumer, error) {\n\treturn NewWithSaramaConfig(brokerCfg, storage, DefaultSaramaConfig)\n}", "func NewConsumer(\n\tlogger *zap.Logger,\n\tserviceName string,\n\tamqpDSN string,\n\tconcurrentProcessingLimit int,\n\thandler func(*Event) error,\n) (*Consumer, error) {\n\tprocessor := &Consumer{\n\t\tlogger: logger,\n\t\tserviceName: serviceName,\n\t\tamqpDSN: amqpDSN,\n\t\tconcurrentProcessingLimit: concurrentProcessingLimit,\n\t\thandler: handler,\n\t}\n\n\terr := processor.init()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn processor, nil\n}", "func NewConsumer(cfg *ConsumerConfig) *Consumer {\n\tcfg.init()\n\treturn &Consumer{\n\t\tcfg: cfg,\n\t\tshutdown: make(chan struct{}),\n\t\tLmstfyClient: NewLmstfyClient(cfg.Host, cfg.Port, cfg.Namespace, cfg.Token),\n\t}\n}", "func (this *Client) NewConsumer(queue string, prefetch int) *Consumer {\n\tchannel := newChannel(this.getConsumerConnection())\n\tgo channel.connect()\n\tchannel.awaitConnection()\n\n\treturn &Consumer{\n\t\tchannel: channel,\n\t\tqueue: queue,\n\t\tprefetch: prefetch,\n\t\twg: &sync.WaitGroup{},\n\t\ttoClose: make(chan struct{}),\n\t\topen: true,\n\t}\n}", "func NewConsumer(connector *connection.Connector, onChan OnChannel) *Consumer {\n\tfuncCh := make(chan OnChannel)\n\n\treturn &Consumer{\n\t\tconnector: connector,\n\t\tonChan: onChan,\n\t\tfuncCh: funcCh,\n\t}\n}", "func New(user1 IUserSvc, k *kafka.Kafka, subscriptions []string) Consumer {\n\tconsumer := Consumer{\n\t\tReady: make(chan bool),\n\t\tuserSvc: user1,\n\t}\n\t// Consumer\n\tgo func() {\n\t\tclient := k.GetConsumerGroup()\n\t\tctx := context.Background()\n\n\t\tfor {\n\t\t\tif err := client.Consume(ctx, subscriptions, &consumer); err != nil {\n\t\t\t\tlog.Panicf(\"Error from consumer: %v\", err)\n\t\t\t}\n\t\t\t// check if context was cancelled, signaling that the consumer should stop\n\t\t\tif ctx.Err() != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tconsumer.Ready = make(chan bool)\n\t\t}\n\t}()\n\n\t<-consumer.Ready\n\treturn consumer\n}", "func NewConsumer(topic string, clientID string, pollTimeMs int, handler MessageHandler) (*Consumer, error) {\n\tif handler == nil {\n\t\treturn nil, fmt.Errorf(\"consumer message handler missing\")\n\t}\n\n\tconsumerImpl, err := fwFactory.NewConsumer(topic, clientID, handler)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Consumer{\n\t\tTopic: topic,\n\t\tConsumer: consumerImpl,\n\t\tShutdown: false,\n\t\tPollTimeMs: pollTimeMs,\n\t}, nil\n}", "func New(client *redis.Client, group, consumer string, options ...Option) *Consumer {\n\tcfg := &config{\n\t\tgroup: group,\n\t\tconsumer: 
consumer,\n\t}\n\tfor _, opt := range options {\n\t\topt(cfg)\n\t}\n\tlastIDs := make(map[string]string)\n\tfor _, stream := range cfg.streams {\n\t\tlastIDs[stream] = \"0-0\"\n\t}\n\n\treturn &Consumer{\n\t\tclient: client,\n\t\tcfg: cfg,\n\t\tlastIDs: lastIDs,\n\t}\n}", "func NewConsumer(c *aws.Config, stream string, shard string, optionFns ...ConsumerOptionsFn) (*Consumer, error) {\n\tconsumer := &Consumer{consumerOptions: defaultConsumerOptions()}\n\tfor _, optionFn := range optionFns {\n\t\toptionFn(consumer)\n\t}\n\n\tif consumer.reader == nil {\n\t\tr, err := NewKinesisReader(c, stream, shard)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tconsumer.reader = r\n\t}\n\n\tconsumer.LogHelper = &LogHelper{\n\t\tLogLevel: consumer.logLevel,\n\t\tLogger: c.Logger,\n\t}\n\n\treturn consumer, nil\n}", "func NewConsumer(ring *Buffer) *Consumer {\n\treturn &Consumer{ring, NewCache(len(ring.items)), 0}\n}", "func CreateConsumer(topic string) *kafka.Consumer {\n\t//create at least consumer\n\t//enable.auto.commit is true and sync processing and auto.commit.interval.ms = 10000 -> atleast once/atmost once\n\t//enable.auto.commit is true and async processing and auto.commit.interval.ms = 0 -> atmost once\n\t//enable.auto.commit is false -> atleast once\n\n\tconsumer, err := kafka.NewConsumer(&kafka.ConfigMap{\n\t\t\"bootstrap.servers\": \"localhost\",\n\t\t\"group.id\": \"article_scraper_processor\",\n\t\t\"auto.offset.reset\": \"earliest\",\n\t\t\"enable.auto.commit\": \"false\",\n\t})\n\n\t//raise the panic in case of error\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tconsumer.SubscribeTopics([]string{topic}, nil)\n\n\t/*//poll to the topic and consume the message\n\tmsg, err := consumer.ReadMessage(-1)\n\t*/\n\treturn consumer\n}", "func NewConsumer(period time.Duration, tailer tailer.TailerT, exporter exporter.ExporterT) *Consumer {\n\treturn &Consumer{\n\t\tPeriod: period,\n\t\ttailer: tailer,\n\t\texporter: exporter,\n\t\tstop: make(chan bool, 1),\n\t}\n}", "func Consumer(config *kafka.ConfigMap, topic string, partition int32, group string) *kafka.Consumer {\n\n\t// return cached consumer, if any\n\tif consumer, ok := consumers[group]; ok {\n\t\tlog.Printf(\"retrieved consumer %s\", group)\n\t\treturn consumer\n\t}\n\n\t// not found in cache,\n\t// create a consumer and return it\n\tconfig.SetKey(\"group.id\", group)\n\tlog.Printf(\"config %v\", config)\n\tconsumer, err := kafka.NewConsumer(config)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn nil\n\t}\n\n\t// assign to a specific topic and partition\n\tassignment := []kafka.TopicPartition{{\n\t\tTopic: &topic,\n\t\tPartition: partition}}\n\tconsumer.Assign(assignment)\n\tconsumers[group] = consumer\n\n\t// cache values and subscribe\n\tlog.Printf(\"created consumer %s for %s:%d \", group, topic, partition)\n\treturn consumer\n}", "func (b *Broker) Consumer(conf ConsumerConf) (Consumer, error) {\n\treturn b.consumer(conf)\n}", "func NewConsumer(cfg Config) *Consumer {\n\tconsumer := &Consumer{\n\t\tconfig: cfg,\n\t\tpubDeliveryChannel: make(chan amqp.Delivery, 100), // TODO: buffer size to config\n\t\treconnectMutex: make(chan struct{}, 1),\n\t\tfatalChannel: make(chan struct{}, 1),\n\t\tinvalidationChannel: make(chan struct{}, 1),\n\t\twg: sync.WaitGroup{},\n\t}\n\treturn consumer\n}", "func NewConsumer(log logrus.FieldLogger, conf Config, opts ...ConfigOpt) (Consumer, error) {\n\t// See Reference at https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md\n\tkafkaConf := conf.baseKafkaConfig()\n\t_ = 
kafkaConf.SetKey(\"enable.auto.offset.store\", false) // manually StoreOffset after processing a message. Otherwise races may happen.)\n\n\t// In case we try to assign an offset out of range (greater than log-end-offset), consumer will use start consuming from offset zero.\n\t_ = kafkaConf.SetKey(\"auto.offset.reset\", \"earliest\")\n\n\tconf.Consumer.Apply(kafkaConf)\n\tfor _, opt := range opts {\n\t\topt(kafkaConf)\n\t}\n\n\tif err := conf.configureAuth(kafkaConf); err != nil {\n\t\treturn nil, errors.Wrap(err, \"error configuring auth for the Kafka consumer\")\n\t}\n\n\tconsumer, err := kafkalib.NewConsumer(kafkaConf)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif conf.RequestTimeout == 0 {\n\t\tconf.RequestTimeout = DefaultTimeout\n\t}\n\n\tcc := &ConfluentConsumer{\n\t\tc: consumer,\n\t\tconf: conf,\n\t\tlog: log,\n\t}\n\n\tlogFields := logrus.Fields{\"kafka_topic\": cc.conf.Topic}\n\n\tif cc.conf.Consumer.Partition != nil || cc.conf.Consumer.PartitionKey != \"\" {\n\t\t// set the default partitioner algorithm\n\t\tif cc.conf.Consumer.PartitionerAlgorithm == \"\" {\n\t\t\tcc.conf.Consumer.PartitionerAlgorithm = PartitionerMurMur2\n\t\t}\n\t\t// Set the partition if a key is set to determine the partition\n\t\tif cc.conf.Consumer.PartitionKey != \"\" && cc.conf.Consumer.PartitionerAlgorithm != \"\" {\n\t\t\tcc.AssignPartitionByKey(cc.conf.Consumer.PartitionKey, cc.conf.Consumer.PartitionerAlgorithm)\n\t\t}\n\n\t\tlogFields[\"kafka_partition_key\"] = cc.conf.Consumer.PartitionKey\n\t\tlogFields[\"kafka_partition\"] = *cc.conf.Consumer.Partition\n\t}\n\n\tcc.setupRebalanceHandler()\n\tcc.log.WithFields(logFields).Debug(\"Subscribing to Kafka topic\")\n\tif serr := cc.c.Subscribe(cc.conf.Topic, cc.rebalanceHandler); serr != nil {\n\t\terr = errors.Wrap(serr, \"error subscribing to topic\")\n\t}\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn cc, nil\n}", "func NewKafkaBroker(peers []string) *Kafka {\n\tbrk := new(Kafka)\n\tbrk.Servers = peers\n\tbrk.Initialize()\n\treturn brk\n}", "func NewConsumerFromClient(client sarama.Client, zookeepers []string, group, topic string, config *Config) (*Consumer, error) {\n\tif config == nil {\n\t\tconfig = new(Config)\n\t}\n\tconfig.normalize()\n\n\t// Validate configuration\n\tif err := config.Validate(); err != nil {\n\t\treturn nil, err\n\t} else if topic == \"\" {\n\t\treturn nil, sarama.ConfigurationError(\"Empty topic\")\n\t} else if group == \"\" {\n\t\treturn nil, sarama.ConfigurationError(\"Empty group\")\n\t}\n\n\t// Generate unique consumer ID\n\tid := config.customID\n\tif id == \"\" {\n\t\tprefix := config.IDPrefix\n\t\tif prefix == \"\" {\n\t\t\tprefix = group\n\t\t}\n\t\tid = newGUID(prefix)\n\t}\n\n\t// Create sarama consumer instance\n\tscsmr, err := sarama.NewConsumerFromClient(client)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Connect to zookeeper\n\tzoo, err := NewZK(zookeepers, config.ZKSessionTimeout)\n\tif err != nil {\n\t\tscsmr.Close()\n\t\treturn nil, err\n\t}\n\n\t// Initialize consumer\n\tconsumer := &Consumer{\n\t\tid: id,\n\t\tgroup: group,\n\t\ttopic: topic,\n\n\t\tzoo: zoo,\n\t\tconfig: config,\n\t\tclient: client,\n\t\tconsumer: scsmr,\n\n\t\tread: make(map[int32]int64),\n\t\tacked: make(map[int32]int64),\n\t\tpartIDs: make([]int32, 0),\n\n\t\tmessages: make(chan *sarama.ConsumerMessage),\n\t\terrors: make(chan *sarama.ConsumerError),\n\t}\n\n\t// Register consumer group and consumer itself\n\tif err := consumer.register(); err != nil {\n\t\tconsumer.closeAll()\n\t\treturn nil, 
err\n\t}\n\n\tconsumer.closer.Go(consumer.signalLoop)\n\tif config.CommitEvery > 0 {\n\t\tconsumer.closer.Go(consumer.commitLoop)\n\t}\n\treturn consumer, nil\n}", "func New(config *Config) (*KafkaClient, error) {\n\tif config == nil {\n\t\treturn nil, ErrNoClientConfig\n\t}\n\n\tif err := config.Validate(); err != nil {\n\t\treturn nil, err\n\t}\n\n\tbootstrapConnections := make([]*BrokerConnection, len(config.BrokerList))\n\tfor i := 0; i < len(config.BrokerList); i++ {\n\t\tbroker := config.BrokerList[i]\n\t\thostPort := strings.Split(broker, \":\")\n\t\tif len(hostPort) != 2 {\n\t\t\treturn nil, fmt.Errorf(\"incorrect broker connection string: %s\", broker)\n\t\t}\n\n\t\tport, err := strconv.Atoi(hostPort[1])\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"incorrect port in broker connection string: %s\", broker)\n\t\t}\n\n\t\tbootstrapConnections[i] = NewBrokerConnection(&Broker{\n\t\t\tID: -1,\n\t\t\tHost: hostPort[0],\n\t\t\tPort: int32(port),\n\t\t}, config.KeepAliveTimeout)\n\t}\n\n\tkafkaClient := &KafkaClient{\n\t\tconfig: *config,\n\t\tbootstrapBrokers: bootstrapConnections,\n\t}\n\tkafkaClient.metadata = NewMetadata(kafkaClient, NewBrokers(config.KeepAliveTimeout), config.MetadataTTL)\n\n\treturn kafkaClient, nil\n}", "func NewConsumer(stream string, sess *session.Session, opts ...Option) (*Consumer, error) {\n\tif stream == \"\" {\n\t\treturn nil, fmt.Errorf(\"must provide stream name\")\n\t}\n\n\tc := &Consumer{\n\t\tstreamName: stream,\n\t\tcheckpoint: &noopCheckpoint{},\n\t\tcounter: &noopCounter{},\n\t\tlogger: log.New(os.Stderr, \"kinesis-consumer: \", log.LstdFlags),\n\t}\n\n\t// set options\n\tfor _, opt := range opts {\n\t\tif err := opt(c); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t// provide a default kinesis client\n\tif c.client == nil {\n\t\tc.client = kinesis.New(sess)\n\t}\n\n\treturn c, nil\n}", "func initConsumer() sarama.Consumer {\n\tsarama.Logger = log.New(os.Stdout, \"\", log.Ltime)\n\n\tconfig := sarama.NewConfig()\n\tconfig.ClientID = CLIENTID\n\tconfig.Consumer.Return.Errors = true\n\n\tbrokers := []string{BROKERS}\n\n\tmaster, err := sarama.NewConsumer(brokers, config)\n\tif err != nil {\n\t\tfmt.Println(\"error create master consumer: \")\n\t\tpanic(err)\n\t}\n\n\treturn master\n}", "func (e *ExternalServiceList) GetConsumer(ctx context.Context, cfg *config.Config) (kafkaConsumer *kafka.ConsumerGroup, err error) {\n\tcConfig := &kafka.ConsumerGroupConfig{KafkaVersion: &cfg.KafkaVersion}\n\tif cfg.KafkaSecProtocol == \"TLS\" {\n\t\tcConfig.SecurityConfig = kafka.GetSecurityConfig(\n\t\t\tcfg.KafkaSecCACerts,\n\t\t\tcfg.KafkaSecClientCert,\n\t\t\tcfg.KafkaSecClientKey,\n\t\t\tcfg.KafkaSecSkipVerify,\n\t\t)\n\t}\n\tkafkaConsumer, err = kafka.NewConsumerGroup(\n\t\tctx,\n\t\tcfg.KafkaAddr,\n\t\tcfg.FilterConsumerTopic,\n\t\tcfg.FilterConsumerGroup,\n\t\tkafka.CreateConsumerGroupChannels(cfg.KafkaConsumerWorkers),\n\t\tcConfig,\n\t)\n\tif err != nil {\n\t\treturn\n\t}\n\n\te.Consumer = true\n\n\treturn\n}", "func NewBroker(brokerArgs *args.BrokerHost) (*Broker, error) {\n\taddress := fmt.Sprintf(\"%s:%d\", brokerArgs.Host, brokerArgs.KafkaPort)\n\tprotocol := brokerArgs.KafkaProtocol\n\n\tconfig := sarama.NewConfig()\n\tconfig.Version = args.GlobalArgs.KafkaVersion\n\tconfig.ClientID = \"nri-kafka\"\n\n\tswitch protocol {\n\tcase \"PLAINTEXT\":\n\t\tsaramaBroker := sarama.NewBroker(address)\n\t\terr := saramaBroker.Open(config)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed opening connection: %s\", err)\n\t\t}\n\t\tconnected, 
err := saramaBroker.Connected()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed checking if connection opened successfully: %s\", err)\n\t\t}\n\t\tif !connected {\n\t\t\treturn nil, errors.New(\"broker is not connected\")\n\t\t}\n\n\t\t// TODO figure out how to get the ID from the broker. ID() returns -1\n\t\tnewBroker := &Broker{\n\t\t\tSaramaBroker: saramaBroker,\n\t\t\tHost: brokerArgs.Host,\n\t\t\tJMXPort: brokerArgs.JMXPort,\n\t\t\tJMXUser: brokerArgs.JMXUser,\n\t\t\tJMXPassword: brokerArgs.JMXPassword,\n\t\t\tID: fmt.Sprintf(\"%d\", saramaBroker.ID()),\n\t\t\tConfig: config,\n\t\t}\n\t\treturn newBroker, nil\n\tcase \"SSL\":\n\t\tsaramaBroker := sarama.NewBroker(address)\n\n\t\terr := configureTLS(config)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"build TLS config: %v\", err)\n\t\t}\n\n\t\terr = saramaBroker.Open(config)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed opening connection: %s\", err)\n\t\t}\n\t\tconnected, err := saramaBroker.Connected()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed checking if connection opened successfully: %s\", err)\n\t\t}\n\t\tif !connected {\n\t\t\treturn nil, errors.New(\"broker is not connected\")\n\t\t}\n\t\tnewBroker := &Broker{\n\t\t\tSaramaBroker: saramaBroker,\n\t\t\tHost: brokerArgs.Host,\n\t\t\tJMXPort: brokerArgs.JMXPort,\n\t\t\tJMXUser: brokerArgs.JMXUser,\n\t\t\tJMXPassword: brokerArgs.JMXPassword,\n\t\t\tID: fmt.Sprintf(\"%d\", saramaBroker.ID()),\n\t\t\tConfig: config,\n\t\t}\n\t\treturn newBroker, nil\n\tcase \"SASL_PLAINTEXT\", \"SASL_SSL\":\n\t\tvar err error\n\t\tsaramaBroker := sarama.NewBroker(address)\n\n\t\terr = configureSASL(config)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"create %s config: %v\", protocol, err)\n\t\t}\n\n\t\tif protocol == \"SASL_SSL\" {\n\t\t\terr = configureTLS(config)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"build TLS config: %v\", err)\n\t\t\t}\n\t\t}\n\n\t\terr = saramaBroker.Open(config)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed opening connection: %s\", err)\n\t\t}\n\t\tconnected, err := saramaBroker.Connected()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed checking if connection opened successfully: %s\", err)\n\t\t}\n\t\tif !connected {\n\t\t\treturn nil, errors.New(\"broker is not connected\")\n\t\t}\n\n\t\t// TODO figure out how to get the ID from the broker. 
ID() returns -1\n\t\tnewBroker := &Broker{\n\t\t\tSaramaBroker: saramaBroker,\n\t\t\tHost: brokerArgs.Host,\n\t\t\tJMXPort: brokerArgs.JMXPort,\n\t\t\tJMXUser: brokerArgs.JMXUser,\n\t\t\tJMXPassword: brokerArgs.JMXPassword,\n\t\t\tID: fmt.Sprintf(\"%d\", saramaBroker.ID()),\n\t\t\tConfig: config,\n\t\t}\n\t\treturn newBroker, nil\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"skipping %s://%s:%d because it uses unknown protocol '%s'\", brokerArgs.KafkaProtocol, brokerArgs.Host, brokerArgs.KafkaPort, brokerArgs.KafkaProtocol)\n\t}\n\n}", "func InitConsumer(broker, group string) {\n\tvar err error\n\tutils.ConsumerObject, err = kafka.NewConsumer(&kafka.ConfigMap{\n\t\t\"bootstrap.servers\": broker,\n\t\t\"group.id\": group,\n\t\t\"session.timeout.ms\": 6000,\n\t\t\"go.events.channel.enable\": true,\n\t\t\"go.application.rebalance.enable\": true,\n\t\t// Enable generation of PartitionEOF when the\n\t\t// end of a partition is reached.\n\t\t\"enable.partition.eof\": true,\n\t\t\"auto.offset.reset\": \"earliest\"})\n\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"Failed to create consumer: %s\\n\", err)\n\t\tos.Exit(1)\n\t}\n}", "func NewConsumer(\n\tqueueLogger *log.Logger,\n\tch *amqp.Channel,\n\tuserDBManager *user.DBManager,\n\tfriendPublisher *friend.Publisher,\n\tuserRedisManager *user.RedisManager,\n) *Consumer {\n\tqueue, err := ch.QueueDeclare(\n\t\t\"auth-long-token\", // name\n\t\ttrue, // durable\n\t\tfalse, // delete when unused\n\t\tfalse, // exclusive\n\t\tfalse, // no-wait\n\t\tnil, // arguments\n\t)\n\tutils.FailOnError(err, \"Failed to declare a queue\")\n\treturn &Consumer{\n\t\tlogger: queueLogger,\n\t\tchannel: ch,\n\t\tqueue: queue,\n\t\tuserDB: userDBManager,\n\t\tfriendPublisher: friendPublisher,\n\t\tuserRedis: userRedisManager,\n\t}\n}", "func NewConsumerApi(brokers []string, groupName, consumerOffset string) *Api {\n\tvar consumerGroup string\n\tif len(groupName) == 0 {\n\t\tconsumerGroup = consumerGroupName\n\t} else {\n\t\tconsumerGroup = groupName\n\t}\n\n\tvar offset int64\n\tswitch consumerOffset {\n\tcase \"earliest\":\n\t\toffset = sarama.OffsetOldest\n\tcase \"latest\":\n\t\toffset = sarama.OffsetNewest\n\tdefault:\n\t\toffset = sarama.OffsetNewest\n\t}\n\n\tconfig := newConfig()\n\t// 指定消费者组的消费策略\n\tconfig.Consumer.Group.Rebalance.Strategy = sarama.BalanceStrategyRange\n\t// 指定消费组读取消息的offset[OffsetNewest,OffsetOldest]\n\t// config.Consumer.Offsets.Initial = sarama.OffsetNewest\n\tconfig.Consumer.Offsets.Initial = offset\n\t// 指定队列长度\n\tconfig.ChannelBufferSize = 2\n\n\tconsumerGroupApi, consumerGroupApiErr := sarama.NewConsumerGroup(brokers, consumerGroup, config)\n\tif consumerGroupApiErr != nil {\n\t\tfmt.Println(\"consumer group api connection failed\")\n\t\tpanic(consumerGroupApiErr)\n\t}\n\n\treturn &Api{ConsumerApi: consumerGroupApi}\n}", "func (q *Queue) Consumer(config ConsumerConfig) *Consumer {\n\treturn NewConsumer(q.ledger, q.table, config)\n}", "func NewDetachedConsumer(log logrus.FieldLogger, conf Config, opts ...ConfigOpt) (Consumer, error) {\n\t// See Reference at https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md\n\tkafkaConf := conf.baseKafkaConfig()\n\t_ = kafkaConf.SetKey(\"enable.auto.offset.store\", false) // manually StoreOffset after processing a message. 
It is mandatory for detached consumers.\n\n\t// In case we try to assign an offset out of range (greater than log-end-offset), consumer will use start consuming from offset zero.\n\t_ = kafkaConf.SetKey(\"auto.offset.reset\", \"earliest\")\n\n\tconf.Consumer.GroupID = conf.Topic // Defaults to topic name. See NOTE above)\n\n\tconf.Consumer.Apply(kafkaConf)\n\tfor _, opt := range opts {\n\t\topt(kafkaConf)\n\t}\n\n\tif err := conf.configureAuth(kafkaConf); err != nil {\n\t\treturn nil, errors.Wrap(err, \"error configuring auth for the Kafka consumer\")\n\t}\n\n\tconsumer, err := kafkalib.NewConsumer(kafkaConf)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif conf.RequestTimeout == 0 {\n\t\tconf.RequestTimeout = DefaultTimeout\n\t}\n\n\tcc := &ConfluentConsumer{\n\t\tc: consumer,\n\t\tconf: conf,\n\t\tlog: log,\n\t}\n\n\tlogFields := logrus.Fields{\"kafka_topic\": cc.conf.Topic}\n\n\tif cc.conf.Consumer.Partition == nil && cc.conf.Consumer.PartitionKey == \"\" {\n\t\treturn nil, errors.New(\"Either a partition or a partition key is required for creating a detached consumer\")\n\t}\n\n\tlogFields[\"kafka_partition_key\"] = cc.conf.Consumer.PartitionKey\n\tlogFields[\"kafka_partition\"] = cc.conf.Consumer.Partition\n\n\tif cc.conf.Consumer.Partition != nil {\n\t\tcc.log.WithFields(logFields).Debug(\"Assigning specified partition\")\n\t\tpt := []kafkalib.TopicPartition{\n\t\t\t{\n\t\t\t\tTopic: &cc.conf.Topic,\n\t\t\t\tPartition: *cc.conf.Consumer.Partition,\n\t\t\t},\n\t\t}\n\t\treturn cc, cc.c.Assign(pt)\n\t}\n\n\tif cc.conf.Consumer.PartitionerAlgorithm == \"\" {\n\t\tcc.conf.Consumer.PartitionerAlgorithm = PartitionerMurMur2\n\t}\n\n\tcc.log.WithFields(logFields).Debug(\"Assigning partition by partition key\")\n\n\treturn cc, cc.AssignPartitionByKey(cc.conf.Consumer.PartitionKey, cc.conf.Consumer.PartitionerAlgorithm)\n}", "func newPrivateOrPartnerConsumer(provider *Provider, authURL string) *oauth.Consumer {\n\tprivateKeyFileContents, err := ioutil.ReadFile(privateKeyFilePath)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tblock, _ := pem.Decode(privateKeyFileContents)\n\n\tprivateKey, err := x509.ParsePKCS1PrivateKey(block.Bytes)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tc := oauth.NewRSAConsumer(\n\t\tprovider.ClientKey,\n\t\tprivateKey,\n\t\toauth.ServiceProvider{\n\t\t\tRequestTokenUrl: requestURL,\n\t\t\tAuthorizeTokenUrl: authURL,\n\t\t\tAccessTokenUrl: tokenURL},\n\t)\n\n\tc.Debug(provider.debug)\n\n\taccepttype := []string{\"application/json\"}\n\tuseragent := []string{userAgentString}\n\tc.AdditionalHeaders = map[string][]string{\n\t\t\"Accept\": accepttype,\n\t\t\"User-Agent\": useragent,\n\t}\n\n\treturn c\n}", "func New(ctx context.Context) *Consumer {\n\treturn &Consumer{ctx: ctx}\n}", "func New(address string, process Process, options ...OptionEvent) *Consumer {\n\tarl := &Consumer{\n\t\tcfg: ConsumerConfig{\n\t\t\tMaxInFlight: DefaultMaxInflight,\n\t\t\tNumOfConsumers: DefaultNumOfConsumers,\n\t\t\tMaxAttempts: DefaultMaxAttempts,\n\t\t\tRequeueInterval: DefaultRequeueInterval,\n\t\t\tTimeout: DefaultMsgTimeout,\n\t\t},\n\t\taddress: address,\n\t\tprocess: process,\n\t}\n\n\tfor _, opt := range options {\n\t\topt(arl)\n\t}\n\n\treturn arl\n}", "func NewConsumer(consumerKey string, consumerSecret string, requestMethod string, requestURL string) *Consumer {\n\tclock := &defaultClock{}\n\tconsumer := &Consumer{\n\t\tconsumerKey: consumerKey,\n\t\tconsumerSecret: consumerSecret,\n\t\trequestMethod: requestMethod,\n\t\trequestURL: requestURL,\n\t\tclock: 
clock,\n\t\tnonceGenerator: newLockedNonceGenerator(clock),\n\n\t\tAdditionalParams: make(map[string]string),\n\t}\n\n\tconsumer.signer = &HMACSigner{\n\t\tconsumerSecret: consumerSecret,\n\t\thashFunc: crypto.SHA1,\n\t}\n\n\treturn consumer\n}", "func TestNewConsumer(tb testing.TB, defaults bool, options ...Option) Consumer {\n\tc, err := NewConsumer()\n\trequire.NoError(tb, err)\n\n\tif !defaults {\n\t\tc.Inmem = inmemconfig.Consumer{Store: nil}\n\t\tc.Kafka = kafkaconfig.Consumer{}\n\t\tc.Pubsub = pubsubconfig.Consumer{}\n\t\tc.Standardstream = standardstreamconfig.Consumer{}\n\t\tc.Logger = nil\n\t\tc.HandleInterrupt = false\n\t\tc.HandleErrors = false\n\t\tc.Name = \"\"\n\t\tc.AllowEnvironmentBasedConfiguration = false\n\t}\n\n\tfor _, option := range options {\n\t\toption.apply(&c, nil)\n\t}\n\n\terr = envconfig.Process(c.Name, &c)\n\trequire.NoError(tb, err)\n\n\treturn c\n}", "func NewConsumer(ctx context.Context, db gorpmapper.SqlExecutorWithTx, userID string) (*sdk.AuthUserConsumer, error) {\n\treturn newConsumerWithData(ctx, db, userID, nil)\n}", "func NewConsumer(conn net.Conn, compression bool) (*Consumer, error) {\n\tif !compression {\n\t\tdec := gob.NewDecoder(conn)\n\t\treturn &Consumer{conn, dec, nil}, nil\n\t}\n\n\tcompR, err := zlib.NewReader(conn)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdec := gob.NewDecoder(compR)\n\treturn &Consumer{conn, dec, compR}, nil\n}", "func NewConsumer(amqpURI, exchange, exchangeType, queueName, key, ctag string) (*Consumer, error) {\n\tc := &Consumer{\n\t\tconn: nil,\n\t\tchannel: nil,\n\t\ttag: ctag,\n\t\tdone: make(chan error),\n\t}\n\n\tvar err error\n\n\tlog.Printf(\"dialing %q\", amqpURI)\n\tc.conn, err = amqp.Dial(amqpURI)\n\tif err != nil {\n\t\tlog.Printf(\"RMQERR Dial: %s\", err)\n\t\treturn nil, fmt.Errorf(\"Dial: %s\", err)\n\t}\n\n\tgo func() {\n\t\tlog.Printf(\"closing: %s\", <-c.conn.NotifyClose(make(chan *amqp.Error)))\n\t}()\n\n\tlog.Printf(\"got Connection, getting Channel\")\n\tc.channel, err = c.conn.Channel()\n\tif err != nil {\n\t\tlog.Printf(\"RMQERR Channel: %s\", err)\n\t\treturn nil, fmt.Errorf(\"Channel: %s\", err)\n\t}\n\n\tlog.Printf(\"got Channel, declaring Exchange (%q)\", exchange)\n\tif err = c.channel.ExchangeDeclare(\n\t\texchange, // name of the exchange\n\t\texchangeType, // type\n\t\ttrue, // durable\n\t\tfalse, // delete when complete\n\t\tfalse, // internal\n\t\tfalse, // noWait\n\t\tnil, // arguments\n\t); err != nil {\n\t\tlog.Printf(\"RMQERR Exchange Declare: %s\", err)\n\t\treturn nil, fmt.Errorf(\"Exchange Declare: %s\", err)\n\t}\n\n\tlog.Printf(\"declared Exchange, declaring Queue %q\", queueName)\n\tqueue, err := c.channel.QueueDeclare(\n\t\tqueueName, // name of the queue\n\t\ttrue, // durable\n\t\tfalse, // delete when usused\n\t\tfalse, // exclusive\n\t\tfalse, // noWait\n\t\tnil, // arguments\n\t)\n\tif err != nil {\n\t\tlog.Printf(\"RMQERR Queue Declare: %s\", err)\n\t\treturn nil, fmt.Errorf(\"Queue Declare: %s\", err)\n\t}\n\n\tlog.Printf(\"declared Queue (%q %d messages, %d consumers), binding to Exchange (key %q)\",\n\t\tqueue.Name, queue.Messages, queue.Consumers, key)\n\n\tif err = c.channel.QueueBind(\n\t\tqueue.Name, // name of the queue\n\t\tkey, // bindingKey\n\t\texchange, // sourceExchange\n\t\tfalse, // noWait\n\t\tnil, // arguments\n\t); err != nil {\n\t\tlog.Printf(\"RMQERR Queue Bind: %s\", err)\n\t\treturn nil, fmt.Errorf(\"Queue Bind: %s\", err)\n\t}\n\n\tlog.Printf(\"Queue bound to Exchange, starting Consume (consumer tag %q)\", c.tag)\n\tdeliveries, err := 
c.channel.Consume(\n\t\tqueue.Name, // name\n\t\tc.tag, // consumerTag,\n\t\tfalse, // noAck\n\t\tfalse, // exclusive\n\t\tfalse, // noLocal\n\t\tfalse, // noWait\n\t\tnil, // arguments\n\t)\n\tif err != nil {\n\t\tlog.Printf(\"RMQERR Queue Consume: %s\", err)\n\t\treturn nil, fmt.Errorf(\"Queue Consume: %s\", err)\n\t}\n\tgo handle(deliveries, c.done)\n\n\treturn c, nil\n}", "func (mq *MessageQueue) NewConsumer(id int, consumeQ, failEx, failQ string, work func([]byte) error) error {\n\tmq.Consumers = append(mq.Consumers, Consumer{\n\t\tID: id,\n\t\tConsumeFromQ: consumeQ,\n\t\tFailExchange: failEx,\n\t\tFailQueue: failQ,\n\t\tWork: work,\n\t})\n\treturn nil\n}", "func (m *Manager) NewConsumer(stream string, opts ...ConsumerOption) (consumer *Consumer, err error) {\n\tif !IsValidName(stream) {\n\t\treturn nil, fmt.Errorf(\"%q is not a valid stream name\", stream)\n\t}\n\n\treturn m.NewConsumerFromDefault(stream, DefaultConsumer, opts...)\n}", "func (cons *ConsumerObject) Init(broker string, group string) {\n\tvar err error\n\tC, err = kafka.NewConsumer(&kafka.ConfigMap{\n\t\t\"bootstrap.servers\": broker,\n\t\t\"broker.address.family\": \"v4\",\n\t\t\"group.id\": group,\n\t\t\"session.timeout.ms\": 6000,\n\t\t\"enable.auto.commit\": false,\n\t\t\"auto.offset.reset\": \"latest\"})\n\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"Failed to create consumer: %s\\n\", err)\n\t\tos.Exit(1)\n\t}\n\n\tfmt.Printf(\"Created Consumer %v\\n\", C)\n}", "func (f *Factory) Create() (async.Consumer, error) {\n\tcc := kafka.ConsumerConfig{\n\t\tBrokers: f.brokers,\n\t\tSaramaConfig: f.cfg,\n\t\tBuffer: f.cfg.ChannelBufferSize,\n\t}\n\n\tc := &consumer{\n\t\ttopics: f.topics,\n\t\tgroup: f.group,\n\t\ttraceTag: opentracing.Tag{Key: \"group\", Value: f.group},\n\t\tconfig: cc,\n\t}\n\n\tvar err error\n\tfor _, o := range f.oo {\n\t\terr = o(&c.config)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"could not apply OptionFunc to consumer : %w\", err)\n\t\t}\n\t}\n\n\treturn c, nil\n}", "func NewBroker(address string, nodeID int32, config *BrokerConfig) (*Broker, error) {\n\t//TODO get available api versions\n\tbroker := &Broker{\n\t\tconfig: config,\n\t\taddress: address,\n\t\tnodeID: nodeID,\n\n\t\tcorrelationID: 0,\n\t\tdead: true,\n\t}\n\n\tconn, err := newConn(address, config)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to establish connection when init broker: %s\", err)\n\t}\n\tbroker.conn = conn\n\n\tif broker.config.SaslConfig != nil {\n\t\tif err := broker.sendSaslAuthenticate(); err != nil {\n\t\t\tglog.Errorf(\"sasl authenticate error : %s\", err)\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tbroker.dead = false\n\n\t// TODO since ??\n\t//apiVersionsResponse, err := broker.requestApiVersions()\n\t//if err != nil {\n\t//return nil, fmt.Errorf(\"failed to request api versions when init broker: %s\", err)\n\t//}\n\t//broker.apiVersions = apiVersionsResponse.ApiVersions\n\n\treturn broker, nil\n}", "func NewConsumerConf(topic string, partition int32) ConsumerConf {\n\treturn ConsumerConf{\n\t\tTopic: topic,\n\t\tPartition: partition,\n\t\tRequestTimeout: time.Millisecond * 50,\n\t\tRetryLimit: -1,\n\t\tRetryWait: time.Millisecond * 50,\n\t\tRetryErrLimit: 10,\n\t\tRetryErrWait: time.Millisecond * 500,\n\t\tMinFetchSize: 1,\n\t\tMaxFetchSize: 2000000,\n\t\tStartOffset: StartOffsetOldest,\n\t}\n}", "func (e *ExternalServiceList) GetConsumer(ctx context.Context, cfg *config.Config) (kafkaConsumer kafka.IConsumerGroup, err error) {\n\tkafkaConsumer, err = e.Init.DoGetConsumer(ctx, cfg)\n\tif err 
!= nil {\n\t\treturn\n\t}\n\te.Consumer = true\n\treturn\n}", "func newUpdateConsumer(t *testing.T) (*Consumer, chan Event) {\n\tc := make(chan Event, 2048)\n\tac := NewConsumer(t.Name(), c, ConsumerUpdate)\n\trequire.NoError(t, acctProbe.RegisterConsumer(ac))\n\n\treturn ac, c\n}", "func NewBroker(cfg *meta.ClusterConfig, nodeUUID string, logger log.Logger) (*Broker, error) {\n\t// start the metadata watcher\n\tmetaWatcher, err := meta.NewWatcher(nodeUUID, cfg)\n\tif err != nil {\n\t\tlogger.Errorf(\"Error creating the watcher. Err: %v\", err)\n\t\treturn nil, err\n\t}\n\n\t// TODO: enable by default in build\n\tmodels.EnableUintSupport()\n\n\t// create the broker instance\n\tbroker := Broker{\n\t\tnodeUUID: nodeUUID,\n\t\tmetaWatcher: metaWatcher,\n\t\trpcClients: make(map[string]*rpckit.RPCClient),\n\t\tlogger: logger.WithContext(\"brokeruuid\", nodeUUID),\n\t}\n\n\t// create routine context\n\tbroker.ctx, broker.ctxCancelFunc = context.WithCancel(context.Background())\n\n\t// continuous query\n\tbroker.cqMutex = sync.Mutex{}\n\tbroker.metricsWithCQCreated = make(map[string]bool)\n\tbroker.cqInfoMap = make(map[string]string)\n\tbroker.cqMeasurementMap = make(map[string]bool)\n\tgo broker.continuousQueryWaitDB()\n\n\treturn &broker, nil\n}", "func New(face iface.Face, cfg Config) (*Consumer, error) {\n\tsocket := face.NumaSocket()\n\trxC := (*C.TgConsumerRx)(eal.Zmalloc(\"TgConsumerRx\", C.sizeof_TgConsumerRx, socket))\n\tcfg.RxQueue.DisableCoDel = true\n\tif e := iface.PktQueueFromPtr(unsafe.Pointer(&rxC.rxQueue)).Init(cfg.RxQueue, socket); e != nil {\n\t\teal.Free(rxC)\n\t\treturn nil, nil\n\t}\n\n\ttxC := (*C.TgConsumerTx)(eal.Zmalloc(\"TgConsumerTx\", C.sizeof_TgConsumerTx, socket))\n\ttxC.face = (C.FaceID)(face.ID())\n\ttxC.interestMp = (*C.struct_rte_mempool)(ndni.InterestMempool.Get(socket).Ptr())\n\tC.pcg32_srandom_r(&txC.trafficRng, C.uint64_t(rand.Uint64()), C.uint64_t(rand.Uint64()))\n\tC.NonceGen_Init(&txC.nonceGen)\n\n\tvar consumer Consumer\n\tconsumer.rxC = rxC\n\tconsumer.txC = txC\n\tconsumer.Rx = ealthread.New(\n\t\tcptr.Func0.C(unsafe.Pointer(C.TgConsumerRx_Run), unsafe.Pointer(rxC)),\n\t\tealthread.InitStopFlag(unsafe.Pointer(&rxC.stop)),\n\t)\n\tconsumer.Tx = ealthread.New(\n\t\tcptr.Func0.C(unsafe.Pointer(C.TgConsumerTx_Run), unsafe.Pointer(txC)),\n\t\tealthread.InitStopFlag(unsafe.Pointer(&txC.stop)),\n\t)\n\n\tfor i, pattern := range cfg.Patterns {\n\t\tif _, e := consumer.addPattern(pattern); e != nil {\n\t\t\treturn nil, fmt.Errorf(\"pattern(%d): %s\", i, e)\n\t\t}\n\t}\n\tconsumer.SetInterval(cfg.Interval.Duration())\n\treturn &consumer, nil\n}", "func NewBroker(c core.Config) (*Broker, error) {\n\tif _broker != nil {\n\t\tpanic(\"Global broker had already been created\")\n\t}\n\tserviceMgr, err := core.NewServiceManager(\"broker\", c)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t_broker = &Broker{\n\t\tServiceManager: *serviceMgr,\n\t\tbrokerId: uuid.NewV4().String(),\n\t}\n\treturn _broker, nil\n}", "func newBroker(config []byte) (*broker, error) {\n\tvar b broker\n\tvar c Config\n\tif err := json.Unmarshal(config, &c); err != nil {\n\t\treturn &b, errors.Wrap(err, \"unable to parse AMQP config\")\n\t}\n\tif err := c.validate(); err != nil {\n\t\treturn &b, errors.Wrap(err, \"could not validate config\")\n\t}\n\tb.Config = c\n\tb.closed = make(chan *amqp.Error)\n\tclose(b.closed)\n\treturn &b, nil\n}", "func ConstructConsumer(id int, proxy Proxy) Consumer {\n\treturn Consumer{\n\t\tid: id,\n\t\tch: make(chan Widget, MAX_NUM_WIDGETS),\n\t\tproxy: 
proxy,\n\t}\n}", "func (cons *ConsumerObject) GetConsumer() *kafka.Consumer {\n\treturn C\n}", "func NewBroker(clusterName string, nodeAddresses []string, conf BrokerConf) (*Broker, error) {\n\tmetadata, err := getMetadataCache().getOrCreateMetadata(clusterName, nodeAddresses, conf.ClusterConnectionConf)\n\tif err != nil {\n\t\tlog.Warningf(\"Failed to get cluster Metadata %s from cache\", nodeAddresses)\n\t\treturn nil, err\n\t}\n\n\tmetadataConnPool, err := metadata.connectionPoolForClient(conf.ClientID, conf.ClusterConnectionConf)\n\tif err != nil {\n\t\tlog.Warningf(\"Failed to get ConnectionPool for metadata from cache\")\n\t\treturn nil, err\n\t}\n\n\treturn &Broker{\n\t\tconf,\n\t\tmetadataConnPool,\n\t\tmetadata,\n\t}, nil\n}", "func NewConsumerLister(indexer cache.Indexer) ConsumerLister {\n\treturn &consumerLister{indexer: indexer}\n}", "func GetBrokerFromZookeeper(zkConn zookeeper.Connection, id, preferredListener string) (*Broker, error) {\n\t// Query Zookeeper for broker information\n\trawBrokerJSON, _, err := zkConn.Get(zookeeper.Path(fmt.Sprintf(\"/brokers/ids/%s\", id)))\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to retrieve broker information: %s\", err)\n\t}\n\n\t// Parse the JSON returned by Zookeeper\n\ttype brokerJSONDecoder struct {\n\t\tHost string\n\t\tJMXPort int `json:\"jmx_port\"`\n\t\tProtocolMap map[string]string `json:\"listener_security_protocol_map\"`\n\t\tEndpoints []string `json:\"endpoints\"`\n\t}\n\tvar brokerDecoded brokerJSONDecoder\n\terr = json.Unmarshal(rawBrokerJSON, &brokerDecoded)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to unmarshal broker information from zookeeper: %s\", err)\n\t}\n\tlog.Debug(\"zookeeper instance %s is reporting broker %s with the following information %+v\", zkConn.Server(), id, brokerDecoded)\n\n\t// Go through the list of brokers until we find one that uses a protocol we know how to handle\n\tfor _, endpoint := range brokerDecoded.Endpoints {\n\t\tlistener, host, port, err := parseEndpoint(endpoint)\n\t\tif err != nil {\n\t\t\tlog.Error(\"Failed to parse endpoint '%s' from zookeeper: %s\", endpoint, err)\n\t\t\tcontinue\n\t\t}\n\n\t\t// Skip this endpoint if it doesn't match the configured listener\n\t\tif preferredListener != \"\" && preferredListener != listener {\n\t\t\tlog.Debug(\"Skipping endpoint '%s' because it doesn't match the preferredListener configured\", endpoint)\n\t\t\tcontinue\n\t\t}\n\n\t\t// Check that the protocol map\n\t\tprotocol, ok := brokerDecoded.ProtocolMap[listener]\n\t\tif !ok {\n\t\t\tlog.Error(\"Listener '%s' not found in protocol map for %s:%d\", listener, host, port)\n\t\t\tcontinue\n\t\t}\n\n\t\tbrokerConfig := &args.BrokerHost{\n\t\t\tHost: host,\n\t\t\tKafkaPort: port,\n\t\t\tKafkaProtocol: protocol,\n\t\t\tJMXPort: brokerDecoded.JMXPort,\n\t\t\tJMXUser: args.GlobalArgs.DefaultJMXUser,\n\t\t\tJMXPassword: args.GlobalArgs.DefaultJMXPassword,\n\t\t}\n\t\tnewBroker, err := NewBroker(brokerConfig)\n\t\tif err != nil {\n\t\t\tlog.Warn(\"Failed creating client: %s\", err)\n\t\t\tcontinue\n\t\t}\n\t\tnewBroker.ID = id\n\n\t\treturn newBroker, nil\n\t}\n\n\treturn nil, fmt.Errorf(\"found no supported endpoint that successfully connected to broker with host %s\", brokerDecoded.Host)\n}", "func NewBroker() *Broker {\n\treturn &Broker{\n\t\tsubscribers: Subscribers{},\n\t\tslock: sync.RWMutex{},\n\t\ttopics: map[string]Subscribers{},\n\t\ttlock: sync.RWMutex{},\n\t}\n}", "func NewBroker(o Options) (*Broker, error) {\n\tmb := minibroker.NewClient(o.HelmRepoUrl, 
o.ServiceCatalogEnabledOnly)\n\terr := mb.Init()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// For example, if your Broker requires a parameter from the command\n\t// line, you would unpack it from the Options and set it on the\n\t// Broker here.\n\treturn &Broker{\n\t\tClient: mb,\n\t\tasync: true,\n\t\tdefaultNamespace: o.DefaultNamespace,\n\t}, nil\n}", "func NewConsumer(amqpURI, exchange, queue, routingKey, tag string, prefetch int) *RabbitMQ {\n\tconn, err := amqp.Dial(amqpURI)\n\tif err != nil {\n\t\tlog.Fatalf(\"writer failed to connect to Rabbit: %s\", err)\n\t\treturn nil\n\t}\n\n\tgo func() {\n\t\tlog.Printf(\"writer closing: %s\", <-conn.NotifyClose(make(chan *amqp.Error)))\n\t\tlog.Printf(\"writer blocked by rabbit: %v\", <-conn.NotifyBlocked(make(chan amqp.Blocking)))\n\t}()\n\n\tchannel, err := conn.Channel()\n\tif err != nil {\n\t\tlog.Fatalf(\"writer failed to get a channel from Rabbit: %s\", err)\n\t\treturn nil\n\t}\n\n\tq, err := channel.QueueDeclarePassive(\n\t\tqueue, // name of the queue\n\t\ttrue, // durable\n\t\tfalse, // delete when usused\n\t\tfalse, // exclusive\n\t\tfalse, // noWait\n\t\tnil, // arguments\n\t)\n\tif err != nil {\n\t\tlog.Fatalf(\"Queue Declare: %s\", err)\n\t}\n\tif q.Messages == 0 {\n\t\tlog.Fatalf(\"No messages in RabbitMQ Queue: %s\", q.Name)\n\t}\n\n\tr := &RabbitMQ{\n\t\tconn: conn,\n\t\tchannel: channel,\n\t\texchange: exchange,\n\t\tcontentType: \"application/json\",\n\t\tcontentEncoding: \"UTF-8\",\n\t}\n\tlog.Print(\"RabbitMQ connected: \", amqpURI)\n\tlog.Printf(\"Bind to Exchange: %q and Queue: %q, Messaging waiting: %d\", exchange, queue, q.Messages)\n\n\treturn r\n}", "func (client *ClientImpl) GetConsumer(ctx context.Context, args GetConsumerArgs) (*Consumer, error) {\n\trouteValues := make(map[string]string)\n\tif args.ConsumerId == nil || *args.ConsumerId == \"\" {\n\t\treturn nil, &azuredevops.ArgumentNilOrEmptyError{ArgumentName: \"args.ConsumerId\"}\n\t}\n\trouteValues[\"consumerId\"] = *args.ConsumerId\n\n\tqueryParams := url.Values{}\n\tif args.PublisherId != nil {\n\t\tqueryParams.Add(\"publisherId\", *args.PublisherId)\n\t}\n\tlocationId, _ := uuid.Parse(\"4301c514-5f34-4f5d-a145-f0ea7b5b7d19\")\n\tresp, err := client.Client.Send(ctx, http.MethodGet, locationId, \"7.1-preview.1\", routeValues, queryParams, nil, \"\", \"application/json\", nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar responseValue Consumer\n\terr = client.Client.UnmarshalBody(resp, &responseValue)\n\treturn &responseValue, err\n}", "func New(config *config.Config) (MsgBroker, error) {\n\n\t//create connection\n\tlog.Printf(\"Connecting to broker %s\", config.Broker)\n\tconn, err := amqp.Dial(config.Broker)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tch, err := conn.Channel()\n\t//create channel\n\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tb := &amqpBroker{*conn, *ch, make(chan *amqp.Error), config}\n\tlog.Println(\"Returning broker\")\n\treturn b, nil\n}", "func NewBroker() *Broker {\n\treturn &Broker{\n\t\tactive: false,\n\t\tpublish: make(chan publishMessage),\n\t\tconnect: make(chan Client),\n\t\tdisconnect: make(chan Client),\n\t\tclients: map[string]Client{},\n\t}\n}", "func (c Consumer) BlockchainConsumer() *magmasc.Consumer {\n\treturn &magmasc.Consumer{\n\t\tID: node.GetSelfNode().ID(),\n\t\tExtID: c.ExtID,\n\t\tHost: c.Host,\n\t}\n}", "func LaunchConsumer(conf map[string]interface{}) {\n\tkafkaConsumer := xkafka.NewConsumer()\n\tkafkaConsumer.Init(conf)\n\tdefer 
kafkaConsumer.Close()\n\n\tkafkaConsumer.RunByCall(func(msg *sarama.ConsumerMessage) bool {\n\t\tb, err := JobExecutor(msg)\n\t\tcheckErr(err)\n\t\treturn b\n\t})\n}", "func NewBroker(name string) *unstructured.Unstructured {\n\tb := &unstructured.Unstructured{}\n\n\tb.SetAPIVersion(APIEventing)\n\tb.SetKind(\"Broker\")\n\tb.SetName(name)\n\n\treturn b\n}", "func (c ConsumerGroup) Consume(partitionIds []uint8, offset uint, n uint) (msgs map[uint8][][]byte, err error) {\n\n\tresult := make(map[uint8][][]byte)\n\tnumConsumers := c.NumConsumers\n\tpartitionLength := len(partitionIds)\n\n\tvar brokerAddr string\n\terr = c.ServerConn.Call(\"Server.GetBrokerAddr\", true, &brokerAddr)\n\tcheckError(err)\n\tif err != nil {\n\t\treturn result, err\n\t}\n\n\t// connect to broker\n\tbrokerConn, err := rpc.Dial(\"tcp\", brokerAddr)\n\tcheckError(err)\n\tdefer brokerConn.Close()\n\n\t// generate manifest for each consumer\n\tmanifests := make([][]uint8, numConsumers)\n\tacc := 0\n\tfor i := 0; i < numConsumers; i++ {\n\t\tif acc >= partitionLength {\n\t\t\tbreak\n\t\t}\n\t\tvar temp []uint8\n\t\tfor j := 0; j < int(math.Ceil(float64(partitionLength)/float64(numConsumers))); j++ {\n\t\t\tif acc >= partitionLength {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\ttemp = append(temp, partitionIds[acc])\n\t\t\tacc++\n\t\t}\n\t\tmanifests = append(manifests, temp)\n\t}\n\n\t// set max processes (threads)\n\toldMaxProcs := runtime.GOMAXPROCS(numConsumers)\n\n\t// start consumer goroutines\n\tfor i := 0; i < numConsumers; i++ {\n\t\tgo startConsumer(result, manifests[i], offset, n, brokerConn, i)\n\t}\n\n\t// revert maxprocs to old value\n\truntime.GOMAXPROCS(oldMaxProcs)\n\n\treturn result, nil\n}", "func (tt *Tester) ConsumerBuilder() goka.SaramaConsumerBuilder {\n\treturn func(brokers []string, clientID string) (sarama.Consumer, error) {\n\t\ttt.mClients.RLock()\n\t\tdefer tt.mClients.RUnlock()\n\n\t\tclient, exists := tt.clients[clientID]\n\t\tif !exists {\n\t\t\treturn nil, fmt.Errorf(\"cannot create sarama consumer because no client registered with ID: %s\", clientID)\n\t\t}\n\n\t\treturn client.consumer, nil\n\t}\n}", "func NewBroker(namespace string) *Broker {\n\treturn &Broker{\n\t\tclient: knClientset.EventingV1beta1().Brokers(namespace),\n\t\tNamespace: namespace,\n\t}\n}", "func newBroker(eventBroker wire.EventBroker, handler *reductionHandler, timeout time.Duration, rpcBus *wire.RPCBus) *broker {\n\tscoreChan := initBestScoreUpdate(eventBroker)\n\tctx := newCtx(handler, timeout)\n\tfilter := launchReductionFilter(eventBroker, ctx)\n\troundChannel := consensus.InitRoundUpdate(eventBroker)\n\n\treturn &broker{\n\t\troundUpdateChan: roundChannel,\n\t\tctx: ctx,\n\t\tfilter: filter,\n\t\tselectionChan: scoreChan,\n\t\treducer: newReducer(ctx, eventBroker, filter, rpcBus),\n\t}\n}", "func NewReader(topicName string, groupID string) *kafka.Reader {\r\n\tbrokerUrls := Config.GetStringSlice(\"kafka.consumer.brokerUrls\")\r\n\tr := kafka.NewReader(kafka.ReaderConfig{\r\n\t\tBrokers: brokerUrls,\r\n\t\tGroupID: groupID,\r\n\t\tTopic: topicName,\r\n\t\tDialer: dialer,\r\n\t})\r\n\treturn r\r\n}", "func (s consumerNamespaceLister) Get(name string) (*arbv1.Consumer, error) {\n\tobj, exists, err := s.indexer.GetByKey(s.namespace + \"/\" + name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !exists {\n\t\treturn nil, errors.NewNotFound(arbv1.Resource(\"queue\"), name)\n\t}\n\treturn obj.(*arbv1.Consumer), nil\n}", "func NewConsumerGroup(addrs []string, groupID string, config *sarama.Config, sensor instana.TracerLogger) 
(sarama.ConsumerGroup, error) {\n\tc, err := sarama.NewConsumerGroup(addrs, groupID, config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn consumerGroup{c, sensor}, nil\n}", "func (c *ConsumerManager) AddConsumer(topic, channel string, client ConsumerClient) error {\n\n}", "func New(opts ...Option) *Kafka {\n\tk := &Kafka{}\n\tfor _, opt := range opts {\n\t\topt(k)\n\t}\n\tif k.addrs == \"\" {\n\t\tk.addrs = \"localhost:9092\"\n\t}\n\tif k.encoder == nil {\n\t\tk.encoder = broker.JSONEncoder{}\n\t}\n\tif k.log == nil {\n\t\tk.log = log.Root()\n\t}\n\treturn k\n}", "func NewAWSBroker(o Options, awssess GetAwsSession, clients AwsClients, getCallerId GetCallerIder, updateCatalog UpdateCataloger, pollUpdate PollUpdater) (*AwsBroker, error) {\n\n\tsess := awssess(o.KeyID, o.SecretKey, o.Region, \"\", o.Profile, map[string]string{})\n\ts3sess := awssess(o.KeyID, o.SecretKey, o.S3Region, \"\", o.Profile, map[string]string{})\n\ts3svc := clients.NewS3(s3sess)\n\tddbsvc := clients.NewDdb(sess)\n\tstssvc := clients.NewSts(sess)\n\tcallerid, err := getCallerId(stssvc)\n\tif err != nil {\n\t\treturn &AwsBroker{}, err\n\t}\n\taccountid := *callerid.Account\n\taccountuuid := uuid.NewV5(uuid.NullUUID{}.UUID, accountid+o.BrokerID)\n\n\tglog.Infof(\"Running as caller identity '%+v'.\", callerid)\n\n\tvar db Db\n\tdb.Brokerid = o.BrokerID\n\tdb.Accountid = accountid\n\tdb.Accountuuid = accountuuid\n\n\t// connect DynamoDB adapter to storage port\n\tdb.DataStorePort = dynamodbadapter.DdbDataStore{\n\t\tAccountid: accountid,\n\t\tAccountuuid: accountuuid,\n\t\tBrokerid: o.BrokerID,\n\t\tRegion: o.Region,\n\t\tDdb: *ddbsvc,\n\t\tTablename: o.TableName,\n\t}\n\n\t// setup in memory cache\n\tvar catalogcache = cache.NewMemoryWithTTL(time.Duration(CacheTTL))\n\tvar listingcache = cache.NewMemoryWithTTL(time.Duration(CacheTTL))\n\tlistingcache.StartGC(time.Minute * 5)\n\tbd := &BucketDetailsRequest{\n\t\to.S3Bucket,\n\t\to.S3Key,\n\t\to.TemplateFilter,\n\t}\n\n\t// populate broker variables\n\tbl := AwsBroker{\n\t\taccountId: accountid,\n\t\tkeyid: o.KeyID,\n\t\tsecretkey: o.SecretKey,\n\t\tprofile: o.Profile,\n\t\ttablename: o.TableName,\n\t\ts3bucket: o.S3Bucket,\n\t\ts3region: o.S3Region,\n\t\ts3key: addTrailingSlash(o.S3Key),\n\t\ttemplatefilter: o.TemplateFilter,\n\t\tregion: o.Region,\n\t\ts3svc: s3svc,\n\t\tcatalogcache: catalogcache,\n\t\tlistingcache: listingcache,\n\t\tbrokerid: o.BrokerID,\n\t\tdb: db,\n\t\tGetSession: awssess,\n\t\tClients: clients,\n\t\tprescribeOverrides: o.PrescribeOverrides,\n\t\tglobalOverrides: getGlobalOverrides(o.BrokerID),\n\t}\n\n\t// get catalog and setup periodic updates from S3\n\terr = updateCatalog(listingcache, catalogcache, *bd, s3svc, db, bl, ListTemplates, ListingUpdate, MetadataUpdate)\n\tif err != nil {\n\t\treturn &AwsBroker{}, err\n\t}\n\tgo pollUpdate(600, listingcache, catalogcache, *bd, s3svc, db, bl, updateCatalog, ListTemplates)\n\treturn &bl, nil\n}", "func consume(topics []string, master sarama.Consumer) (chan *sarama.ConsumerMessage, chan *sarama.ConsumerError) {\n\tconsumers := make(chan *sarama.ConsumerMessage)\n\terrors := make(chan *sarama.ConsumerError)\n\n\tfor _, t := range topics {\n\t\tif strings.Contains(t, \"__consumer_offsets\") {\n\t\t\tcontinue\n\t\t}\n\n\t\tpartitions, err := master.Partitions(t)\n\t\tif err != nil {\n\t\t\tfmt.Println(\"error get partitions: \")\n\t\t\tpanic(err)\n\t\t}\n\n\t\tfor k, _ := range partitions {\n\n\t\t\tc, err := master.ConsumePartition(t, partitions[k], sarama.OffsetOldest)\n\t\t\tif err != nil 
{\n\t\t\t\tfmt.Printf(\"Topic: %d Partition: %d\", t, partitions[k])\n\t\t\t\tpanic(err)\n\t\t\t}\n\n\t\t\tgo func(t string, c sarama.PartitionConsumer) {\n\t\t\t\tfor {\n\t\t\t\t\tselect {\n\t\t\t\t\tcase consumerError := <-c.Errors():\n\t\t\t\t\t\terrors <- consumerError\n\t\t\t\t\tcase msg := <-c.Messages():\n\t\t\t\t\t\tconsumers <- msg\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}(t, c)\n\n\t\t}\n\t}\n\n\treturn consumers, errors\n}", "func NewBroker(stopCh <-chan struct{}) *Broker {\n\tb := &Broker{\n\t\tqueue: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),\n\t\tproxyUpdatePubSub: pubsub.New(0),\n\t\tproxyUpdateCh: make(chan proxyUpdateEvent),\n\t\tkubeEventPubSub: pubsub.New(0),\n\t\tcertPubSub: pubsub.New(0),\n\t}\n\n\tgo b.runWorkqueueProcessor(stopCh)\n\tgo b.runProxyUpdateDispatcher(stopCh)\n\n\treturn b\n}", "func WrapConsumer(c sarama.Consumer, sensor instana.TracerLogger) *Consumer {\n\treturn &Consumer{\n\t\tConsumer: c,\n\t\tsensor: sensor,\n\t}\n}", "func newPublicConsumer(provider *Provider, authURL string) *oauth.Consumer {\n\tc := oauth.NewConsumer(\n\t\tprovider.ClientKey,\n\t\tprovider.Secret,\n\t\toauth.ServiceProvider{\n\t\t\tRequestTokenUrl: requestURL,\n\t\t\tAuthorizeTokenUrl: authURL,\n\t\t\tAccessTokenUrl: tokenURL},\n\t)\n\n\tc.Debug(provider.debug)\n\n\taccepttype := []string{\"application/json\"}\n\tuseragent := []string{userAgentString}\n\tc.AdditionalHeaders = map[string][]string{\n\t\t\"Accept\": accepttype,\n\t\t\"User-Agent\": useragent,\n\t}\n\n\treturn c\n}", "func NewKafka(conf Config, log log.Modular, stats metrics.Type) (Type, error) {\n\tk := Kafka{\n\t\trunning: 1,\n\t\tlog: log.NewModule(\".output.kafka\"),\n\t\tstats: stats,\n\t\tconf: conf,\n\t\tmessages: nil,\n\t\tresponseChan: make(chan types.Response),\n\t\tcloseChan: make(chan struct{}),\n\t\tclosedChan: make(chan struct{}),\n\t}\n\tfor _, addr := range conf.Kafka.Addresses {\n\t\tfor _, splitAddr := range strings.Split(addr, \",\") {\n\t\t\tif len(splitAddr) > 0 {\n\t\t\t\tk.addresses = append(k.addresses, splitAddr)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn &k, nil\n}", "func NewLastMessageConsumer(config config.KafkaConsumer) (*LastMessageConsumer, error) {\n\tvar err error\n\tcons := &LastMessageConsumer{\n\t\tconfig: config,\n\t}\n\n\tkafkaconf := confluent.ConfigMap{\n\t\t\"bootstrap.servers\": config.Address,\n\t\t\"group.id\": config.GroupID,\n\t\t\"go.events.channel.enable\": false,\n\t\t\"enable.auto.commit\": false,\n\t\t\"go.application.rebalance.enable\": true,\n\t}\n\n\tcons.consumer, err = confluent.NewConsumer(&kafkaconf)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"cannot create new consumer: %w\", err)\n\t}\n\n\tok := cons.isLastMessageAvailable()\n\tif !ok {\n\t\treturn nil, fmt.Errorf(ErrEmptyTopic)\n\t}\n\n\treturn cons, nil\n}", "func NewBroker(config Config) *Broker {\n\tbroker := &Broker{Config: config}\n\tbroker.Init()\n\treturn broker\n}", "func NewBroker() *Broker {\n\treturn &Broker{\n\t\tmake(map[chan *Message]bool),\n\t\tmake(chan (chan *Message)),\n\t\tmake(chan (chan *Message)),\n\t\tmake(chan *Message),\n\t}\n}", "func (m *Manager) NewConsumerFromDefault(stream string, dflt api.ConsumerConfig, opts ...ConsumerOption) (consumer *Consumer, err error) {\n\tif !IsValidName(stream) {\n\t\treturn nil, fmt.Errorf(\"%q is not a valid stream name\", stream)\n\t}\n\n\tcfg, err := NewConsumerConfiguration(dflt, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvalid, errs := cfg.Validate()\n\tif !valid {\n\t\treturn nil, fmt.Errorf(\"configuration 
validation failed: %s\", strings.Join(errs, \", \"))\n\t}\n\n\treq := api.JSApiConsumerCreateRequest{\n\t\tStream: stream,\n\t\tConfig: *cfg,\n\t}\n\n\tcreatedInfo, err := m.createConsumer(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif createdInfo == nil {\n\t\treturn nil, fmt.Errorf(\"expected a consumer name but none were generated\")\n\t}\n\n\tc := m.consumerFromCfg(stream, createdInfo.Name, &createdInfo.Config)\n\tc.lastInfo = createdInfo\n\n\treturn c, nil\n}", "func newKafkaChannel(name, namespace string) *kafkav1alpha1.KafkaChannel {\n\treturn &kafkav1alpha1.KafkaChannel{\n\t\tObjectMeta: v1.ObjectMeta{\n\t\t\tName: name,\n\t\t\tNamespace: namespace,\n\t\t},\n\t}\n}", "func createConsumer(c *cli.Context) error {\n\tusername := c.String(\"username\")\n\tcustomID := c.String(\"custom_id\")\n\n\tif username == \"\" && customID == \"\" {\n\t\treturn fmt.Errorf(\"username: %s or custom id: %s invalid\", username, customID)\n\t}\n\n\tcfg := &ConsumerConfig{\n\t\tUsername: username,\n\t\tCustomID: customID,\n\t}\n\n\tctx, cannel := context.WithTimeout(context.Background(), 30*time.Second)\n\tdefer cannel()\n\n\tserverResponse, err := client.GatewayClient.Post(ctx, CONSUMER_RESOURCE_OBJECT, nil, cfg, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tbody, err := ioutil.ReadAll(serverResponse.Body)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttools.IndentFromBody(body)\n\n\treturn nil\n}", "func New(c *crossplane.Crossplane, logger lager.Logger) (*CrossplaneBroker, error) {\n\treturn &CrossplaneBroker{\n\t\tc: c,\n\t\tlogger: logger,\n\t}, nil\n}" ]
[ "0.76603764", "0.7496496", "0.7483704", "0.7291043", "0.72738576", "0.7219477", "0.71694016", "0.7165526", "0.70556706", "0.69840354", "0.6961893", "0.69204885", "0.6916017", "0.6884798", "0.6840952", "0.6827434", "0.6750205", "0.66685456", "0.6647801", "0.66370124", "0.65723056", "0.6553668", "0.6506999", "0.6461354", "0.64084095", "0.637375", "0.6361297", "0.6354123", "0.6345083", "0.63024354", "0.62962395", "0.62336105", "0.61596644", "0.6158419", "0.6115128", "0.6106003", "0.6098227", "0.6094411", "0.60873026", "0.60856986", "0.6074533", "0.60641766", "0.6061039", "0.6037396", "0.6024225", "0.60096353", "0.59980595", "0.5984082", "0.5982133", "0.59340304", "0.59287107", "0.59146774", "0.5890749", "0.58615106", "0.58488077", "0.5843992", "0.58067435", "0.5796795", "0.57811654", "0.57641697", "0.5715848", "0.57146835", "0.56790346", "0.56610715", "0.56579316", "0.5654871", "0.5646803", "0.563782", "0.5627134", "0.5625981", "0.56178594", "0.5605007", "0.5597035", "0.55885655", "0.5569023", "0.5566168", "0.55475795", "0.5546751", "0.5546247", "0.5544778", "0.5540356", "0.5535325", "0.55076075", "0.5503142", "0.5492892", "0.5488712", "0.54837114", "0.5481279", "0.5476688", "0.5472189", "0.5466653", "0.5457831", "0.5453892", "0.54483294", "0.54302156", "0.5421406", "0.54130787", "0.53937334", "0.53918046", "0.53870517" ]
0.7759017
0
InnerJoin selects records that have matching values in both tables. The left datatable is used as the reference datatable. InnerJoin transforms an expr column to a raw column.
func (left *DataTable) InnerJoin(right *DataTable, on []JoinOn) (*DataTable, error) { return newJoinImpl(innerJoin, []*DataTable{left, right}, on).Compute() }
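A minimal usage sketch for the function above. The JoinOn field names (Left, Right) and the two input tables are assumptions for illustration — neither is shown in this snippet:

// customers and orders are *DataTable values built elsewhere (hypothetical).
// JoinOn's real field names are not visible here; Left/Right are assumed.
joined, err := customers.InnerJoin(orders, []JoinOn{{Left: "id", Right: "customer_id"}})
if err != nil {
	log.Fatal(err)
}
// joined keeps only the rows whose join-key values appear in both tables,
// with any expr columns of the result materialized as raw columns.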
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (self Accessor) InnerJoin(expr interface{}) *SelectManager {\n\treturn self.From(self.Relation()).InnerJoin(expr)\n}", "func (sd *SelectDataset) InnerJoin(table exp.Expression, condition exp.JoinCondition) *SelectDataset {\n\treturn sd.joinTable(exp.NewConditionedJoinExpression(exp.InnerJoinType, table, condition))\n}", "func (w *Wrapper) InnerJoin(table interface{}, condition string) *Wrapper {\n\tw.saveJoin(table, \"INNER JOIN\", condition)\n\treturn w\n}", "func InnerJoin(tables []*DataTable, on []JoinOn) (*DataTable, error) {\n\treturn newJoinImpl(innerJoin, tables, on).Compute()\n}", "func (sd *SelectDataset) Join(table exp.Expression, condition exp.JoinCondition) *SelectDataset {\n\treturn sd.InnerJoin(table, condition)\n}", "func InnerJoin(clause string, args ...interface{}) QueryMod {\n\treturn func(q *queries.Query) {\n\t\tqueries.AppendInnerJoin(q, clause, args...)\n\t}\n}", "func InnerJoin(lx, rx reflect.Value) reflect.Value {\n\tresult := reflect.MakeSlice(reflect.SliceOf(lx.Type().Elem()), 0, lx.Len()+rx.Len())\n\trhash := hashSlice(rx)\n\tlhash := make(map[interface{}]struct{}, lx.Len())\n\n\tfor i := 0; i < lx.Len(); i++ {\n\t\tv := lx.Index(i)\n\t\t_, ok := rhash[v.Interface()]\n\t\t_, alreadyExists := lhash[v.Interface()]\n\t\tif ok && !alreadyExists {\n\t\t\tlhash[v.Interface()] = struct{}{}\n\t\t\tresult = reflect.Append(result, v)\n\t\t}\n\t}\n\treturn result\n}", "func (b *Builder) InnerJoin(joinTable string, joinCond interface{}) *Builder {\r\n\treturn b.Join(\"INNER\", joinTable, joinCond)\r\n}", "func (b *Builder) InnerJoin(joinTable, joinCond interface{}) *Builder {\n\treturn b.Join(\"INNER\", joinTable, joinCond)\n}", "func NewInnerJoinOn(table string, from string, to string) JoinQuery {\n\treturn NewJoinWith(\"INNER JOIN\", table, from, to)\n}", "func NewInnerJoinOn(table string, from string, to string, filter ...FilterQuery) JoinQuery {\n\treturn NewJoinWith(\"INNER JOIN\", table, from, to, filter...)\n}", "func (b *JoinBuilder) InnerJoin(other *Table) *JoinBuilder {\n\treturn makeJoinBuilder(\"INNER JOIN\", b, other)\n}", "func (self Accessor) OuterJoin(expr interface{}) *SelectManager {\n\treturn self.From(self.Relation()).OuterJoin(expr)\n}", "func (left *DataTable) OuterJoin(right *DataTable, on []JoinOn) (*DataTable, error) {\n\treturn newJoinImpl(outerJoin, []*DataTable{left, right}, on).Compute()\n}", "func NewInnerJoin(table string, filter ...FilterQuery) JoinQuery {\n\treturn NewInnerJoinOn(table, \"\", \"\", filter...)\n}", "func (sd *SelectDataset) CrossJoin(table exp.Expression) *SelectDataset {\n\treturn sd.joinTable(exp.NewUnConditionedJoinExpression(exp.CrossJoinType, table))\n}", "func NewInnerJoin(table string) JoinQuery {\n\treturn NewInnerJoinOn(table, \"\", \"\")\n}", "func (sd *SelectDataset) joinTable(join exp.JoinExpression) *SelectDataset {\n\treturn sd.copy(sd.clauses.JoinsAppend(join))\n}", "func ExtractJoinEqualityFilter(\n\tleftCol, rightCol opt.ColumnID, leftCols, rightCols opt.ColSet, on FiltersExpr,\n) FiltersItem {\n\tfor i := range on {\n\t\tcondition := on[i].Condition\n\t\tok, left, right := ExtractJoinEquality(leftCols, rightCols, condition)\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\t\tif left == leftCol && right == rightCol {\n\t\t\treturn on[i]\n\t\t}\n\t}\n\tpanic(errors.AssertionFailedf(\"could not find equality between columns %d and %d in filters %s\",\n\t\tleftCol, rightCol, on.String(),\n\t))\n}", "func InnerJoin(table, on string) QueryOption {\n\treturn newFuncQueryOption(func(wrapper *QueryWrapper) 
{\n\t\twrapper.joins = append(wrapper.joins, \"INNER\", \"JOIN\", table, \"ON\", on)\n\t\twrapper.queryLen += 5\n\t})\n}", "func (q Query) Join(inner Query,\n\touterKeySelector func(interface{}) interface{},\n\tinnerKeySelector func(interface{}) interface{},\n\tresultSelector func(outer interface{}, inner interface{}) interface{}) Query {\n\n\treturn Query{\n\t\tIterate: func() Iterator {\n\t\t\touternext := q.Iterate()\n\t\t\tinnernext := inner.Iterate()\n\n\t\t\tinnerLookup := make(map[interface{}][]interface{})\n\t\t\tfor innerItem, ok := innernext(); ok; innerItem, ok = innernext() {\n\t\t\t\tinnerKey := innerKeySelector(innerItem)\n\t\t\t\tinnerLookup[innerKey] = append(innerLookup[innerKey], innerItem)\n\t\t\t}\n\n\t\t\tvar outerItem interface{}\n\t\t\tvar innerGroup []interface{}\n\t\t\tinnerLen, innerIndex := 0, 0\n\n\t\t\treturn func() (item interface{}, ok bool) {\n\t\t\t\tif innerIndex >= innerLen {\n\t\t\t\t\thas := false\n\t\t\t\t\tfor !has {\n\t\t\t\t\t\touterItem, ok = outernext()\n\t\t\t\t\t\tif !ok {\n\t\t\t\t\t\t\treturn\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tinnerGroup, has = innerLookup[outerKeySelector(outerItem)]\n\t\t\t\t\t\tinnerLen = len(innerGroup)\n\t\t\t\t\t\tinnerIndex = 0\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\titem = resultSelector(outerItem, innerGroup[innerIndex])\n\t\t\t\tinnerIndex++\n\t\t\t\treturn item, true\n\t\t\t}\n\t\t},\n\t}\n}", "func ExtractJoinEquality(\n\tleftCols, rightCols opt.ColSet, condition opt.ScalarExpr,\n) (ok bool, left, right opt.ColumnID) {\n\tlvar, rvar, ok := isVarEquality(condition)\n\tif !ok {\n\t\treturn false, 0, 0\n\t}\n\n\t// Don't allow mixed types (see #22519).\n\tif !lvar.DataType().Equivalent(rvar.DataType()) {\n\t\treturn false, 0, 0\n\t}\n\n\tif leftCols.Contains(lvar.Col) && rightCols.Contains(rvar.Col) {\n\t\treturn true, lvar.Col, rvar.Col\n\t}\n\tif leftCols.Contains(rvar.Col) && rightCols.Contains(lvar.Col) {\n\t\treturn true, rvar.Col, lvar.Col\n\t}\n\n\treturn false, 0, 0\n}", "func (sd *SelectDataset) FullOuterJoin(table exp.Expression, condition exp.JoinCondition) *SelectDataset {\n\treturn sd.joinTable(exp.NewConditionedJoinExpression(exp.FullOuterJoinType, table, condition))\n}", "func (sd *SelectDataset) RightOuterJoin(table exp.Expression, condition exp.JoinCondition) *SelectDataset {\n\treturn sd.joinTable(exp.NewConditionedJoinExpression(exp.RightOuterJoinType, table, condition))\n}", "func ExtractJoinEqualityColumns(\n\tleftCols, rightCols opt.ColSet, on FiltersExpr,\n) (leftEq opt.ColList, rightEq opt.ColList) {\n\tfor i := range on {\n\t\tcondition := on[i].Condition\n\t\tok, left, right := ExtractJoinEquality(leftCols, rightCols, condition)\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\t\t// Don't allow any column to show up twice.\n\t\t// TODO(radu): need to figure out the right thing to do in cases\n\t\t// like: left.a = right.a AND left.a = right.b\n\t\tduplicate := false\n\t\tfor i := range leftEq {\n\t\t\tif leftEq[i] == left || rightEq[i] == right {\n\t\t\t\tduplicate = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !duplicate {\n\t\t\tleftEq = append(leftEq, left)\n\t\t\trightEq = append(rightEq, right)\n\t\t}\n\t}\n\treturn leftEq, rightEq\n}", "func (a joinedTable) equal(b joinedTable) bool {\n\treturn a.secondaryTable == b.secondaryTable && a.primaryColumn == b.primaryColumn && a.secondaryColumn == b.secondaryColumn\n}", "func (sd *SelectDataset) NaturalJoin(table exp.Expression) *SelectDataset {\n\treturn sd.joinTable(exp.NewUnConditionedJoinExpression(exp.NaturalJoinType, table))\n}", "func (session *Session) 
Join(joinOperator string, tablename interface{}, condition string, args ...interface{}) *Session {\n\tsession.Statement.Join(joinOperator, tablename, condition, args...)\n\treturn session\n}", "func ExampleTable() {\n\tuser := q.T(\"user\", \"usr\")\n\tpost := q.T(\"post\", \"pst\")\n\t// user.id -> post.user_id\n\tuser.InnerJoin(post, q.Eq(user.C(\"id\"), post.C(\"user_id\")))\n\tfmt.Println(\"Short:\", user)\n\n\tpostTag := q.T(\"posttag\", \"rel\")\n\ttag := q.T(\"tag\", \"tg\")\n\t// post.id -> posttag.post_id\n\tpost.InnerJoin(postTag, q.Eq(post.C(\"id\"), postTag.C(\"post_id\")))\n\t// posttag.tag_id -> tag.id\n\tpostTag.InnerJoin(tag, q.Eq(postTag.C(\"tag_id\"), tag.C(\"id\")))\n\tfmt.Println(\"Long: \", user)\n\t// Output:\n\t// Short: \"user\" AS \"usr\" INNER JOIN \"post\" AS \"pst\" ON \"usr\".\"id\" = \"pst\".\"user_id\" []\n\t// Long: \"user\" AS \"usr\" INNER JOIN (\"post\" AS \"pst\" INNER JOIN (\"posttag\" AS \"rel\" INNER JOIN \"tag\" AS \"tg\" ON \"rel\".\"tag_id\" = \"tg\".\"id\") ON \"pst\".\"id\" = \"rel\".\"post_id\") ON \"usr\".\"id\" = \"pst\".\"user_id\" []\n}", "func JoinWith(handler *model.JoinTableHandler, ne *engine.Engine, source interface{}) error {\n\tne.Scope.ContextValue(source)\n\ttableName := handler.TableName\n\tquotedTableName := Quote(ne, tableName)\n\tvar joinConditions []string\n\tm, err := GetModelStruct(ne, source)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif handler.Source.ModelType == m.ModelType {\n\t\td := reflect.New(handler.Destination.ModelType).Interface()\n\t\tdestinationTableName := QuotedTableName(ne, d)\n\t\tfor _, foreignKey := range handler.Destination.ForeignKeys {\n\t\t\tjoinConditions = append(joinConditions, fmt.Sprintf(\"%v.%v = %v.%v\",\n\t\t\t\tquotedTableName,\n\t\t\t\tQuote(ne, foreignKey.DBName),\n\t\t\t\tdestinationTableName,\n\t\t\t\tQuote(ne, foreignKey.AssociationDBName)))\n\t\t}\n\n\t\tvar foreignDBNames []string\n\t\tvar foreignFieldNames []string\n\t\tfor _, foreignKey := range handler.Source.ForeignKeys {\n\t\t\tforeignDBNames = append(foreignDBNames, foreignKey.DBName)\n\t\t\tif field, ok := FieldByName(ne, source, foreignKey.AssociationDBName); ok == nil {\n\t\t\t\tforeignFieldNames = append(foreignFieldNames, field.Name)\n\t\t\t}\n\t\t}\n\n\t\tforeignFieldValues := util.ColumnAsArray(foreignFieldNames, ne.Scope.ValueOf())\n\n\t\tvar condString string\n\t\tif len(foreignFieldValues) > 0 {\n\t\t\tvar quotedForeignDBNames []string\n\t\t\tfor _, dbName := range foreignDBNames {\n\t\t\t\tquotedForeignDBNames = append(quotedForeignDBNames, tableName+\".\"+dbName)\n\t\t\t}\n\n\t\t\tcondString = fmt.Sprintf(\"%v IN (%v)\",\n\t\t\t\tToQueryCondition(ne, quotedForeignDBNames),\n\t\t\t\tutil.ToQueryMarks(foreignFieldValues))\n\t\t} else {\n\t\t\tcondString = fmt.Sprintf(\"1 <> 1\")\n\t\t}\n\n\t\tsearch.Join(ne,\n\t\t\tfmt.Sprintf(\"INNER JOIN %v ON %v\",\n\t\t\t\tquotedTableName,\n\t\t\t\tstrings.Join(joinConditions, \" AND \")))\n\t\tsearch.Where(ne, condString, util.ToQueryValues(foreignFieldValues)...)\n\t\treturn nil\n\t}\n\treturn errors.New(\"wrong source type for join table handler\")\n}", "func (t *TableExpr) JoinSQL() string {\n\tif t.JoinConditions != \"\" && t.JoinType != \"\" {\n\t\treturn \" \" + t.JoinType + \" \" + t.Table.SQL() + \" on \" +\n\t\t\tt.JoinConditions\n\t}\n\treturn \", \" + t.Table.SQL()\n}", "func OuterJoin(tables []*DataTable, on []JoinOn) (*DataTable, error) {\n\treturn newJoinImpl(outerJoin, tables, on).Compute()\n}", "func (sd *SelectDataset) LeftOuterJoin(table exp.Expression, 
condition exp.JoinCondition) *SelectDataset {\n\treturn sd.joinTable(exp.NewConditionedJoinExpression(exp.LeftOuterJoinType, table, condition))\n}", "func (s *BaseMySqlParserListener) EnterInnerJoin(ctx *InnerJoinContext) {}", "func (sd *SelectDataset) FullJoin(table exp.Expression, condition exp.JoinCondition) *SelectDataset {\n\treturn sd.joinTable(exp.NewConditionedJoinExpression(exp.FullJoinType, table, condition))\n}", "func (b *Builder) CrossJoin(joinTable string, joinCond interface{}) *Builder {\r\n\treturn b.Join(\"CROSS\", joinTable, joinCond)\r\n}", "func (left *DataTable) LeftJoin(right *DataTable, on []JoinOn) (*DataTable, error) {\n\treturn newJoinImpl(leftJoin, []*DataTable{left, right}, on).Compute()\n}", "func TestJoinTableSqlBuilder(t *testing.T) {\n\tmock := NewMockOptimizer(false)\n\n\t// should pass\n\tsqls := []string{\n\t\t\"SELECT N_NAME,N_REGIONKEY FROM NATION join REGION on NATION.N_REGIONKEY = REGION.R_REGIONKEY\",\n\t\t\"SELECT N_NAME, N_REGIONKEY FROM NATION join REGION on NATION.N_REGIONKEY = REGION.R_REGIONKEY WHERE NATION.N_REGIONKEY > 0\",\n\t\t\"SELECT N_NAME, NATION2.R_REGIONKEY FROM NATION2 join REGION using(R_REGIONKEY) WHERE NATION2.R_REGIONKEY > 0\",\n\t\t\"SELECT N_NAME, NATION2.R_REGIONKEY FROM NATION2 NATURAL JOIN REGION WHERE NATION2.R_REGIONKEY > 0\",\n\t\t\"SELECT N_NAME FROM NATION NATURAL JOIN REGION\", //have no same column name but it's ok\n\t\t\"SELECT N_NAME,N_REGIONKEY FROM NATION a join REGION b on a.N_REGIONKEY = b.R_REGIONKEY WHERE a.N_REGIONKEY > 0\", //test alias\n\t\t\"SELECT l.L_ORDERKEY a FROM CUSTOMER c, ORDERS o, LINEITEM l WHERE c.C_CUSTKEY = o.O_CUSTKEY and l.L_ORDERKEY = o.O_ORDERKEY and o.O_ORDERKEY < 10\", //join three tables\n\t\t\"SELECT c.* FROM CUSTOMER c, ORDERS o, LINEITEM l WHERE c.C_CUSTKEY = o.O_CUSTKEY and l.L_ORDERKEY = o.O_ORDERKEY\", //test star\n\t\t\"SELECT * FROM CUSTOMER c, ORDERS o, LINEITEM l WHERE c.C_CUSTKEY = o.O_CUSTKEY and l.L_ORDERKEY = o.O_ORDERKEY\", //test star\n\t\t\"SELECT a.* FROM NATION a join REGION b on a.N_REGIONKEY = b.R_REGIONKEY WHERE a.N_REGIONKEY > 0\", //test star\n\t\t\"SELECT * FROM NATION a join REGION b on a.N_REGIONKEY = b.R_REGIONKEY WHERE a.N_REGIONKEY > 0\",\n\t\t\"SELECT N_NAME, R_REGIONKEY FROM NATION2 join REGION using(R_REGIONKEY)\",\n\t\t\"select nation.n_name from nation join nation2 on nation.n_name !='a' join region on nation.n_regionkey = region.r_regionkey\",\n\t\t\"select * from nation, nation2, region\",\n\t}\n\trunTestShouldPass(mock, t, sqls, false, false)\n\n\t// should error\n\tsqls = []string{\n\t\t\"SELECT N_NAME,N_REGIONKEY FROM NATION join REGION on NATION.N_REGIONKEY = REGION.NotExistColumn\", //column not exist\n\t\t\"SELECT N_NAME, R_REGIONKEY FROM NATION join REGION using(R_REGIONKEY)\", //column not exist\n\t\t\"SELECT N_NAME,N_REGIONKEY FROM NATION a join REGION b on a.N_REGIONKEY = b.R_REGIONKEY WHERE aaaaa.N_REGIONKEY > 0\", //table alias not exist\n\t\t\"select *\", //No table used\n\t\t\"SELECT * FROM NATION a join REGION b on a.N_REGIONKEY = b.R_REGIONKEY WHERE a.N_REGIONKEY > 0 for update\", //Not support\n\t\t\"select * from nation, nation2, region for update\", // Not support\n\t}\n\trunTestShouldError(mock, t, sqls)\n}", "func ExtractJoinEqualityFilters(leftCols, rightCols opt.ColSet, on FiltersExpr) FiltersExpr {\n\t// We want to avoid allocating a new slice unless strictly necessary.\n\tvar newFilters FiltersExpr\n\tfor i := range on {\n\t\tcondition := on[i].Condition\n\t\tok, _, _ := ExtractJoinEquality(leftCols, rightCols, 
condition)\n\t\tif ok {\n\t\t\tif newFilters != nil {\n\t\t\t\tnewFilters = append(newFilters, on[i])\n\t\t\t}\n\t\t} else {\n\t\t\tif newFilters == nil {\n\t\t\t\tnewFilters = make(FiltersExpr, i, len(on)-1)\n\t\t\t\tcopy(newFilters, on[:i])\n\t\t\t}\n\t\t}\n\t}\n\tif newFilters != nil {\n\t\treturn newFilters\n\t}\n\treturn on\n}", "func (session *Session) Join(joinOperator string, tablename interface{}, condition string, args ...interface{}) *Session {\n\tsession.Session = session.Session.Join(joinOperator, tablename, condition, args...)\n\treturn session\n}", "func (stmt *statement) LeftJoin(table, on string) Statement {\n\tstmt.join(\"LEFT JOIN \", table, on)\n\treturn stmt\n}", "func (b *Builder) CrossJoin(joinTable, joinCond interface{}) *Builder {\n\treturn b.Join(\"CROSS\", joinTable, joinCond)\n}", "func (w *Wrapper) JoinWhere(table interface{}, args ...interface{}) *Wrapper {\n\tw.saveJoinCondition(\"AND\", table, args...)\n\treturn w\n}", "func (stmt *statement) Join(table, on string) Statement {\n\tstmt.join(\"JOIN \", table, on)\n\treturn stmt\n}", "func ELTMap2SelectSQL(nodeLink *NodeLinkInfo, outputName string) (string, error) {\n\t// TODO: will return SELECT\n\tvar b bytes.Buffer\n\twhereConds := make([]string, 0, 0)\n\n\tb.WriteString(\"SELECT \")\n\n\tinputs, _ := getInputTables(&nodeLink.Node)\n\toutput, _ := getOutputTable(&nodeLink.Node, outputName)\n\n\tvar firstcol = true\n\tfor _, col := range output.Columns {\n\t\tif !firstcol {\n\t\t\tb.WriteString(\", \")\n\t\t}\n\t\tfirstcol = false\n\t\tb.WriteString(strings.Trim(col.Expression, \" \"))\n\t\tb.WriteString(\" AS \")\n\t\tb.WriteString(TakeRightObj(col.Name))\n\t}\n\n\tb.WriteString(\" FROM \")\n\n\tvar firsttable = true\n\tfor _, input := range inputs {\n\n\t\tvar linkInput *NodeLinkInfo\n\t\tfor _, prevConn := range nodeLink.PrevConns {\n\t\t\tif prevConn.Label == input.TableName {\n\t\t\t\tlinkInput = prevConn.Link\n\t\t\t}\n\t\t}\n\t\tif linkInput == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tcomponentType := GetComponentType(&linkInput.Node)\n\t\tvar fromItem string\n\t\tswitch componentType {\n\t\tcase ComponentELTInput:\n\t\t\tfromItem, _ = tELTInput2FromItemSQL(linkInput)\n\t\tcase ComponentELTMap:\n\t\t\tfromItem, _ = ELTMap2SelectSQL(linkInput, input.TableName)\n\t\t\tfromItem = \"(\" + fromItem + \")\"\n\t\t}\n\t\talias := input.Alias\n\n\t\tif input.JoinType == \"NO_JOIN\" {\n\t\t\tif !firsttable {\n\t\t\t\tb.WriteRune(',')\n\t\t\t}\n\t\t\tb.WriteString(fromItem + \" \" + TakeRightObj(alias) + \" \")\n\t\t} else {\n\t\t\t// append `join`` phrase\n\t\t\tb.WriteString(joinType2join(input.JoinType) + \" \" + fromItem + \" \" + TakeRightObj(alias))\n\n\t\t\t// make `on` phrase\n\t\t\tb.WriteString(\" ON (\")\n\t\t\tfirstcol := true\n\t\t\tfor _, col := range input.Columns {\n\t\t\t\tif !col.Join {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tif !firstcol {\n\t\t\t\t\tb.WriteString(\" AND \")\n\t\t\t\t}\n\t\t\t\tfirstcol = false\n\t\t\t\tb.WriteString(col2cond(alias, &col))\n\t\t\t}\n\t\t\tb.WriteString(\")\")\n\t\t}\n\t\t// collect `where` phrase\n\t\tfor _, col := range input.Columns {\n\t\t\tif col.Join {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif col.Operator == \"\" {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\twhereConds = append(whereConds, col2cond(alias, &col))\n\t\t}\n\n\t\tfirsttable = false\n\t}\n\n\twhereConds = append(whereConds, output.Filters...)\n\n\tif len(whereConds) > 0 {\n\t\tb.WriteString(\" WHERE \")\n\t\tb.WriteString(strings.Join(whereConds, \" AND \"))\n\t}\n\tif len(output.OtherFilters) > 0 
{\n\t\tb.WriteRune(' ')\n\t\tb.WriteString(strings.Join(output.OtherFilters, \" \"))\n\t}\n\n\treturn b.String(), nil\n}", "func JoinWithQL(handler *model.JoinTableHandler, ne *engine.Engine, source interface{}) error {\n\tne.Scope.ContextValue(source)\n\ttableName := handler.TableName\n\tquotedTableName := Quote(ne, tableName)\n\tvar joinConditions []string\n\tm, err := GetModelStruct(ne, source)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif handler.Source.ModelType == m.ModelType {\n\t\tne.Search.TableNames = append(ne.Search.TableNames, handler.TableName)\n\t\td := reflect.New(handler.Destination.ModelType).Interface()\n\t\tdestinationTableName := QuotedTableName(ne, d)\n\t\tfor _, foreignKey := range handler.Destination.ForeignKeys {\n\t\t\tjoinConditions = append(joinConditions, fmt.Sprintf(\"%v.%v = %v.%v\",\n\t\t\t\tquotedTableName,\n\t\t\t\tQuote(ne, foreignKey.DBName),\n\t\t\t\tdestinationTableName,\n\t\t\t\tQuote(ne, foreignKey.AssociationDBName)))\n\t\t}\n\n\t\tvar foreignDBNames []string\n\t\tvar foreignFieldNames []string\n\t\tfor _, foreignKey := range handler.Source.ForeignKeys {\n\t\t\tforeignDBNames = append(foreignDBNames, foreignKey.DBName)\n\t\t\tif field, ok := FieldByName(ne, source, foreignKey.AssociationDBName); ok == nil {\n\t\t\t\tforeignFieldNames = append(foreignFieldNames, field.Name)\n\t\t\t}\n\t\t}\n\n\t\tforeignFieldValues := util.ColumnAsArray(foreignFieldNames, ne.Scope.ValueOf())\n\n\t\tif len(foreignFieldValues) > 0 {\n\t\t\tvar quotedForeignDBNames []string\n\t\t\tfor _, dbName := range foreignDBNames {\n\t\t\t\tquotedForeignDBNames = append(quotedForeignDBNames, tableName+\".\"+dbName)\n\t\t\t}\n\t\t\tfor _, q := range quotedForeignDBNames {\n\t\t\t\tjoinConditions = append(joinConditions, fmt.Sprintf(\"%s=?\", q))\n\t\t\t}\n\t\t}\n\t\tsearch.Where(ne, strings.Join(joinConditions, \" AND \"), util.ToQueryValues(foreignFieldValues)...)\n\t\treturn nil\n\t}\n\treturn errors.New(\"wrong source type for join table handler\")\n}", "func (stmt *statement) FullJoin(table, on string) Statement {\n\tstmt.join(\"FULL JOIN \", table, on)\n\treturn stmt\n}", "func (q Query) JoinT(inner Query,\n\touterKeySelectorFn interface{},\n\tinnerKeySelectorFn interface{},\n\tresultSelectorFn interface{}) Query {\n\touterKeySelectorGenericFunc, err := newGenericFunc(\n\t\t\"JoinT\", \"outerKeySelectorFn\", outerKeySelectorFn,\n\t\tsimpleParamValidator(newElemTypeSlice(new(genericType)), newElemTypeSlice(new(genericType))),\n\t)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\touterKeySelectorFunc := func(item interface{}) interface{} {\n\t\treturn outerKeySelectorGenericFunc.Call(item)\n\t}\n\n\tinnerKeySelectorFuncGenericFunc, err := newGenericFunc(\n\t\t\"JoinT\", \"innerKeySelectorFn\",\n\t\tinnerKeySelectorFn,\n\t\tsimpleParamValidator(newElemTypeSlice(new(genericType)), newElemTypeSlice(new(genericType))),\n\t)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tinnerKeySelectorFunc := func(item interface{}) interface{} {\n\t\treturn innerKeySelectorFuncGenericFunc.Call(item)\n\t}\n\n\tresultSelectorGenericFunc, err := newGenericFunc(\n\t\t\"JoinT\", \"resultSelectorFn\", resultSelectorFn,\n\t\tsimpleParamValidator(newElemTypeSlice(new(genericType), new(genericType)), newElemTypeSlice(new(genericType))),\n\t)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tresultSelectorFunc := func(outer interface{}, inner interface{}) interface{} {\n\t\treturn resultSelectorGenericFunc.Call(outer, inner)\n\t}\n\n\treturn q.Join(inner, outerKeySelectorFunc, innerKeySelectorFunc, 
resultSelectorFunc)\n}", "func (sd *SelectDataset) LeftJoin(table exp.Expression, condition exp.JoinCondition) *SelectDataset {\n\treturn sd.joinTable(exp.NewConditionedJoinExpression(exp.LeftJoinType, table, condition))\n}", "func BreakExpressionInLHSandRHS(\n\tctx *plancontext.PlanningContext,\n\texpr sqlparser.Expr,\n\tlhs semantics.TableSet,\n) (col JoinColumn, err error) {\n\trewrittenExpr := sqlparser.CopyOnRewrite(expr, nil, func(cursor *sqlparser.CopyOnWriteCursor) {\n\t\tnode, ok := cursor.Node().(*sqlparser.ColName)\n\t\tif !ok {\n\t\t\treturn\n\t\t}\n\t\tdeps := ctx.SemTable.RecursiveDeps(node)\n\t\tif deps.IsEmpty() {\n\t\t\terr = vterrors.VT13001(\"unknown column. has the AST been copied?\")\n\t\t\tcursor.StopTreeWalk()\n\t\t\treturn\n\t\t}\n\t\tif !deps.IsSolvedBy(lhs) {\n\t\t\treturn\n\t\t}\n\n\t\tnode.Qualifier.Qualifier = sqlparser.NewIdentifierCS(\"\")\n\t\tcol.LHSExprs = append(col.LHSExprs, node)\n\t\tbvName := node.CompliantName()\n\t\tcol.BvNames = append(col.BvNames, bvName)\n\t\targ := sqlparser.NewArgument(bvName)\n\t\t// we are replacing one of the sides of the comparison with an argument,\n\t\t// but we don't want to lose the type information we have, so we copy it over\n\t\tctx.SemTable.CopyExprInfo(node, arg)\n\t\tcursor.Replace(arg)\n\t}, nil).(sqlparser.Expr)\n\n\tif err != nil {\n\t\treturn JoinColumn{}, err\n\t}\n\tctx.JoinPredicates[expr] = append(ctx.JoinPredicates[expr], rewrittenExpr)\n\tcol.RHSExpr = rewrittenExpr\n\treturn\n}", "func (w *Wrapper) LeftJoin(table interface{}, condition string) *Wrapper {\n\tw.saveJoin(table, \"LEFT JOIN\", condition)\n\treturn w\n}", "func (s *BasePlSqlParserListener) EnterOuter_join_sign(ctx *Outer_join_signContext) {}", "func (w *Wrapper) NaturalJoin(table interface{}, condition string) *Wrapper {\n\tw.saveJoin(table, \"NATURAL JOIN\", condition)\n\treturn w\n}", "func (dr *DataRow) joinOnColumnIndexRight(rightRow DataRow, onColumnIndexRight int) DataRow {\n\toutItems := make([]DataItem, 0, len(dr.Items)+len(rightRow.Items)-1)\n\t// append left row\n\toutItems = append(outItems, dr.Items...)\n\t// append right row except on column\n\toutItems = append(outItems, rightRow.Items[:onColumnIndexRight]...)\n\toutItems = append(outItems, rightRow.Items[onColumnIndexRight+1:]...)\n\n\treturn DataRow{\n\t\tItems: outItems,\n\t}\n}", "func (stmt *statement) RightJoin(table, on string) Statement {\n\tstmt.join(\"RIGHT JOIN \", table, on)\n\treturn stmt\n}", "func (r1 *csvTable) Join(r2 rel.Relation, zero interface{}) rel.Relation {\n\treturn rel.NewJoin(r1, r2, zero)\n}", "func OuterJoin(lx, rx reflect.Value) reflect.Value {\n\tljoin := LeftJoin(lx, rx)\n\trjoin := RightJoin(lx, rx)\n\n\tresult := reflect.MakeSlice(reflect.SliceOf(lx.Type().Elem()), ljoin.Len()+rjoin.Len(), ljoin.Len()+rjoin.Len())\n\tfor i := 0; i < ljoin.Len(); i++ {\n\t\tresult.Index(i).Set(ljoin.Index(i))\n\t}\n\tfor i := 0; i < rjoin.Len(); i++ {\n\t\tresult.Index(ljoin.Len() + i).Set(rjoin.Index(i))\n\t}\n\n\treturn result\n}", "func (left *DataTable) RightJoin(right *DataTable, on []JoinOn) (*DataTable, error) {\n\treturn newJoinImpl(rightJoin, []*DataTable{left, right}, on).Compute()\n}", "func (*outerJoinEliminator) extractInnerJoinKeys(join *LogicalJoin, innerChildIdx int) *expression.Schema {\n\tjoinKeys := make([]*expression.Column, 0, len(join.EqualConditions))\n\tfor _, eqCond := range join.EqualConditions {\n\t\tjoinKeys = append(joinKeys, eqCond.GetArgs()[innerChildIdx].(*expression.Column))\n\t}\n\treturn expression.NewSchema(joinKeys...)\n}", 
"func (sd *SelectDataset) NaturalFullJoin(table exp.Expression) *SelectDataset {\n\treturn sd.joinTable(exp.NewUnConditionedJoinExpression(exp.NaturalFullJoinType, table))\n}", "func (sd *SelectDataset) RightJoin(table exp.Expression, condition exp.JoinCondition) *SelectDataset {\n\treturn sd.joinTable(exp.NewConditionedJoinExpression(exp.RightJoinType, table, condition))\n}", "func EqualsTableExpr(inA, inB TableExpr) bool {\n\tif inA == nil && inB == nil {\n\t\treturn true\n\t}\n\tif inA == nil || inB == nil {\n\t\treturn false\n\t}\n\tswitch a := inA.(type) {\n\tcase *AliasedTableExpr:\n\t\tb, ok := inB.(*AliasedTableExpr)\n\t\tif !ok {\n\t\t\treturn false\n\t\t}\n\t\treturn EqualsRefOfAliasedTableExpr(a, b)\n\tcase *JoinTableExpr:\n\t\tb, ok := inB.(*JoinTableExpr)\n\t\tif !ok {\n\t\t\treturn false\n\t\t}\n\t\treturn EqualsRefOfJoinTableExpr(a, b)\n\tcase *ParenTableExpr:\n\t\tb, ok := inB.(*ParenTableExpr)\n\t\tif !ok {\n\t\t\treturn false\n\t\t}\n\t\treturn EqualsRefOfParenTableExpr(a, b)\n\tdefault:\n\t\t// this should never happen\n\t\treturn false\n\t}\n}", "func EqualsRefOfJoinTableExpr(a, b *JoinTableExpr) bool {\n\tif a == b {\n\t\treturn true\n\t}\n\tif a == nil || b == nil {\n\t\treturn false\n\t}\n\treturn EqualsTableExpr(a.LeftExpr, b.LeftExpr) &&\n\t\ta.Join == b.Join &&\n\t\tEqualsTableExpr(a.RightExpr, b.RightExpr) &&\n\t\tEqualsJoinCondition(a.Condition, b.Condition)\n}", "func makeJoinRow(a *Row, b *Row) *Row {\n\tret := &Row{\n\t\tRowKeys: make([]*RowKeyEntry, 0, len(a.RowKeys)+len(b.RowKeys)),\n\t\tData: make([]types.Datum, 0, len(a.Data)+len(b.Data)),\n\t}\n\tret.RowKeys = append(ret.RowKeys, a.RowKeys...)\n\tret.RowKeys = append(ret.RowKeys, b.RowKeys...)\n\tret.Data = append(ret.Data, a.Data...)\n\tret.Data = append(ret.Data, b.Data...)\n\treturn ret\n}", "func (f *predicateSqlizerFactory) addJoinsToSelectBuilder(q sq.SelectBuilder) sq.SelectBuilder {\n\tfor i, alias := range f.joinedTables {\n\t\taliasName := f.aliasName(alias.secondaryTable, i)\n\t\tjoinClause := fmt.Sprintf(\"%s AS %s ON %s = %s\",\n\t\t\tf.db.tableName(alias.secondaryTable), pq.QuoteIdentifier(aliasName),\n\t\t\tfullQuoteIdentifier(f.primaryTable, alias.primaryColumn),\n\t\t\tfullQuoteIdentifier(aliasName, alias.secondaryColumn))\n\t\tq = q.LeftJoin(joinClause)\n\t}\n\n\tif len(f.joinedTables) > 0 {\n\t\tq = q.Distinct()\n\t}\n\treturn q\n}", "func TestPlanner_Plan_Join(t *testing.T) {\n\tdb := NewDB(\"2000-01-01T12:00:00Z\")\n\tdb.WriteSeries(\"cpu.0\", map[string]string{}, \"2000-01-01T00:00:00Z\", map[string]interface{}{\"value\": float64(1)})\n\tdb.WriteSeries(\"cpu.0\", map[string]string{}, \"2000-01-01T00:00:10Z\", map[string]interface{}{\"value\": float64(2)})\n\tdb.WriteSeries(\"cpu.0\", map[string]string{}, \"2000-01-01T00:00:20Z\", map[string]interface{}{\"value\": float64(3)})\n\tdb.WriteSeries(\"cpu.0\", map[string]string{}, \"2000-01-01T00:00:30Z\", map[string]interface{}{\"value\": float64(4)})\n\n\tdb.WriteSeries(\"cpu.1\", map[string]string{}, \"2000-01-01T00:00:00Z\", map[string]interface{}{\"value\": float64(10)})\n\tdb.WriteSeries(\"cpu.1\", map[string]string{}, \"2000-01-01T00:00:10Z\", map[string]interface{}{\"value\": float64(20)})\n\tdb.WriteSeries(\"cpu.1\", map[string]string{}, \"2000-01-01T00:00:30Z\", map[string]interface{}{\"value\": float64(40)})\n\n\t// Query must join the series and sum the values.\n\trs := db.MustPlanAndExecute(`\n\t\tSELECT sum(cpu.0.value) + sum(cpu.1.value) AS sum\n\t\tFROM JOIN(cpu.0, cpu.1)\n\t\tWHERE time >= '2000-01-01 00:00:00' AND time < 
'2000-01-01 00:01:00'\n\t\tGROUP BY time(10s)`)\n\n\t// Expected resultset.\n\texp := minify(`[{\n\t\t\"columns\":[\"time\",\"sum\"],\n\t\t\"values\":[\n\t\t\t[946684800000000,11],\n\t\t\t[946684810000000,22],\n\t\t\t[946684820000000,3],\n\t\t\t[946684830000000,44],\n\t\t\t[946684840000000,0],\n\t\t\t[946684850000000,0]\n\t\t]\n\t}]`)\n\n\t// Compare resultsets.\n\tif act := jsonify(rs); exp != act {\n\t\tt.Fatalf(\"unexpected resultset: %s\", indent(act))\n\t}\n}", "func (t *Table) LeftJoin(offset int32, count int, crit string, target interface{}) error {\n\tbody, err := t.LeftJoinRaw(offset, count, crit)\n\tif err == nil {\n\t\terr = json.Unmarshal(body, &target)\n\t}\n\treturn err\n}", "func (sd *SelectDataset) NaturalRightJoin(table exp.Expression) *SelectDataset {\n\treturn sd.joinTable(exp.NewUnConditionedJoinExpression(exp.NaturalRightJoinType, table))\n}", "func TestBaseColExpNode_Intersect(t *testing.T) {\n\tcol := Column(myTb, \"Col\")\n\tarr := Array(3, 50, 70, 80)\n\tassert.Equal(t, fmt.Sprintf(`\"%s\".\"Col\" && ARRAY[3, 50, 70, 80]`, myTbTable),\n\t\tAstToSQL(col.Intersect(arr)))\n}", "func (s *BasePlSqlParserListener) EnterOuter_join_type(ctx *Outer_join_typeContext) {}", "func LeftJoin(tables []*DataTable, on []JoinOn) (*DataTable, error) {\n\treturn newJoinImpl(leftJoin, tables, on).Compute()\n}", "func EqualsRefOfAliasedTableExpr(a, b *AliasedTableExpr) bool {\n\tif a == b {\n\t\treturn true\n\t}\n\tif a == nil || b == nil {\n\t\treturn false\n\t}\n\treturn EqualsSimpleTableExpr(a.Expr, b.Expr) &&\n\t\tEqualsPartitions(a.Partitions, b.Partitions) &&\n\t\tEqualsTableIdent(a.As, b.As) &&\n\t\tEqualsRefOfIndexHints(a.Hints, b.Hints)\n}", "func (b *Builder) Join(joinType, joinTable string, joinCond interface{}) *Builder {\r\n\tswitch joinCond.(type) {\r\n\tcase Cond:\r\n\t\tb.joins = append(b.joins, join{joinType, joinTable, joinCond.(Cond)})\r\n\tcase string:\r\n\t\tb.joins = append(b.joins, join{joinType, joinTable, Expr(joinCond.(string))})\r\n\t}\r\n\r\n\treturn b\r\n}", "func NewJoinOn(table string, from string, to string, filter ...FilterQuery) JoinQuery {\n\treturn NewJoinWith(\"JOIN\", table, from, to, filter...)\n}", "func RightJoin(tables []*DataTable, on []JoinOn) (*DataTable, error) {\n\treturn newJoinImpl(rightJoin, tables, on).Compute()\n}", "func NewJoinOn(table string, from string, to string) JoinQuery {\n\treturn NewJoinWith(\"JOIN\", table, from, to)\n}", "func (b *JoinBuilder) Using(cols ...interface{}) *JoinBuilder {\n\tvar vals Columns\n\tfor _, c := range cols {\n\t\tvar name string\n\t\tswitch t := c.(type) {\n\t\tcase string:\n\t\t\tname = t\n\t\tcase ValExprBuilder:\n\t\t\tif n, ok := t.ValExpr.(*ColName); ok {\n\t\t\t\tname = n.Name\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tvar v ValExpr\n\t\tif len(name) == 0 {\n\t\t\tv = makeErrVal(\"unsupported type %T: %v\", c, c)\n\t\t} else if b.leftTable.column(name) == nil ||\n\t\t\tb.rightTable.column(name) == nil {\n\t\t\tv = makeErrVal(\"invalid join column: %s\", name)\n\t\t} else {\n\t\t\tv = &ColName{Name: name}\n\t\t}\n\t\tvals = append(vals, &NonStarExpr{Expr: v})\n\t}\n\tb.Cond = &UsingJoinCond{Cols: vals}\n\treturn b\n}", "func (mp *JoinMultiplicity) JoinPreservesRightRows(op opt.Operator) bool {\n\tswitch op {\n\tcase opt.InnerJoinOp, opt.LeftJoinOp:\n\t\tbreak\n\n\tcase opt.FullJoinOp:\n\t\treturn true\n\n\tcase opt.SemiJoinOp:\n\t\tpanic(errors.AssertionFailedf(\"right rows are not included in the output of a %v\", op))\n\n\tdefault:\n\t\tpanic(errors.AssertionFailedf(\"unsupported operator: %v\", 
op))\n\t}\n\treturn mp.JoinFiltersMatchAllRightRows()\n}", "func (sd *SelectDataset) NaturalLeftJoin(table exp.Expression) *SelectDataset {\n\treturn sd.joinTable(exp.NewUnConditionedJoinExpression(exp.NaturalLeftJoinType, table))\n}", "func (f *predicateSqlizerFactory) createLeftJoin(secondaryTable string, primaryColumn string, secondaryColumn string) string {\n\tnewAlias := joinedTable{secondaryTable, primaryColumn, secondaryColumn}\n\tfor i, alias := range f.joinedTables {\n\t\tif alias.equal(newAlias) {\n\t\t\treturn f.aliasName(secondaryTable, i)\n\t\t}\n\t}\n\n\tf.joinedTables = append(f.joinedTables, newAlias)\n\treturn f.aliasName(secondaryTable, len(f.joinedTables)-1)\n}", "func EqualsTableExprs(a, b TableExprs) bool {\n\tif len(a) != len(b) {\n\t\treturn false\n\t}\n\tfor i := 0; i < len(a); i++ {\n\t\tif !EqualsTableExpr(a[i], b[i]) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func (mySelf SQLJoin) Outer() SQLJoin {\n\tmySelf.outer = true\n\treturn mySelf\n}", "func JoinQuery(c *Context, sep string, values []interface{}) string {\n\ts := make([]interface{}, len(values)*2-1)\n\tfor k, v := range values {\n\t\tif k > 0 {\n\t\t\ts[k*2-1] = sep\n\t\t}\n\t\ts[k*2] = MakeField(v)\n\t}\n\n\treturn ConcatQuery(c, s...)\n}", "func (s *BaseMySqlParserListener) EnterOuterJoin(ctx *OuterJoinContext) {}", "func main() {\n\t// bigger table\n\tfmt.Printf(\"%X \\n\", JoinExample(\"./t/r0.tbl\", \"./t/r0.tbl\", []int{0}, []int{1})) // 767636031\n\tfmt.Printf(\"%X \\n\", JoinExample(\"./t/r0.tbl\", \"./t/r1.tbl\", []int{0}, []int{0})) // 49082128576\n\tfmt.Printf(\"%X \\n\", JoinExample(\"./t/r0.tbl\", \"./t/r1.tbl\", []int{1}, []int{1})) // 85306117839070\n\tfmt.Printf(\"%X \\n\", JoinExample(\"./t/r0.tbl\", \"./t/r2.tbl\", []int{0}, []int{0})) // 48860100254\n\tfmt.Printf(\"%X \\n\", JoinExample(\"./t/r0.tbl\", \"./t/r1.tbl\", []int{0, 1}, []int{0, 1})) //5552101\n\tfmt.Printf(\"%X \\n\", JoinExample(\"./t/r1.tbl\", \"./t/r2.tbl\", []int{0}, []int{0})) // 6331038719880\n\tfmt.Printf(\"%X \\n\", JoinExample(\"./t/r2.tbl\", \"./t/r2.tbl\", []int{0, 1}, []int{0, 1})) // 42056985375886\n}", "func JoinColumnValues(values ...Fragment) *ColumnValues {\n\treturn &ColumnValues{ColumnValues: values}\n}", "func (b *Builder) Join(joinType string, joinTable, joinCond interface{}) *Builder {\n\tswitch joinCond.(type) {\n\tcase Cond:\n\t\tb.joins = append(b.joins, join{joinType, joinTable, joinCond.(Cond)})\n\tcase string:\n\t\tb.joins = append(b.joins, join{joinType, joinTable, Expr(joinCond.(string))})\n\t}\n\n\treturn b\n}", "func (mp *JoinMultiplicity) JoinPreservesLeftRows(op opt.Operator) bool {\n\tswitch op {\n\tcase opt.InnerJoinOp, opt.SemiJoinOp:\n\t\tbreak\n\n\tcase opt.LeftJoinOp, opt.FullJoinOp:\n\t\treturn true\n\n\tdefault:\n\t\tpanic(errors.AssertionFailedf(\"unsupported operator: %v\", op))\n\t}\n\treturn mp.JoinFiltersMatchAllLeftRows()\n}", "func (ds *MySQL) Join(source, key, targetKey, joinType string, fields []string) {\n\tds.joinedRepositories[source] = builders.Join{\n\t\tSource: source,\n\t\tKey: key,\n\t\tTargetKey: targetKey,\n\t\tType: joinType,\n\t\tFields: fields,\n\t}\n}", "func (b *Builder) FullJoin(joinTable string, joinCond interface{}) *Builder {\r\n\treturn b.Join(\"FULL\", joinTable, joinCond)\r\n}", "func (w *Wrapper) RightJoin(table interface{}, condition string) *Wrapper {\n\tw.saveJoin(table, \"RIGHT JOIN\", condition)\n\treturn w\n}", "func (e *HashJoinExec) constructMatchedRows(ctx *hashJoinCtx, bigRow *Row) (matchedRows []*Row, err error) {\n\tsc := 
e.ctx.GetSessionVars().StmtCtx\n\thasNull, joinKey, err := getJoinKey(sc, e.bigHashKey, bigRow, e.targetTypes, ctx.datumBuffer, ctx.hashKeyBuffer[0:0:cap(ctx.hashKeyBuffer)])\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\n\tif hasNull {\n\t\treturn\n\t}\n\tvalues := e.hashTable.Get(joinKey)\n\tif len(values) == 0 {\n\t\treturn\n\t}\n\t// match eq condition\n\tfor _, value := range values {\n\t\tvar smallRow *Row\n\t\tsmallRow, err = e.decodeRow(value)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Trace(err)\n\t\t}\n\t\tvar matchedRow *Row\n\t\tif e.leftSmall {\n\t\t\tmatchedRow = makeJoinRow(smallRow, bigRow)\n\t\t} else {\n\t\t\tmatchedRow = makeJoinRow(bigRow, smallRow)\n\t\t}\n\t\totherMatched, err := expression.EvalBool(ctx.otherFilter, matchedRow.Data, e.ctx)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Trace(err)\n\t\t}\n\t\tif otherMatched {\n\t\t\tmatchedRows = append(matchedRows, matchedRow)\n\t\t}\n\t}\n\treturn matchedRows, nil\n}", "func EqualsRefOfDerivedTable(a, b *DerivedTable) bool {\n\tif a == b {\n\t\treturn true\n\t}\n\tif a == nil || b == nil {\n\t\treturn false\n\t}\n\treturn EqualsSelectStatement(a.Select, b.Select)\n}", "func TestAggregateLeftJoin(t *testing.T) {\n\tmcmp, closer := start(t)\n\tdefer closer()\n\n\tmcmp.Exec(\"insert into t1(t1_id, name, value, shardKey) values (11, 'r', 'r', 1), (3, 'r', 'r', 0)\")\n\tmcmp.Exec(\"insert into t2(id, shardKey) values (11, 1)\")\n\n\tmcmp.AssertMatchesNoOrder(\"SELECT t1.shardkey FROM t1 LEFT JOIN t2 ON t1.t1_id = t2.id\", `[[INT64(1)] [INT64(0)]]`)\n\tmcmp.AssertMatches(\"SELECT count(t1.shardkey) FROM t1 LEFT JOIN t2 ON t1.t1_id = t2.id\", `[[INT64(2)]]`)\n\tmcmp.AssertMatches(\"SELECT count(t2.shardkey) FROM t1 LEFT JOIN t2 ON t1.t1_id = t2.id\", `[[INT64(1)]]`)\n\tmcmp.AssertMatches(\"SELECT count(*) FROM t1 LEFT JOIN t2 ON t1.t1_id = t2.id\", `[[INT64(2)]]`)\n\tmcmp.AssertMatches(\"SELECT sum(t1.shardkey) FROM t1 LEFT JOIN t2 ON t1.t1_id = t2.id\", `[[DECIMAL(1)]]`)\n\tmcmp.AssertMatches(\"SELECT sum(t2.shardkey) FROM t1 LEFT JOIN t2 ON t1.t1_id = t2.id\", `[[DECIMAL(1)]]`)\n\tmcmp.AssertMatches(\"SELECT count(*) FROM t2 LEFT JOIN t1 ON t1.t1_id = t2.id WHERE IFNULL(t1.name, 'NOTSET') = 'r'\", `[[INT64(1)]]`)\n}", "func (filter *JoinFilter) JoinClause(structMap TableAndColumnLocater, dialect gorp.Dialect, startBindIdx int) (string, []interface{}, error) {\n\tjoin := \" inner join \" + filter.QuotedJoinTable\n\ton, args, err := filter.AndFilter.Where(structMap, dialect, startBindIdx)\n\tif err != nil {\n\t\treturn \"\", nil, err\n\t}\n\tif on != \"\" {\n\t\tjoin += \" on \" + on\n\t}\n\treturn join, args, nil\n}", "func (pb *primitiveBuilder) expandStar(inrcs []*resultColumn, expr *sqlparser.StarExpr) (outrcs []*resultColumn, expanded bool, err error) {\n\ttables := pb.st.AllTables()\n\tif tables == nil {\n\t\t// no table metadata available.\n\t\treturn inrcs, false, nil\n\t}\n\tif expr.TableName.IsEmpty() {\n\t\tfor _, t := range tables {\n\t\t\t// All tables must have authoritative column lists.\n\t\t\tif !t.isAuthoritative {\n\t\t\t\treturn inrcs, false, nil\n\t\t\t}\n\t\t}\n\t\tsingleTable := false\n\t\tif len(tables) == 1 {\n\t\t\tsingleTable = true\n\t\t}\n\t\tfor _, t := range tables {\n\t\t\tfor _, col := range t.columnNames {\n\t\t\t\tvar expr *sqlparser.AliasedExpr\n\t\t\t\tif singleTable {\n\t\t\t\t\t// If there's only one table, we use unqualified column names.\n\t\t\t\t\texpr = &sqlparser.AliasedExpr{\n\t\t\t\t\t\tExpr: &sqlparser.ColName{\n\t\t\t\t\t\t\tMetadata: 
t.columns[col.Lowered()],\n\t\t\t\t\t\t\tName: col,\n\t\t\t\t\t\t},\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\t// If a and b have id as their column, then\n\t\t\t\t\t// select * from a join b should result in\n\t\t\t\t\t// select a.id as id, b.id as id from a join b.\n\t\t\t\t\texpr = &sqlparser.AliasedExpr{\n\t\t\t\t\t\tExpr: &sqlparser.ColName{\n\t\t\t\t\t\t\tMetadata: t.columns[col.Lowered()],\n\t\t\t\t\t\t\tName: col,\n\t\t\t\t\t\t\tQualifier: t.alias,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tAs: col,\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tnewBuilder, rc, _, err := planProjection(pb, pb.plan, expr, t.Origin())\n\t\t\t\tif err != nil {\n\t\t\t\t\t// Unreachable because PushSelect won't fail on ColName.\n\t\t\t\t\treturn inrcs, false, err\n\t\t\t\t}\n\t\t\t\tpb.plan = newBuilder\n\t\t\t\tinrcs = append(inrcs, rc)\n\t\t\t}\n\t\t}\n\t\treturn inrcs, true, nil\n\t}\n\n\t// Expression qualified with table name.\n\tt, err := pb.st.FindTable(expr.TableName)\n\tif err != nil {\n\t\treturn inrcs, false, err\n\t}\n\tif !t.isAuthoritative {\n\t\treturn inrcs, false, nil\n\t}\n\tfor _, col := range t.columnNames {\n\t\texpr := &sqlparser.AliasedExpr{\n\t\t\tExpr: &sqlparser.ColName{\n\t\t\t\tMetadata: t.columns[col.Lowered()],\n\t\t\t\tName: col,\n\t\t\t\tQualifier: expr.TableName,\n\t\t\t},\n\t\t}\n\t\tnewBuilder, rc, _, err := planProjection(pb, pb.plan, expr, t.Origin())\n\t\tif err != nil {\n\t\t\t// Unreachable because PushSelect won't fail on ColName.\n\t\t\treturn inrcs, false, err\n\t\t}\n\t\tpb.plan = newBuilder\n\t\tinrcs = append(inrcs, rc)\n\t}\n\treturn inrcs, true, nil\n}", "func (w *Wrapper) saveJoin(table interface{}, typ string, condition string) {\n\tswitch v := table.(type) {\n\t// Sub query joining.\n\tcase *Wrapper:\n\t\tw.joins[v.query] = &join{\n\t\t\ttyp: typ,\n\t\t\ttable: table,\n\t\t\tcondition: condition,\n\t\t}\n\t// Common table joining.\n\tcase string:\n\t\tw.joins[v] = &join{\n\t\t\ttyp: typ,\n\t\t\ttable: table,\n\t\t\tcondition: condition,\n\t\t}\n\t}\n}", "func NewLeftJoinOn(table string, from string, to string) JoinQuery {\n\treturn NewJoinWith(\"LEFT JOIN\", table, from, to)\n}", "func (q *Select) AddJoinTable(table *Table, joinExpr string, joinType string) *TableExpr {\n\treturn q.AddTableExpr(&TableExpr{\n\t\tTable: table,\n\t\tJoinConditions: joinExpr,\n\t\tJoinType: joinType,\n\t})\n}", "func getJoinKey(sc *variable.StatementContext, cols []*expression.Column, row *Row, targetTypes []*types.FieldType,\n\tvals []types.Datum, bytes []byte) (bool, []byte, error) {\n\tvar err error\n\tfor i, col := range cols {\n\t\tvals[i], err = col.Eval(row.Data)\n\t\tif err != nil {\n\t\t\treturn false, nil, errors.Trace(err)\n\t\t}\n\t\tif vals[i].IsNull() {\n\t\t\treturn true, nil, nil\n\t\t}\n\t\tvals[i], err = vals[i].ConvertTo(sc, targetTypes[i])\n\t\tif err != nil {\n\t\t\treturn false, nil, errors.Trace(err)\n\t\t}\n\t}\n\tif len(vals) == 0 {\n\t\treturn false, nil, nil\n\t}\n\tbytes, err = codec.EncodeValue(bytes, vals...)\n\treturn false, bytes, errors.Trace(err)\n}" ]
[ "0.66318774", "0.66049737", "0.63518083", "0.6313514", "0.6241781", "0.5914187", "0.58916134", "0.58709943", "0.57758653", "0.573988", "0.5735636", "0.56673557", "0.56110114", "0.55918485", "0.5525171", "0.5516812", "0.5477167", "0.54671127", "0.5388112", "0.5373619", "0.53682", "0.5357585", "0.52885693", "0.5269897", "0.5253985", "0.5234607", "0.5201106", "0.51873493", "0.5168981", "0.5146707", "0.51432276", "0.51402175", "0.51170504", "0.51100177", "0.50239694", "0.5008479", "0.50065243", "0.5001065", "0.4990703", "0.49846056", "0.49591756", "0.49545977", "0.4932776", "0.4882025", "0.48504597", "0.4849782", "0.48405907", "0.4826477", "0.48182452", "0.4787481", "0.47697172", "0.47553006", "0.47400615", "0.4725076", "0.4720869", "0.47203305", "0.47156873", "0.4706862", "0.4699785", "0.4692361", "0.46913052", "0.46846992", "0.46819404", "0.46697026", "0.46654597", "0.4663524", "0.46447864", "0.46126425", "0.4609841", "0.4608025", "0.45955786", "0.45854974", "0.45839345", "0.4578794", "0.45777047", "0.45439887", "0.4540637", "0.45396522", "0.45338076", "0.45144823", "0.44966337", "0.4489573", "0.44893768", "0.4479976", "0.4473193", "0.4467736", "0.44590804", "0.4434069", "0.44327664", "0.4431316", "0.4426347", "0.44228467", "0.4419065", "0.44114968", "0.44113812", "0.44029728", "0.43985966", "0.43973702", "0.43912247", "0.43843433" ]
0.66840595
0
InnerJoin selects records that have matching values in both tables. tables[0] is used as the reference datatable.
func InnerJoin(tables []*DataTable, on []JoinOn) (*DataTable, error) { return newJoinImpl(innerJoin, tables, on).Compute() }
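A minimal usage sketch of the InnerJoin function above. Only the InnerJoin signature and the JoinOn shape produced by the Using helper (both visible in this record) are taken from the source; the loadUsers/loadOrders helpers and the shared "id" column are hypothetical stand-ins, not part of the dataset:

package main

import "log"

func main() {
	users := loadUsers()   // hypothetical helper returning *DataTable
	orders := loadOrders() // hypothetical helper returning *DataTable

	// Using("id") builds []JoinOn{{Table: "*", Field: "id"}} (per the Using
	// snippet in the negatives below), matching rows on the shared "id"
	// column; users (tables[0]) acts as the reference datatable.
	joined, err := InnerJoin([]*DataTable{users, orders}, Using("id"))
	if err != nil {
		log.Fatal(err)
	}
	_ = joined // keeps only rows whose "id" value appears in both tables
}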
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (left *DataTable) InnerJoin(right *DataTable, on []JoinOn) (*DataTable, error) {\n\treturn newJoinImpl(innerJoin, []*DataTable{left, right}, on).Compute()\n}", "func (sd *SelectDataset) InnerJoin(table exp.Expression, condition exp.JoinCondition) *SelectDataset {\n\treturn sd.joinTable(exp.NewConditionedJoinExpression(exp.InnerJoinType, table, condition))\n}", "func (w *Wrapper) InnerJoin(table interface{}, condition string) *Wrapper {\n\tw.saveJoin(table, \"INNER JOIN\", condition)\n\treturn w\n}", "func NewInnerJoinOn(table string, from string, to string, filter ...FilterQuery) JoinQuery {\n\treturn NewJoinWith(\"INNER JOIN\", table, from, to, filter...)\n}", "func (b *Builder) InnerJoin(joinTable string, joinCond interface{}) *Builder {\r\n\treturn b.Join(\"INNER\", joinTable, joinCond)\r\n}", "func NewInnerJoinOn(table string, from string, to string) JoinQuery {\n\treturn NewJoinWith(\"INNER JOIN\", table, from, to)\n}", "func (b *Builder) InnerJoin(joinTable, joinCond interface{}) *Builder {\n\treturn b.Join(\"INNER\", joinTable, joinCond)\n}", "func (b *JoinBuilder) InnerJoin(other *Table) *JoinBuilder {\n\treturn makeJoinBuilder(\"INNER JOIN\", b, other)\n}", "func (sd *SelectDataset) Join(table exp.Expression, condition exp.JoinCondition) *SelectDataset {\n\treturn sd.InnerJoin(table, condition)\n}", "func InnerJoin(lx, rx reflect.Value) reflect.Value {\n\tresult := reflect.MakeSlice(reflect.SliceOf(lx.Type().Elem()), 0, lx.Len()+rx.Len())\n\trhash := hashSlice(rx)\n\tlhash := make(map[interface{}]struct{}, lx.Len())\n\n\tfor i := 0; i < lx.Len(); i++ {\n\t\tv := lx.Index(i)\n\t\t_, ok := rhash[v.Interface()]\n\t\t_, alreadyExists := lhash[v.Interface()]\n\t\tif ok && !alreadyExists {\n\t\t\tlhash[v.Interface()] = struct{}{}\n\t\t\tresult = reflect.Append(result, v)\n\t\t}\n\t}\n\treturn result\n}", "func InnerJoin(table, on string) QueryOption {\n\treturn newFuncQueryOption(func(wrapper *QueryWrapper) {\n\t\twrapper.joins = append(wrapper.joins, \"INNER\", \"JOIN\", table, \"ON\", on)\n\t\twrapper.queryLen += 5\n\t})\n}", "func NewInnerJoin(table string, filter ...FilterQuery) JoinQuery {\n\treturn NewInnerJoinOn(table, \"\", \"\", filter...)\n}", "func InnerJoin(clause string, args ...interface{}) QueryMod {\n\treturn func(q *queries.Query) {\n\t\tqueries.AppendInnerJoin(q, clause, args...)\n\t}\n}", "func (sd *SelectDataset) joinTable(join exp.JoinExpression) *SelectDataset {\n\treturn sd.copy(sd.clauses.JoinsAppend(join))\n}", "func NewInnerJoin(table string) JoinQuery {\n\treturn NewInnerJoinOn(table, \"\", \"\")\n}", "func (self Accessor) InnerJoin(expr interface{}) *SelectManager {\n\treturn self.From(self.Relation()).InnerJoin(expr)\n}", "func ExampleTable() {\n\tuser := q.T(\"user\", \"usr\")\n\tpost := q.T(\"post\", \"pst\")\n\t// user.id -> post.user_id\n\tuser.InnerJoin(post, q.Eq(user.C(\"id\"), post.C(\"user_id\")))\n\tfmt.Println(\"Short:\", user)\n\n\tpostTag := q.T(\"posttag\", \"rel\")\n\ttag := q.T(\"tag\", \"tg\")\n\t// post.id -> posttag.post_id\n\tpost.InnerJoin(postTag, q.Eq(post.C(\"id\"), postTag.C(\"post_id\")))\n\t// posttag.tag_id -> tag.id\n\tpostTag.InnerJoin(tag, q.Eq(postTag.C(\"tag_id\"), tag.C(\"id\")))\n\tfmt.Println(\"Long: \", user)\n\t// Output:\n\t// Short: \"user\" AS \"usr\" INNER JOIN \"post\" AS \"pst\" ON \"usr\".\"id\" = \"pst\".\"user_id\" []\n\t// Long: \"user\" AS \"usr\" INNER JOIN (\"post\" AS \"pst\" INNER JOIN (\"posttag\" AS \"rel\" INNER JOIN \"tag\" AS \"tg\" ON \"rel\".\"tag_id\" = \"tg\".\"id\") ON \"pst\".\"id\" = 
\"rel\".\"post_id\") ON \"usr\".\"id\" = \"pst\".\"user_id\" []\n}", "func (a joinedTable) equal(b joinedTable) bool {\n\treturn a.secondaryTable == b.secondaryTable && a.primaryColumn == b.primaryColumn && a.secondaryColumn == b.secondaryColumn\n}", "func (left *DataTable) OuterJoin(right *DataTable, on []JoinOn) (*DataTable, error) {\n\treturn newJoinImpl(outerJoin, []*DataTable{left, right}, on).Compute()\n}", "func OuterJoin(tables []*DataTable, on []JoinOn) (*DataTable, error) {\n\treturn newJoinImpl(outerJoin, tables, on).Compute()\n}", "func (w *Wrapper) JoinWhere(table interface{}, args ...interface{}) *Wrapper {\n\tw.saveJoinCondition(\"AND\", table, args...)\n\treturn w\n}", "func (q Query) Join(inner Query,\n\touterKeySelector func(interface{}) interface{},\n\tinnerKeySelector func(interface{}) interface{},\n\tresultSelector func(outer interface{}, inner interface{}) interface{}) Query {\n\n\treturn Query{\n\t\tIterate: func() Iterator {\n\t\t\touternext := q.Iterate()\n\t\t\tinnernext := inner.Iterate()\n\n\t\t\tinnerLookup := make(map[interface{}][]interface{})\n\t\t\tfor innerItem, ok := innernext(); ok; innerItem, ok = innernext() {\n\t\t\t\tinnerKey := innerKeySelector(innerItem)\n\t\t\t\tinnerLookup[innerKey] = append(innerLookup[innerKey], innerItem)\n\t\t\t}\n\n\t\t\tvar outerItem interface{}\n\t\t\tvar innerGroup []interface{}\n\t\t\tinnerLen, innerIndex := 0, 0\n\n\t\t\treturn func() (item interface{}, ok bool) {\n\t\t\t\tif innerIndex >= innerLen {\n\t\t\t\t\thas := false\n\t\t\t\t\tfor !has {\n\t\t\t\t\t\touterItem, ok = outernext()\n\t\t\t\t\t\tif !ok {\n\t\t\t\t\t\t\treturn\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tinnerGroup, has = innerLookup[outerKeySelector(outerItem)]\n\t\t\t\t\t\tinnerLen = len(innerGroup)\n\t\t\t\t\t\tinnerIndex = 0\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\titem = resultSelector(outerItem, innerGroup[innerIndex])\n\t\t\t\tinnerIndex++\n\t\t\t\treturn item, true\n\t\t\t}\n\t\t},\n\t}\n}", "func (sd *SelectDataset) FullOuterJoin(table exp.Expression, condition exp.JoinCondition) *SelectDataset {\n\treturn sd.joinTable(exp.NewConditionedJoinExpression(exp.FullOuterJoinType, table, condition))\n}", "func (stmt *statement) FullJoin(table, on string) Statement {\n\tstmt.join(\"FULL JOIN \", table, on)\n\treturn stmt\n}", "func (sd *SelectDataset) CrossJoin(table exp.Expression) *SelectDataset {\n\treturn sd.joinTable(exp.NewUnConditionedJoinExpression(exp.CrossJoinType, table))\n}", "func (sd *SelectDataset) FullJoin(table exp.Expression, condition exp.JoinCondition) *SelectDataset {\n\treturn sd.joinTable(exp.NewConditionedJoinExpression(exp.FullJoinType, table, condition))\n}", "func (s *BaseMySqlParserListener) EnterInnerJoin(ctx *InnerJoinContext) {}", "func ExtractJoinEqualityFilters(leftCols, rightCols opt.ColSet, on FiltersExpr) FiltersExpr {\n\t// We want to avoid allocating a new slice unless strictly necessary.\n\tvar newFilters FiltersExpr\n\tfor i := range on {\n\t\tcondition := on[i].Condition\n\t\tok, _, _ := ExtractJoinEquality(leftCols, rightCols, condition)\n\t\tif ok {\n\t\t\tif newFilters != nil {\n\t\t\t\tnewFilters = append(newFilters, on[i])\n\t\t\t}\n\t\t} else {\n\t\t\tif newFilters == nil {\n\t\t\t\tnewFilters = make(FiltersExpr, i, len(on)-1)\n\t\t\t\tcopy(newFilters, on[:i])\n\t\t\t}\n\t\t}\n\t}\n\tif newFilters != nil {\n\t\treturn newFilters\n\t}\n\treturn on\n}", "func (session *Session) Join(joinOperator string, tablename interface{}, condition string, args ...interface{}) *Session {\n\tsession.Statement.Join(joinOperator, tablename, 
condition, args...)\n\treturn session\n}", "func (stmt *statement) Join(table, on string) Statement {\n\tstmt.join(\"JOIN \", table, on)\n\treturn stmt\n}", "func (r1 *csvTable) Join(r2 rel.Relation, zero interface{}) rel.Relation {\n\treturn rel.NewJoin(r1, r2, zero)\n}", "func (session *Session) Join(joinOperator string, tablename interface{}, condition string, args ...interface{}) *Session {\n\tsession.Session = session.Session.Join(joinOperator, tablename, condition, args...)\n\treturn session\n}", "func NewJoinOn(table string, from string, to string, filter ...FilterQuery) JoinQuery {\n\treturn NewJoinWith(\"JOIN\", table, from, to, filter...)\n}", "func (arr *filterTableArr) UnionEqual(other filterTableArr) *filterTableArr {\n\tfor i, el := range other {\n\t\tif el {\n\t\t\tarr[i] = true\n\t\t}\n\t}\n\n\treturn arr\n}", "func ExtractJoinEqualityFilter(\n\tleftCol, rightCol opt.ColumnID, leftCols, rightCols opt.ColSet, on FiltersExpr,\n) FiltersItem {\n\tfor i := range on {\n\t\tcondition := on[i].Condition\n\t\tok, left, right := ExtractJoinEquality(leftCols, rightCols, condition)\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\t\tif left == leftCol && right == rightCol {\n\t\t\treturn on[i]\n\t\t}\n\t}\n\tpanic(errors.AssertionFailedf(\"could not find equality between columns %d and %d in filters %s\",\n\t\tleftCol, rightCol, on.String(),\n\t))\n}", "func (sd *SelectDataset) NaturalJoin(table exp.Expression) *SelectDataset {\n\treturn sd.joinTable(exp.NewUnConditionedJoinExpression(exp.NaturalJoinType, table))\n}", "func NewJoinOn(table string, from string, to string) JoinQuery {\n\treturn NewJoinWith(\"JOIN\", table, from, to)\n}", "func Using(fields ...string) []JoinOn {\n\tvar jon []JoinOn\n\tfor _, f := range fields {\n\t\tjon = append(jon, JoinOn{Table: \"*\", Field: f})\n\t}\n\treturn jon\n}", "func NewFullJoinOn(table string, from string, to string, filter ...FilterQuery) JoinQuery {\n\treturn NewJoinWith(\"FULL JOIN\", table, from, to, filter...)\n}", "func (imw *innerMergeWorker) fetchInnerRowsWithSameKey(ctx context.Context, task *lookUpMergeJoinTask, key chunk.Row) (noneInnerRows bool, err error) {\n\ttask.sameKeyInnerRows = task.sameKeyInnerRows[:0]\n\tcurRow := task.innerIter.Current()\n\tvar cmpRes int\n\tfor cmpRes, err = imw.compare(key, curRow); ((cmpRes >= 0 && !imw.desc) || (cmpRes <= 0 && imw.desc)) && err == nil; cmpRes, err = imw.compare(key, curRow) {\n\t\tif cmpRes == 0 {\n\t\t\ttask.sameKeyInnerRows = append(task.sameKeyInnerRows, curRow)\n\t\t}\n\t\tcurRow = task.innerIter.Next()\n\t\tif curRow == task.innerIter.End() {\n\t\t\tcurRow, err = imw.fetchNextInnerResult(ctx, task)\n\t\t\tif err != nil || task.innerResult.NumRows() == 0 {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\ttask.sameKeyIter = chunk.NewIterator4Slice(task.sameKeyInnerRows)\n\ttask.sameKeyIter.Begin()\n\tnoneInnerRows = task.innerResult.NumRows() == 0\n\treturn\n}", "func (encryptor *QueryDataEncryptor) getTablesFromUpdate(tables sqlparser.TableExprs) []*tableData {\n\tvar outputTables []*tableData\n\tfor _, tableExpr := range tables {\n\t\tswitch statement := tableExpr.(type) {\n\t\tcase *sqlparser.AliasedTableExpr:\n\t\t\taliasedStatement := statement.Expr.(sqlparser.SimpleTableExpr)\n\t\t\tswitch simpleTableStatement := aliasedStatement.(type) {\n\t\t\tcase sqlparser.TableName:\n\t\t\t\toutputTables = append(outputTables, &tableData{TableName: simpleTableStatement, As: statement.As})\n\t\t\tcase *sqlparser.Subquery:\n\t\t\t\t// unsupported\n\t\t\tdefault:\n\t\t\t\tlogrus.Debugf(\"Unsupported 
SimpleTableExpr type %s\", reflect.TypeOf(simpleTableStatement))\n\t\t\t}\n\t\tcase *sqlparser.ParenTableExpr:\n\t\t\toutputTables = append(outputTables, encryptor.getTablesFromUpdate(statement.Exprs)...)\n\t\tcase *sqlparser.JoinTableExpr:\n\t\t\toutputTables = append(outputTables, encryptor.getTablesFromUpdate(sqlparser.TableExprs{statement.LeftExpr, statement.RightExpr})...)\n\t\tdefault:\n\t\t\tlogrus.Debugf(\"Unsupported TableExpr type %s\", reflect.TypeOf(tableExpr))\n\t\t}\n\t}\n\treturn outputTables\n}", "func NewFullJoinOn(table string, from string, to string) JoinQuery {\n\treturn NewJoinWith(\"FULL JOIN\", table, from, to)\n}", "func (b *Builder) CrossJoin(joinTable, joinCond interface{}) *Builder {\n\treturn b.Join(\"CROSS\", joinTable, joinCond)\n}", "func (b *Builder) CrossJoin(joinTable string, joinCond interface{}) *Builder {\r\n\treturn b.Join(\"CROSS\", joinTable, joinCond)\r\n}", "func (b *Builder) FullJoin(joinTable string, joinCond interface{}) *Builder {\r\n\treturn b.Join(\"FULL\", joinTable, joinCond)\r\n}", "func EqualsRefOfDerivedTable(a, b *DerivedTable) bool {\n\tif a == b {\n\t\treturn true\n\t}\n\tif a == nil || b == nil {\n\t\treturn false\n\t}\n\treturn EqualsSelectStatement(a.Select, b.Select)\n}", "func ExtractJoinEqualityColumns(\n\tleftCols, rightCols opt.ColSet, on FiltersExpr,\n) (leftEq opt.ColList, rightEq opt.ColList) {\n\tfor i := range on {\n\t\tcondition := on[i].Condition\n\t\tok, left, right := ExtractJoinEquality(leftCols, rightCols, condition)\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\t\t// Don't allow any column to show up twice.\n\t\t// TODO(radu): need to figure out the right thing to do in cases\n\t\t// like: left.a = right.a AND left.a = right.b\n\t\tduplicate := false\n\t\tfor i := range leftEq {\n\t\t\tif leftEq[i] == left || rightEq[i] == right {\n\t\t\t\tduplicate = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !duplicate {\n\t\t\tleftEq = append(leftEq, left)\n\t\t\trightEq = append(rightEq, right)\n\t\t}\n\t}\n\treturn leftEq, rightEq\n}", "func ExtractJoinEquality(\n\tleftCols, rightCols opt.ColSet, condition opt.ScalarExpr,\n) (ok bool, left, right opt.ColumnID) {\n\tlvar, rvar, ok := isVarEquality(condition)\n\tif !ok {\n\t\treturn false, 0, 0\n\t}\n\n\t// Don't allow mixed types (see #22519).\n\tif !lvar.DataType().Equivalent(rvar.DataType()) {\n\t\treturn false, 0, 0\n\t}\n\n\tif leftCols.Contains(lvar.Col) && rightCols.Contains(rvar.Col) {\n\t\treturn true, lvar.Col, rvar.Col\n\t}\n\tif leftCols.Contains(rvar.Col) && rightCols.Contains(lvar.Col) {\n\t\treturn true, rvar.Col, lvar.Col\n\t}\n\n\treturn false, 0, 0\n}", "func (sd *SelectDataset) RightOuterJoin(table exp.Expression, condition exp.JoinCondition) *SelectDataset {\n\treturn sd.joinTable(exp.NewConditionedJoinExpression(exp.RightOuterJoinType, table, condition))\n}", "func RightJoin(tables []*DataTable, on []JoinOn) (*DataTable, error) {\n\treturn newJoinImpl(rightJoin, tables, on).Compute()\n}", "func (b *Builder) FullJoin(joinTable, joinCond interface{}) *Builder {\n\treturn b.Join(\"FULL\", joinTable, joinCond)\n}", "func JoinWith(handler *model.JoinTableHandler, ne *engine.Engine, source interface{}) error {\n\tne.Scope.ContextValue(source)\n\ttableName := handler.TableName\n\tquotedTableName := Quote(ne, tableName)\n\tvar joinConditions []string\n\tm, err := GetModelStruct(ne, source)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif handler.Source.ModelType == m.ModelType {\n\t\td := reflect.New(handler.Destination.ModelType).Interface()\n\t\tdestinationTableName := 
QuotedTableName(ne, d)\n\t\tfor _, foreignKey := range handler.Destination.ForeignKeys {\n\t\t\tjoinConditions = append(joinConditions, fmt.Sprintf(\"%v.%v = %v.%v\",\n\t\t\t\tquotedTableName,\n\t\t\t\tQuote(ne, foreignKey.DBName),\n\t\t\t\tdestinationTableName,\n\t\t\t\tQuote(ne, foreignKey.AssociationDBName)))\n\t\t}\n\n\t\tvar foreignDBNames []string\n\t\tvar foreignFieldNames []string\n\t\tfor _, foreignKey := range handler.Source.ForeignKeys {\n\t\t\tforeignDBNames = append(foreignDBNames, foreignKey.DBName)\n\t\t\tif field, ok := FieldByName(ne, source, foreignKey.AssociationDBName); ok == nil {\n\t\t\t\tforeignFieldNames = append(foreignFieldNames, field.Name)\n\t\t\t}\n\t\t}\n\n\t\tforeignFieldValues := util.ColumnAsArray(foreignFieldNames, ne.Scope.ValueOf())\n\n\t\tvar condString string\n\t\tif len(foreignFieldValues) > 0 {\n\t\t\tvar quotedForeignDBNames []string\n\t\t\tfor _, dbName := range foreignDBNames {\n\t\t\t\tquotedForeignDBNames = append(quotedForeignDBNames, tableName+\".\"+dbName)\n\t\t\t}\n\n\t\t\tcondString = fmt.Sprintf(\"%v IN (%v)\",\n\t\t\t\tToQueryCondition(ne, quotedForeignDBNames),\n\t\t\t\tutil.ToQueryMarks(foreignFieldValues))\n\t\t} else {\n\t\t\tcondString = fmt.Sprintf(\"1 <> 1\")\n\t\t}\n\n\t\tsearch.Join(ne,\n\t\t\tfmt.Sprintf(\"INNER JOIN %v ON %v\",\n\t\t\t\tquotedTableName,\n\t\t\t\tstrings.Join(joinConditions, \" AND \")))\n\t\tsearch.Where(ne, condString, util.ToQueryValues(foreignFieldValues)...)\n\t\treturn nil\n\t}\n\treturn errors.New(\"wrong source type for join table handler\")\n}", "func JoinTableValues(cols ...string) string {\n\tcols = strings_.SliceTrimEmpty(cols...)\n\tif len(cols) == 0 {\n\t\t// https://dev.mysql.com/doc/refman/5.7/en/data-type-defaults.html\n\t\t// DEFAULT\n\t\treturn \"\"\n\t}\n\treturn strings.Join(TableValues(cols...), \",\")\n}", "func applyTags(db *gorm.DB, tagIds []int64) *gorm.DB {\n\n\tdb = db.Joins(\"INNER JOIN products_product_item as it ON (products_product.id = it.product_id)\")\n\n\tfor i, tagID := range tagIds {\n\t\trelname := fmt.Sprintf(\"tagrel_%v\", i)\n\t\tdb = db.Joins(\n\t\t\tfmt.Sprintf(\n\t\t\t\t\"INNER JOIN products_product_item_tags as %v ON (%v.product_item_id = it.id AND %v.tag_id = ?)\",\n\t\t\t\trelname, relname, relname),\n\t\t\ttagID,\n\t\t)\n\t}\n\n\treturn db\n}", "func mergeTable(table1 *Table, table2 Table) {\n\tfor _, column := range table2.columns {\n\t\tif containsString(convertColumnsToColumnNames(table1.columns), column.name) {\n\t\t\ttable1.columns = append(table1.columns, column)\n\t\t}\n\t}\n\n\tfor _, index := range table2.indexes {\n\t\tif containsString(convertIndexesToIndexNames(table1.indexes), index.name) {\n\t\t\ttable1.indexes = append(table1.indexes, index)\n\t\t}\n\t}\n}", "func (sd *SelectDataset) Intersect(other *SelectDataset) *SelectDataset {\n\treturn sd.withCompound(exp.IntersectCompoundType, other.CompoundFromSelf())\n}", "func GetTablesMatching(ctx context.Context, dsExt *dataset.Dataset, filter string) ([]AnnotatedTable, error) {\n\talt := make([]AnnotatedTable, 0)\n\tti := dsExt.Tables(ctx)\n\tfor t, err := ti.Next(); err == nil; t, err = ti.Next() {\n\t\t// TODO should this be starts with? 
Or a regex?\n\t\tif strings.Contains(t.TableID(), filter) {\n\t\t\t// TODO - make this run in parallel\n\t\t\tat := AnnotatedTable{Table: t, dataset: dsExt}\n\t\t\t_, err := at.CachedMeta(ctx)\n\t\t\tif err == ErrNotRegularTable {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\talt = append(alt, at)\n\t\t}\n\t}\n\tsort.Slice(alt[:], func(i, j int) bool {\n\t\treturn alt[i].LastModifiedTime(ctx).Before(alt[j].LastModifiedTime(ctx))\n\t})\n\treturn alt, nil\n}", "func NewFullJoin(table string, filter ...FilterQuery) JoinQuery {\n\treturn NewFullJoinOn(table, \"\", \"\", filter...)\n}", "func (s *BaseMySqlParserListener) ExitInnerJoin(ctx *InnerJoinContext) {}", "func OuterJoin(lx, rx reflect.Value) reflect.Value {\n\tljoin := LeftJoin(lx, rx)\n\trjoin := RightJoin(lx, rx)\n\n\tresult := reflect.MakeSlice(reflect.SliceOf(lx.Type().Elem()), ljoin.Len()+rjoin.Len(), ljoin.Len()+rjoin.Len())\n\tfor i := 0; i < ljoin.Len(); i++ {\n\t\tresult.Index(i).Set(ljoin.Index(i))\n\t}\n\tfor i := 0; i < rjoin.Len(); i++ {\n\t\tresult.Index(ljoin.Len() + i).Set(rjoin.Index(i))\n\t}\n\n\treturn result\n}", "func (sd *SelectDataset) NaturalFullJoin(table exp.Expression) *SelectDataset {\n\treturn sd.joinTable(exp.NewUnConditionedJoinExpression(exp.NaturalFullJoinType, table))\n}", "func (left *DataTable) RightJoin(right *DataTable, on []JoinOn) (*DataTable, error) {\n\treturn newJoinImpl(rightJoin, []*DataTable{left, right}, on).Compute()\n}", "func (e *HashJoinExec) constructMatchedRows(ctx *hashJoinCtx, bigRow *Row) (matchedRows []*Row, err error) {\n\tsc := e.ctx.GetSessionVars().StmtCtx\n\thasNull, joinKey, err := getJoinKey(sc, e.bigHashKey, bigRow, e.targetTypes, ctx.datumBuffer, ctx.hashKeyBuffer[0:0:cap(ctx.hashKeyBuffer)])\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\n\tif hasNull {\n\t\treturn\n\t}\n\tvalues := e.hashTable.Get(joinKey)\n\tif len(values) == 0 {\n\t\treturn\n\t}\n\t// match eq condition\n\tfor _, value := range values {\n\t\tvar smallRow *Row\n\t\tsmallRow, err = e.decodeRow(value)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Trace(err)\n\t\t}\n\t\tvar matchedRow *Row\n\t\tif e.leftSmall {\n\t\t\tmatchedRow = makeJoinRow(smallRow, bigRow)\n\t\t} else {\n\t\t\tmatchedRow = makeJoinRow(bigRow, smallRow)\n\t\t}\n\t\totherMatched, err := expression.EvalBool(ctx.otherFilter, matchedRow.Data, e.ctx)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Trace(err)\n\t\t}\n\t\tif otherMatched {\n\t\t\tmatchedRows = append(matchedRows, matchedRow)\n\t\t}\n\t}\n\treturn matchedRows, nil\n}", "func (w *Wrapper) NaturalJoin(table interface{}, condition string) *Wrapper {\n\tw.saveJoin(table, \"NATURAL JOIN\", condition)\n\treturn w\n}", "func (s UserSet) Intersect(other m.UserSet) m.UserSet {\n\tres := s.Collection().Call(\"Intersect\", other)\n\tresTyped := res.(models.RecordSet).Collection().Wrap(\"User\").(m.UserSet)\n\treturn resTyped\n}", "func (w *Wrapper) saveJoin(table interface{}, typ string, condition string) {\n\tswitch v := table.(type) {\n\t// Sub query joining.\n\tcase *Wrapper:\n\t\tw.joins[v.query] = &join{\n\t\t\ttyp: typ,\n\t\t\ttable: table,\n\t\t\tcondition: condition,\n\t\t}\n\t// Common table joining.\n\tcase string:\n\t\tw.joins[v] = &join{\n\t\t\ttyp: typ,\n\t\t\ttable: table,\n\t\t\tcondition: condition,\n\t\t}\n\t}\n}", "func NewFullJoin(table string) JoinQuery {\n\treturn NewFullJoinOn(table, \"\", \"\")\n}", "func (*outerJoinEliminator) extractInnerJoinKeys(join *LogicalJoin, innerChildIdx int) *expression.Schema 
{\n\tjoinKeys := make([]*expression.Column, 0, len(join.EqualConditions))\n\tfor _, eqCond := range join.EqualConditions {\n\t\tjoinKeys = append(joinKeys, eqCond.GetArgs()[innerChildIdx].(*expression.Column))\n\t}\n\treturn expression.NewSchema(joinKeys...)\n}", "func TablesEqual(tbl1, tbl2 array.Table) (bool, string) {\n\n\tm1 := tbl1.NumCols()\n\tm2 := tbl2.NumCols()\n\tif m1 != m2 {\n\t\treturn false, fmt.Sprintf(\"Inconsistent number of columns, %d != %d\", m1, m2)\n\t}\n\n\tfor i := 0; i < int(m1); i++ {\n\n\t\tcol1 := tbl1.Column(i)\n\t\tcol2 := tbl2.Column(i)\n\n\t\tb, msg := ColumnsEqual(col1, col2)\n\t\tif !b {\n\t\t\treturn false, msg\n\t\t}\n\t}\n\n\treturn true, \"\"\n}", "func NewJoin(table string, filter ...FilterQuery) JoinQuery {\n\treturn NewJoinWith(\"JOIN\", table, \"\", \"\", filter...)\n}", "func JoinTableColumnsValues(cmp string, table string, cols ...string) string {\n\t//cols = strings_.SliceTrimEmpty(cols...)\n\treturn strings.Join(TableColumnsValues(cmp, table, cols...), \",\")\n}", "func searchFK(tableName string, id int) (query orm.QuerySeter) {\n\n\to := orm.NewOrm()\n\n\tquery = o.QueryTable(tableName).Filter(\"id\", id).Filter(\"deleted_at__isnull\", true).RelatedSel()\n\n\treturn\n}", "func (stmt *statement) RightJoin(table, on string) Statement {\n\tstmt.join(\"RIGHT JOIN \", table, on)\n\treturn stmt\n}", "func (q Query) JoinT(inner Query,\n\touterKeySelectorFn interface{},\n\tinnerKeySelectorFn interface{},\n\tresultSelectorFn interface{}) Query {\n\touterKeySelectorGenericFunc, err := newGenericFunc(\n\t\t\"JoinT\", \"outerKeySelectorFn\", outerKeySelectorFn,\n\t\tsimpleParamValidator(newElemTypeSlice(new(genericType)), newElemTypeSlice(new(genericType))),\n\t)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\touterKeySelectorFunc := func(item interface{}) interface{} {\n\t\treturn outerKeySelectorGenericFunc.Call(item)\n\t}\n\n\tinnerKeySelectorFuncGenericFunc, err := newGenericFunc(\n\t\t\"JoinT\", \"innerKeySelectorFn\",\n\t\tinnerKeySelectorFn,\n\t\tsimpleParamValidator(newElemTypeSlice(new(genericType)), newElemTypeSlice(new(genericType))),\n\t)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tinnerKeySelectorFunc := func(item interface{}) interface{} {\n\t\treturn innerKeySelectorFuncGenericFunc.Call(item)\n\t}\n\n\tresultSelectorGenericFunc, err := newGenericFunc(\n\t\t\"JoinT\", \"resultSelectorFn\", resultSelectorFn,\n\t\tsimpleParamValidator(newElemTypeSlice(new(genericType), new(genericType)), newElemTypeSlice(new(genericType))),\n\t)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tresultSelectorFunc := func(outer interface{}, inner interface{}) interface{} {\n\t\treturn resultSelectorGenericFunc.Call(outer, inner)\n\t}\n\n\treturn q.Join(inner, outerKeySelectorFunc, innerKeySelectorFunc, resultSelectorFunc)\n}", "func (r *MysqlDatasource) JoinWithAuthors(transaction *gorm.DB, id int) ([]*BookAuthorJoinModel, error) {\n\tdb := r.db\n\tif transaction != nil {\n\t\tdb = transaction\n\t}\n\n\tresult := []*BookAuthorJoinModel{}\n\terr := db.Raw(`\n\tSELECT \n\t\tbook.*,\n\t\tauthor.id as author_id, \n\t\tauthor.name as author_name, \n\t\tauthor.birthdate as author_birthdate\n\tFROM books book\n\tINNER JOIN book_authors ba on book.id = ba.book_id \n\tINNER JOIN authors author on author.id = ba.author_id \n\twhere book.id = ?\n\t`, id).Find(&result).Error\n\tif err != nil {\n\t\treturn nil, err\n\t} else if len(result) == 0 {\n\t\treturn nil, gorm.ErrRecordNotFound\n\t}\n\n\treturn result, nil\n}", "func makeJoinRow(a *Row, b *Row) *Row {\n\tret := 
&Row{\n\t\tRowKeys: make([]*RowKeyEntry, 0, len(a.RowKeys)+len(b.RowKeys)),\n\t\tData: make([]types.Datum, 0, len(a.Data)+len(b.Data)),\n\t}\n\tret.RowKeys = append(ret.RowKeys, a.RowKeys...)\n\tret.RowKeys = append(ret.RowKeys, b.RowKeys...)\n\tret.Data = append(ret.Data, a.Data...)\n\tret.Data = append(ret.Data, b.Data...)\n\treturn ret\n}", "func JoinQuery(c *Context, sep string, values []interface{}) string {\n\ts := make([]interface{}, len(values)*2-1)\n\tfor k, v := range values {\n\t\tif k > 0 {\n\t\t\ts[k*2-1] = sep\n\t\t}\n\t\ts[k*2] = MakeField(v)\n\t}\n\n\treturn ConcatQuery(c, s...)\n}", "func FullJoin(table, on string) QueryOption {\n\treturn newFuncQueryOption(func(wrapper *QueryWrapper) {\n\t\twrapper.joins = append(wrapper.joins, \"FULL\", \"JOIN\", table, \"ON\", on)\n\t\twrapper.queryLen += 5\n\t})\n}", "func (conn *Conn) Select(dataSet map[int][]string) (map[int][]string, error) {\n\tdb := conn.db\n\tresult := make(map[int][]string)\n\n\tfor userid, emails := range dataSet {\n\t\ttableName := \"unsub_\" + strconv.Itoa(modId(userid))\n\t\t//sqlStr := \"SELECT email FROM \" + tableName + \" WHERE user_id = ? and email = ?\"\n\t\tsqlStr := fmt.Sprintf(\"SELECT email FROM %s WHERE user_id = ? and email IN (%s)\",\n\t\t\ttableName,\n\t\t\tfmt.Sprintf(\"?\"+strings.Repeat(\",?\", len(emails)-1)))\n\t\targs := make([]interface{}, len(emails)+1)\n\t\targs[0] = userid\n\t\tfor i, email := range emails {\n\t\t\targs[i+1] = email\n\t\t}\n\t\trows, err := db.Query(sqlStr, args...)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Error querying db: %v\\n\", err)\n\t\t\treturn result, err\n\t\t}\n\t\tvar email string\n\t\tfor rows.Next() {\n\t\t\terr = rows.Scan(&email)\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"Error scanning row: %v\\n\", err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tresult[userid] = append(result[userid], email)\n\t\t}\n\t\tdefer rows.Close()\n\t\t/*\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"Error preparing statement\", err)\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tfor e := range emails {\n\t\t\t\tvar user_id int\n\t\t\t\tvar email string\n\n\t\t\t\terr = stmt.QueryRow(userid, emails[e]).Scan(&user_id, &email)\n\t\t\t\tif err != nil {\n\t\t\t\t\tif err == sql.ErrNoRows {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t} else {\n\t\t\t\t\t\tlog.Printf(\"Error querying row\", err)\n\t\t\t\t\t\treturn nil\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tresult[user_id] = append(result[user_id], email)\n\t\t\t}\n\t\t*/\n\t}\n\treturn result, nil\n}", "func (s *BaseMySqlParserListener) EnterOuterJoin(ctx *OuterJoinContext) {}", "func NewJoin(table string) JoinQuery {\n\treturn NewJoinWith(\"JOIN\", table, \"\", \"\")\n}", "func (self Accessor) OuterJoin(expr interface{}) *SelectManager {\n\treturn self.From(self.Relation()).OuterJoin(expr)\n}", "func TestJoinTableSqlBuilder(t *testing.T) {\n\tmock := NewMockOptimizer(false)\n\n\t// should pass\n\tsqls := []string{\n\t\t\"SELECT N_NAME,N_REGIONKEY FROM NATION join REGION on NATION.N_REGIONKEY = REGION.R_REGIONKEY\",\n\t\t\"SELECT N_NAME, N_REGIONKEY FROM NATION join REGION on NATION.N_REGIONKEY = REGION.R_REGIONKEY WHERE NATION.N_REGIONKEY > 0\",\n\t\t\"SELECT N_NAME, NATION2.R_REGIONKEY FROM NATION2 join REGION using(R_REGIONKEY) WHERE NATION2.R_REGIONKEY > 0\",\n\t\t\"SELECT N_NAME, NATION2.R_REGIONKEY FROM NATION2 NATURAL JOIN REGION WHERE NATION2.R_REGIONKEY > 0\",\n\t\t\"SELECT N_NAME FROM NATION NATURAL JOIN REGION\", //have no same column name but it's ok\n\t\t\"SELECT N_NAME,N_REGIONKEY FROM NATION a join REGION b on a.N_REGIONKEY = b.R_REGIONKEY WHERE 
a.N_REGIONKEY > 0\", //test alias\n\t\t\"SELECT l.L_ORDERKEY a FROM CUSTOMER c, ORDERS o, LINEITEM l WHERE c.C_CUSTKEY = o.O_CUSTKEY and l.L_ORDERKEY = o.O_ORDERKEY and o.O_ORDERKEY < 10\", //join three tables\n\t\t\"SELECT c.* FROM CUSTOMER c, ORDERS o, LINEITEM l WHERE c.C_CUSTKEY = o.O_CUSTKEY and l.L_ORDERKEY = o.O_ORDERKEY\", //test star\n\t\t\"SELECT * FROM CUSTOMER c, ORDERS o, LINEITEM l WHERE c.C_CUSTKEY = o.O_CUSTKEY and l.L_ORDERKEY = o.O_ORDERKEY\", //test star\n\t\t\"SELECT a.* FROM NATION a join REGION b on a.N_REGIONKEY = b.R_REGIONKEY WHERE a.N_REGIONKEY > 0\", //test star\n\t\t\"SELECT * FROM NATION a join REGION b on a.N_REGIONKEY = b.R_REGIONKEY WHERE a.N_REGIONKEY > 0\",\n\t\t\"SELECT N_NAME, R_REGIONKEY FROM NATION2 join REGION using(R_REGIONKEY)\",\n\t\t\"select nation.n_name from nation join nation2 on nation.n_name !='a' join region on nation.n_regionkey = region.r_regionkey\",\n\t\t\"select * from nation, nation2, region\",\n\t}\n\trunTestShouldPass(mock, t, sqls, false, false)\n\n\t// should error\n\tsqls = []string{\n\t\t\"SELECT N_NAME,N_REGIONKEY FROM NATION join REGION on NATION.N_REGIONKEY = REGION.NotExistColumn\", //column not exist\n\t\t\"SELECT N_NAME, R_REGIONKEY FROM NATION join REGION using(R_REGIONKEY)\", //column not exist\n\t\t\"SELECT N_NAME,N_REGIONKEY FROM NATION a join REGION b on a.N_REGIONKEY = b.R_REGIONKEY WHERE aaaaa.N_REGIONKEY > 0\", //table alias not exist\n\t\t\"select *\", //No table used\n\t\t\"SELECT * FROM NATION a join REGION b on a.N_REGIONKEY = b.R_REGIONKEY WHERE a.N_REGIONKEY > 0 for update\", //Not support\n\t\t\"select * from nation, nation2, region for update\", // Not support\n\t}\n\trunTestShouldError(mock, t, sqls)\n}", "func (f *predicateSqlizerFactory) addJoinsToSelectBuilder(q sq.SelectBuilder) sq.SelectBuilder {\n\tfor i, alias := range f.joinedTables {\n\t\taliasName := f.aliasName(alias.secondaryTable, i)\n\t\tjoinClause := fmt.Sprintf(\"%s AS %s ON %s = %s\",\n\t\t\tf.db.tableName(alias.secondaryTable), pq.QuoteIdentifier(aliasName),\n\t\t\tfullQuoteIdentifier(f.primaryTable, alias.primaryColumn),\n\t\t\tfullQuoteIdentifier(aliasName, alias.secondaryColumn))\n\t\tq = q.LeftJoin(joinClause)\n\t}\n\n\tif len(f.joinedTables) > 0 {\n\t\tq = q.Distinct()\n\t}\n\treturn q\n}", "func (b *JoinBuilder) Using(cols ...interface{}) *JoinBuilder {\n\tvar vals Columns\n\tfor _, c := range cols {\n\t\tvar name string\n\t\tswitch t := c.(type) {\n\t\tcase string:\n\t\t\tname = t\n\t\tcase ValExprBuilder:\n\t\t\tif n, ok := t.ValExpr.(*ColName); ok {\n\t\t\t\tname = n.Name\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tvar v ValExpr\n\t\tif len(name) == 0 {\n\t\t\tv = makeErrVal(\"unsupported type %T: %v\", c, c)\n\t\t} else if b.leftTable.column(name) == nil ||\n\t\t\tb.rightTable.column(name) == nil {\n\t\t\tv = makeErrVal(\"invalid join column: %s\", name)\n\t\t} else {\n\t\t\tv = &ColName{Name: name}\n\t\t}\n\t\tvals = append(vals, &NonStarExpr{Expr: v})\n\t}\n\tb.Cond = &UsingJoinCond{Cols: vals}\n\treturn b\n}", "func (ds *MySQL) Join(source, key, targetKey, joinType string, fields []string) {\n\tds.joinedRepositories[source] = builders.Join{\n\t\tSource: source,\n\t\tKey: key,\n\t\tTargetKey: targetKey,\n\t\tType: joinType,\n\t\tFields: fields,\n\t}\n}", "func (mySelf SQLJoin) Outer() SQLJoin {\n\tmySelf.outer = true\n\treturn mySelf\n}", "func (jr *joinReader) isLookupJoin() bool {\n\treturn len(jr.lookupCols) > 0\n}", "func (s *BasePlSqlParserListener) EnterOuter_join_sign(ctx *Outer_join_signContext) {}", "func JoinWithQL(handler 
*model.JoinTableHandler, ne *engine.Engine, source interface{}) error {\n\tne.Scope.ContextValue(source)\n\ttableName := handler.TableName\n\tquotedTableName := Quote(ne, tableName)\n\tvar joinConditions []string\n\tm, err := GetModelStruct(ne, source)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif handler.Source.ModelType == m.ModelType {\n\t\tne.Search.TableNames = append(ne.Search.TableNames, handler.TableName)\n\t\td := reflect.New(handler.Destination.ModelType).Interface()\n\t\tdestinationTableName := QuotedTableName(ne, d)\n\t\tfor _, foreignKey := range handler.Destination.ForeignKeys {\n\t\t\tjoinConditions = append(joinConditions, fmt.Sprintf(\"%v.%v = %v.%v\",\n\t\t\t\tquotedTableName,\n\t\t\t\tQuote(ne, foreignKey.DBName),\n\t\t\t\tdestinationTableName,\n\t\t\t\tQuote(ne, foreignKey.AssociationDBName)))\n\t\t}\n\n\t\tvar foreignDBNames []string\n\t\tvar foreignFieldNames []string\n\t\tfor _, foreignKey := range handler.Source.ForeignKeys {\n\t\t\tforeignDBNames = append(foreignDBNames, foreignKey.DBName)\n\t\t\tif field, ok := FieldByName(ne, source, foreignKey.AssociationDBName); ok == nil {\n\t\t\t\tforeignFieldNames = append(foreignFieldNames, field.Name)\n\t\t\t}\n\t\t}\n\n\t\tforeignFieldValues := util.ColumnAsArray(foreignFieldNames, ne.Scope.ValueOf())\n\n\t\tif len(foreignFieldValues) > 0 {\n\t\t\tvar quotedForeignDBNames []string\n\t\t\tfor _, dbName := range foreignDBNames {\n\t\t\t\tquotedForeignDBNames = append(quotedForeignDBNames, tableName+\".\"+dbName)\n\t\t\t}\n\t\t\tfor _, q := range quotedForeignDBNames {\n\t\t\t\tjoinConditions = append(joinConditions, fmt.Sprintf(\"%s=?\", q))\n\t\t\t}\n\t\t}\n\t\tsearch.Where(ne, strings.Join(joinConditions, \" AND \"), util.ToQueryValues(foreignFieldValues)...)\n\t\treturn nil\n\t}\n\treturn errors.New(\"wrong source type for join table handler\")\n}", "func (mp *JoinMultiplicity) JoinFiltersMatchAllRightRows() bool {\n\treturn mp.RightMultiplicity&MultiplicityPreservedVal != 0\n}", "func (s StringSet) Intersect(other StringSet) StringSet {\n\tresultSet := make(StringSet, len(s))\n\tfor val := range s {\n\t\tif other[val] {\n\t\t\tresultSet[val] = true\n\t\t}\n\t}\n\n\treturn resultSet\n}", "func main() {\n\t// bigger table\n\tfmt.Printf(\"%X \\n\", JoinExample(\"./t/r0.tbl\", \"./t/r0.tbl\", []int{0}, []int{1})) // 767636031\n\tfmt.Printf(\"%X \\n\", JoinExample(\"./t/r0.tbl\", \"./t/r1.tbl\", []int{0}, []int{0})) // 49082128576\n\tfmt.Printf(\"%X \\n\", JoinExample(\"./t/r0.tbl\", \"./t/r1.tbl\", []int{1}, []int{1})) // 85306117839070\n\tfmt.Printf(\"%X \\n\", JoinExample(\"./t/r0.tbl\", \"./t/r2.tbl\", []int{0}, []int{0})) // 48860100254\n\tfmt.Printf(\"%X \\n\", JoinExample(\"./t/r0.tbl\", \"./t/r1.tbl\", []int{0, 1}, []int{0, 1})) //5552101\n\tfmt.Printf(\"%X \\n\", JoinExample(\"./t/r1.tbl\", \"./t/r2.tbl\", []int{0}, []int{0})) // 6331038719880\n\tfmt.Printf(\"%X \\n\", JoinExample(\"./t/r2.tbl\", \"./t/r2.tbl\", []int{0, 1}, []int{0, 1})) // 42056985375886\n}", "func (filter *CombinedFilter) joinFilters(separator string, structMap TableAndColumnLocater, dialect gorp.Dialect, startBindIdx int) (string, []interface{}, error) {\n\tbuffer := bytes.Buffer{}\n\targs := make([]interface{}, 0, len(filter.subFilters))\n\tif len(filter.subFilters) > 1 {\n\t\tbuffer.WriteString(\"(\")\n\t}\n\tfor index, subFilter := range filter.subFilters {\n\t\tnextWhere, nextArgs, err := subFilter.Where(structMap, dialect, startBindIdx+len(args))\n\t\tif err != nil {\n\t\t\treturn \"\", nil, err\n\t\t}\n\t\targs = append(args, 
nextArgs...)\n\t\tif index != 0 {\n\t\t\tbuffer.WriteString(separator)\n\t\t}\n\t\tbuffer.WriteString(nextWhere)\n\t}\n\tif len(filter.subFilters) > 1 {\n\t\tbuffer.WriteString(\")\")\n\t}\n\treturn buffer.String(), args, nil\n}", "func JoinedIn(vs ...time.Time) predicate.User {\n\tv := make([]interface{}, len(vs))\n\tfor i := range v {\n\t\tv[i] = vs[i]\n\t}\n\treturn predicate.User(func(s *sql.Selector) {\n\t\t// if not arguments were provided, append the FALSE constants,\n\t\t// since we can't apply \"IN ()\". This will make this predicate falsy.\n\t\tif len(v) == 0 {\n\t\t\ts.Where(sql.False())\n\t\t\treturn\n\t\t}\n\t\ts.Where(sql.In(s.C(FieldJoined), v...))\n\t})\n}", "func (this *Dao) Join (joinType string, joinTable string, joinOn string) *Dao {\n\tjoin := fmt.Sprintf(\"%s JOIN %s ON %s\", strings.ToUpper(joinType), _table(joinTable), joinOn)\n\n\tthis.queryJoins = append(this.queryJoins, join)\n\treturn this\n}", "func joinFactSets(a, b FactSet) FactSet {\n\tr := FactSet{\n\t\tFacts: make([]rpc.Fact, len(a.Facts)+len(b.Facts)),\n\t}\n\tcopy(r.Facts, a.Facts)\n\tcopy(r.Facts[len(a.Facts):], b.Facts)\n\treturn r\n}", "func NewRightJoinOn(table string, from string, to string, filter ...FilterQuery) JoinQuery {\n\treturn NewJoinWith(\"RIGHT JOIN\", table, from, to, filter...)\n}", "func EqualsTableExprs(a, b TableExprs) bool {\n\tif len(a) != len(b) {\n\t\treturn false\n\t}\n\tfor i := 0; i < len(a); i++ {\n\t\tif !EqualsTableExpr(a[i], b[i]) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func (b *ExactMatchLookupJobRequestBuilder) MatchingRows() *ExactMatchLookupJobMatchingRowsCollectionRequestBuilder {\n\tbb := &ExactMatchLookupJobMatchingRowsCollectionRequestBuilder{BaseRequestBuilder: b.BaseRequestBuilder}\n\tbb.baseURL += \"/matchingRows\"\n\treturn bb\n}" ]
[ "0.6751454", "0.65702766", "0.64586174", "0.6028002", "0.60178965", "0.59976506", "0.597665", "0.59714115", "0.59106946", "0.58595383", "0.58453965", "0.57453084", "0.56433755", "0.5607307", "0.56053597", "0.55745494", "0.5502053", "0.54919076", "0.54228294", "0.5411585", "0.52169406", "0.51203614", "0.5102999", "0.5089563", "0.50045013", "0.49897593", "0.4955275", "0.48896784", "0.48503256", "0.48342502", "0.47881123", "0.47787637", "0.47762138", "0.4739697", "0.47336188", "0.47151208", "0.47097674", "0.46908787", "0.46684068", "0.4646711", "0.46390003", "0.46352112", "0.46220288", "0.4619037", "0.45928127", "0.45504197", "0.4540922", "0.4532716", "0.4531488", "0.45310977", "0.45180878", "0.45134822", "0.44953787", "0.44865742", "0.44858405", "0.44637614", "0.44563252", "0.44494882", "0.44461772", "0.44422212", "0.4381334", "0.4375123", "0.43727687", "0.43560064", "0.43527785", "0.434012", "0.43315125", "0.43293017", "0.4317816", "0.43040413", "0.42733705", "0.4266053", "0.4249733", "0.4244372", "0.4227373", "0.4225258", "0.4198956", "0.41979963", "0.4190164", "0.41772574", "0.41649687", "0.41630054", "0.41468138", "0.41326448", "0.41158122", "0.4114132", "0.41096726", "0.41063848", "0.41059974", "0.4104981", "0.40852863", "0.40769392", "0.40700445", "0.40696225", "0.40634492", "0.4062339", "0.4054411", "0.40504432", "0.4038289", "0.40344635" ]
0.7018755
0
LeftJoin returns all records from the left table (table1) and the matched records from the right table (table2). The result is NULL on the right side if there is no match. LeftJoin transforms an expr column to a raw column
func (left *DataTable) LeftJoin(right *DataTable, on []JoinOn) (*DataTable, error) { return newJoinImpl(leftJoin, []*DataTable{left, right}, on).Compute() }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (stmt *statement) LeftJoin(table, on string) Statement {\n\tstmt.join(\"LEFT JOIN \", table, on)\n\treturn stmt\n}", "func (w *Wrapper) LeftJoin(table interface{}, condition string) *Wrapper {\n\tw.saveJoin(table, \"LEFT JOIN\", condition)\n\treturn w\n}", "func LeftJoin(tables []*DataTable, on []JoinOn) (*DataTable, error) {\n\treturn newJoinImpl(leftJoin, tables, on).Compute()\n}", "func (sd *SelectDataset) LeftJoin(table exp.Expression, condition exp.JoinCondition) *SelectDataset {\n\treturn sd.joinTable(exp.NewConditionedJoinExpression(exp.LeftJoinType, table, condition))\n}", "func (sd *SelectDataset) LeftOuterJoin(table exp.Expression, condition exp.JoinCondition) *SelectDataset {\n\treturn sd.joinTable(exp.NewConditionedJoinExpression(exp.LeftOuterJoinType, table, condition))\n}", "func (t *Table) LeftJoin(offset int32, count int, crit string, target interface{}) error {\n\tbody, err := t.LeftJoinRaw(offset, count, crit)\n\tif err == nil {\n\t\terr = json.Unmarshal(body, &target)\n\t}\n\treturn err\n}", "func (b *Builder) LeftJoin(joinTable string, joinCond interface{}) *Builder {\r\n\treturn b.Join(\"LEFT\", joinTable, joinCond)\r\n}", "func (b *Builder) LeftJoin(joinTable, joinCond interface{}) *Builder {\n\treturn b.Join(\"LEFT\", joinTable, joinCond)\n}", "func (b *JoinBuilder) LeftJoin(other *Table) *JoinBuilder {\n\treturn makeJoinBuilder(\"LEFT JOIN\", b, other)\n}", "func NewLeftJoin(table string, filter ...FilterQuery) JoinQuery {\n\treturn NewLeftJoinOn(table, \"\", \"\", filter...)\n}", "func NewLeftJoin(table string) JoinQuery {\n\treturn NewLeftJoinOn(table, \"\", \"\")\n}", "func LeftJoin(table, on string) QueryOption {\n\treturn newFuncQueryOption(func(wrapper *QueryWrapper) {\n\t\twrapper.joins = append(wrapper.joins, \"LEFT\", \"JOIN\", table, \"ON\", on)\n\t\twrapper.queryLen += 5\n\t})\n}", "func (sd *SelectDataset) NaturalLeftJoin(table exp.Expression) *SelectDataset {\n\treturn sd.joinTable(exp.NewUnConditionedJoinExpression(exp.NaturalLeftJoinType, table))\n}", "func NewLeftJoinOn(table string, from string, to string, filter ...FilterQuery) JoinQuery {\n\treturn NewJoinWith(\"LEFT JOIN\", table, from, to, filter...)\n}", "func NewLeftJoinOn(table string, from string, to string) JoinQuery {\n\treturn NewJoinWith(\"LEFT JOIN\", table, from, to)\n}", "func (f *predicateSqlizerFactory) createLeftJoin(secondaryTable string, primaryColumn string, secondaryColumn string) string {\n\tnewAlias := joinedTable{secondaryTable, primaryColumn, secondaryColumn}\n\tfor i, alias := range f.joinedTables {\n\t\tif alias.equal(newAlias) {\n\t\t\treturn f.aliasName(secondaryTable, i)\n\t\t}\n\t}\n\n\tf.joinedTables = append(f.joinedTables, newAlias)\n\treturn f.aliasName(secondaryTable, len(f.joinedTables)-1)\n}", "func (t *Table) LeftJoinRaw(offset int32, count int, crit string) ([]byte, error) {\n\tp := \"https://%s/api/getLeftJoin.sjs?json&object=%s&limit=%d,%d\"\n\tx := fmt.Sprintf(p, t.Host, t.Name, offset, count)\n\tif len(crit) != 0 {\n\t\tx = x + \"&condition=\" + FixCrit(crit)\n\t}\n\t_, body, err := t.Get(x)\n\treturn body, err\n}", "func LeftJoin(lx, rx reflect.Value) reflect.Value {\n\tresult := reflect.MakeSlice(reflect.SliceOf(lx.Type().Elem()), 0, lx.Len())\n\trhash := hashSlice(rx)\n\n\tfor i := 0; i < lx.Len(); i++ {\n\t\tv := lx.Index(i)\n\t\t_, ok := rhash[v.Interface()]\n\t\tif !ok {\n\t\t\tresult = reflect.Append(result, v)\n\t\t}\n\t}\n\treturn result\n}", "func (t *Table) LeftJoinMap(offset int32, count int, crit string) ([]map[string]string, error) 
{\n\tvar a []map[string]string\n\tbody, err := t.LeftJoinRaw(offset, count, crit)\n\ta = unpackGJsonArray(body)\n\treturn a, err\n}", "func (mp *JoinMultiplicity) JoinPreservesLeftRows(op opt.Operator) bool {\n\tswitch op {\n\tcase opt.InnerJoinOp, opt.SemiJoinOp:\n\t\tbreak\n\n\tcase opt.LeftJoinOp, opt.FullJoinOp:\n\t\treturn true\n\n\tdefault:\n\t\tpanic(errors.AssertionFailedf(\"unsupported operator: %v\", op))\n\t}\n\treturn mp.JoinFiltersMatchAllLeftRows()\n}", "func TestAggregateLeftJoin(t *testing.T) {\n\tmcmp, closer := start(t)\n\tdefer closer()\n\n\tmcmp.Exec(\"insert into t1(t1_id, name, value, shardKey) values (11, 'r', 'r', 1), (3, 'r', 'r', 0)\")\n\tmcmp.Exec(\"insert into t2(id, shardKey) values (11, 1)\")\n\n\tmcmp.AssertMatchesNoOrder(\"SELECT t1.shardkey FROM t1 LEFT JOIN t2 ON t1.t1_id = t2.id\", `[[INT64(1)] [INT64(0)]]`)\n\tmcmp.AssertMatches(\"SELECT count(t1.shardkey) FROM t1 LEFT JOIN t2 ON t1.t1_id = t2.id\", `[[INT64(2)]]`)\n\tmcmp.AssertMatches(\"SELECT count(t2.shardkey) FROM t1 LEFT JOIN t2 ON t1.t1_id = t2.id\", `[[INT64(1)]]`)\n\tmcmp.AssertMatches(\"SELECT count(*) FROM t1 LEFT JOIN t2 ON t1.t1_id = t2.id\", `[[INT64(2)]]`)\n\tmcmp.AssertMatches(\"SELECT sum(t1.shardkey) FROM t1 LEFT JOIN t2 ON t1.t1_id = t2.id\", `[[DECIMAL(1)]]`)\n\tmcmp.AssertMatches(\"SELECT sum(t2.shardkey) FROM t1 LEFT JOIN t2 ON t1.t1_id = t2.id\", `[[DECIMAL(1)]]`)\n\tmcmp.AssertMatches(\"SELECT count(*) FROM t2 LEFT JOIN t1 ON t1.t1_id = t2.id WHERE IFNULL(t1.name, 'NOTSET') = 'r'\", `[[INT64(1)]]`)\n}", "func (self Accessor) OuterJoin(expr interface{}) *SelectManager {\n\treturn self.From(self.Relation()).OuterJoin(expr)\n}", "func (left *DataTable) OuterJoin(right *DataTable, on []JoinOn) (*DataTable, error) {\n\treturn newJoinImpl(outerJoin, []*DataTable{left, right}, on).Compute()\n}", "func JoinWithLeftAssociativeOp(op OpCode, a Expr, b Expr) Expr {\n\t// \"(a, b) op c\" => \"a, b op c\"\n\tif comma, ok := a.Data.(*EBinary); ok && comma.Op == BinOpComma {\n\t\tcomma.Right = JoinWithLeftAssociativeOp(op, comma.Right, b)\n\t\treturn a\n\t}\n\n\t// \"a op (b op c)\" => \"(a op b) op c\"\n\t// \"a op (b op (c op d))\" => \"((a op b) op c) op d\"\n\tif binary, ok := b.Data.(*EBinary); ok && binary.Op == op {\n\t\treturn JoinWithLeftAssociativeOp(\n\t\t\top,\n\t\t\tJoinWithLeftAssociativeOp(op, a, binary.Left),\n\t\t\tbinary.Right,\n\t\t)\n\t}\n\n\t// \"a op b\" => \"a op b\"\n\t// \"(a op b) op c\" => \"(a op b) op c\"\n\treturn Expr{Loc: a.Loc, Data: &EBinary{Op: op, Left: a, Right: b}}\n}", "func (mp *JoinMultiplicity) JoinDoesNotDuplicateLeftRows(op opt.Operator) bool {\n\tswitch op {\n\tcase opt.InnerJoinOp, opt.LeftJoinOp, opt.FullJoinOp:\n\t\tbreak\n\n\tcase opt.SemiJoinOp:\n\t\treturn true\n\n\tdefault:\n\t\tpanic(errors.AssertionFailedf(\"unsupported operator: %v\", op))\n\t}\n\treturn mp.JoinFiltersDoNotDuplicateLeftRows()\n}", "func (h *joinPlanningHelper) remapOnExpr(\n\tplanCtx *PlanningCtx, onCond tree.TypedExpr,\n) (execinfrapb.Expression, error) {\n\tif onCond == nil {\n\t\treturn execinfrapb.Expression{}, nil\n\t}\n\n\tjoinColMap := make([]int, h.numLeftOutCols+h.numRightOutCols)\n\tidx := 0\n\tleftCols := 0\n\tfor i := 0; i < h.numLeftOutCols; i++ {\n\t\tjoinColMap[idx] = h.leftPlanToStreamColMap[i]\n\t\tif h.leftPlanToStreamColMap[i] != -1 {\n\t\t\tleftCols++\n\t\t}\n\t\tidx++\n\t}\n\tfor i := 0; i < h.numRightOutCols; i++ {\n\t\tjoinColMap[idx] = leftCols + h.rightPlanToStreamColMap[i]\n\t\tidx++\n\t}\n\n\treturn physicalplan.MakeExpression(onCond, planCtx, 
joinColMap)\n}", "func (d *dbBasePostgres) GenerateOperatorLeftCol(fi *fieldInfo, operator string, leftCol *string) {\n\tswitch operator {\n\tcase \"contains\", \"startswith\", \"endswith\":\n\t\t*leftCol = fmt.Sprintf(\"%s::text\", *leftCol)\n\tcase \"iexact\", \"icontains\", \"istartswith\", \"iendswith\":\n\t\t*leftCol = fmt.Sprintf(\"UPPER(%s::text)\", *leftCol)\n\t}\n}", "func leftRecursive(expr ast.Node) ast.Node {\n\tswitch node := expr.(type) {\n\tcase *ast.Apply:\n\t\treturn node.Target\n\tcase *ast.ApplyBrace:\n\t\treturn node.Left\n\tcase *ast.Binary:\n\t\treturn node.Left\n\tcase *ast.Index:\n\t\treturn node.Target\n\tcase *ast.InSuper:\n\t\treturn node.Index\n\tcase *ast.Slice:\n\t\treturn node.Target\n\tdefault:\n\t\treturn nil\n\t}\n}", "func OuterJoin(tables []*DataTable, on []JoinOn) (*DataTable, error) {\n\treturn newJoinImpl(outerJoin, tables, on).Compute()\n}", "func (fn *formulaFuncs) LEFT(argsList *list.List) formulaArg {\n\treturn fn.leftRight(\"LEFT\", argsList)\n}", "func joinLeft(g []string) string {\n\tif g == nil || len(g) == 0 {\n\t\treturn \"\"\n\t}\n\tvar bf bytes.Buffer\n\tfor i := range g {\n\t\tc := strings.Index(g[i], \"#\")\n\t\tif c == -1 {\n\t\t\tbf.WriteString(g[i])\n\t\t} else {\n\t\t\tbf.WriteString(g[i][0:c])\n\t\t\tbreak\n\t\t}\n\t}\n\treturn string(bf.Bytes())\n}", "func (l Left) Eval(ctx *sql.Context, row sql.Row) (interface{}, error) {\n\tstr, err := l.str.Eval(ctx, row)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar text []rune\n\tswitch str := str.(type) {\n\tcase string:\n\t\ttext = []rune(str)\n\tcase []byte:\n\t\ttext = []rune(string(str))\n\tcase nil:\n\t\treturn nil, nil\n\tdefault:\n\t\treturn nil, sql.ErrInvalidType.New(reflect.TypeOf(str).String())\n\t}\n\n\tvar length int64\n\truneCount := int64(len(text))\n\tlen, err := l.len.Eval(ctx, row)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif len == nil {\n\t\treturn nil, nil\n\t}\n\n\tlen, err = sql.Int64.Convert(len)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlength = len.(int64)\n\n\tif length > runeCount {\n\t\tlength = runeCount\n\t}\n\tif length <= 0 {\n\t\treturn \"\", nil\n\t}\n\n\treturn string(text[:length]), nil\n}", "func OuterJoin(lx, rx reflect.Value) reflect.Value {\n\tljoin := LeftJoin(lx, rx)\n\trjoin := RightJoin(lx, rx)\n\n\tresult := reflect.MakeSlice(reflect.SliceOf(lx.Type().Elem()), ljoin.Len()+rjoin.Len(), ljoin.Len()+rjoin.Len())\n\tfor i := 0; i < ljoin.Len(); i++ {\n\t\tresult.Index(i).Set(ljoin.Index(i))\n\t}\n\tfor i := 0; i < rjoin.Len(); i++ {\n\t\tresult.Index(ljoin.Len() + i).Set(rjoin.Index(i))\n\t}\n\n\treturn result\n}", "func (sd *SelectDataset) NaturalJoin(table exp.Expression) *SelectDataset {\n\treturn sd.joinTable(exp.NewUnConditionedJoinExpression(exp.NaturalJoinType, table))\n}", "func (t *TableExpr) JoinSQL() string {\n\tif t.JoinConditions != \"\" && t.JoinType != \"\" {\n\t\treturn \" \" + t.JoinType + \" \" + t.Table.SQL() + \" on \" +\n\t\t\tt.JoinConditions\n\t}\n\treturn \", \" + t.Table.SQL()\n}", "func (d *dbBase) GenerateOperatorLeftCol(*fieldInfo, string, *string) {\n\t// default not use\n}", "func (sd *SelectDataset) CrossJoin(table exp.Expression) *SelectDataset {\n\treturn sd.joinTable(exp.NewUnConditionedJoinExpression(exp.CrossJoinType, table))\n}", "func (w *Wrapper) NaturalJoin(table interface{}, condition string) *Wrapper {\n\tw.saveJoin(table, \"NATURAL JOIN\", condition)\n\treturn w\n}", "func LeftFromTable(timeTable map[string]map[rune][]uint) (ret []uint) {\n\tfor _, day := range timeTable {\n\t\tfor _, 
ids := range day {\n\t\t\tfor _, id := range ids {\n\t\t\t\tret = append(ret, id)\n\t\t\t}\n\t\t}\n\t}\n\treturn SliceUniqMap(ret)\n}", "func NewLeft(str, len sql.Expression) sql.Expression {\n\treturn Left{str, len}\n}", "func (sd *SelectDataset) Join(table exp.Expression, condition exp.JoinCondition) *SelectDataset {\n\treturn sd.InnerJoin(table, condition)\n}", "func LeftOf(x ...interface{}) Either {\n\treturn newEither(false, x...)\n}", "func (f *FilterExpression) WithLeft(left string) *FilterExpression {\n\tf.Left = left\n\treturn f\n}", "func JoinedIsNil() predicate.User {\n\treturn predicate.User(func(s *sql.Selector) {\n\t\ts.Where(sql.IsNull(s.C(FieldJoined)))\n\t})\n}", "func NewNaturalJoin(left, right sql.Node) *JoinNode {\n\treturn NewJoin(left, right, JoinTypeUsing, nil)\n}", "func (sd *SelectDataset) FullOuterJoin(table exp.Expression, condition exp.JoinCondition) *SelectDataset {\n\treturn sd.joinTable(exp.NewConditionedJoinExpression(exp.FullOuterJoinType, table, condition))\n}", "func (stmt *statement) RightJoin(table, on string) Statement {\n\tstmt.join(\"RIGHT JOIN \", table, on)\n\treturn stmt\n}", "func BreakExpressionInLHSandRHS(\n\tctx *plancontext.PlanningContext,\n\texpr sqlparser.Expr,\n\tlhs semantics.TableSet,\n) (col JoinColumn, err error) {\n\trewrittenExpr := sqlparser.CopyOnRewrite(expr, nil, func(cursor *sqlparser.CopyOnWriteCursor) {\n\t\tnode, ok := cursor.Node().(*sqlparser.ColName)\n\t\tif !ok {\n\t\t\treturn\n\t\t}\n\t\tdeps := ctx.SemTable.RecursiveDeps(node)\n\t\tif deps.IsEmpty() {\n\t\t\terr = vterrors.VT13001(\"unknown column. has the AST been copied?\")\n\t\t\tcursor.StopTreeWalk()\n\t\t\treturn\n\t\t}\n\t\tif !deps.IsSolvedBy(lhs) {\n\t\t\treturn\n\t\t}\n\n\t\tnode.Qualifier.Qualifier = sqlparser.NewIdentifierCS(\"\")\n\t\tcol.LHSExprs = append(col.LHSExprs, node)\n\t\tbvName := node.CompliantName()\n\t\tcol.BvNames = append(col.BvNames, bvName)\n\t\targ := sqlparser.NewArgument(bvName)\n\t\t// we are replacing one of the sides of the comparison with an argument,\n\t\t// but we don't want to lose the type information we have, so we copy it over\n\t\tctx.SemTable.CopyExprInfo(node, arg)\n\t\tcursor.Replace(arg)\n\t}, nil).(sqlparser.Expr)\n\n\tif err != nil {\n\t\treturn JoinColumn{}, err\n\t}\n\tctx.JoinPredicates[expr] = append(ctx.JoinPredicates[expr], rewrittenExpr)\n\tcol.RHSExpr = rewrittenExpr\n\treturn\n}", "func (mp *JoinMultiplicity) JoinFiltersMatchAllLeftRows() bool {\n\treturn mp.LeftMultiplicity&MultiplicityPreservedVal != 0\n}", "func (stmt *statement) FullJoin(table, on string) Statement {\n\tstmt.join(\"FULL JOIN \", table, on)\n\treturn stmt\n}", "func CloneRefOfJoinTableExpr(n *JoinTableExpr) *JoinTableExpr {\n\tif n == nil {\n\t\treturn nil\n\t}\n\tout := *n\n\tout.LeftExpr = CloneTableExpr(n.LeftExpr)\n\tout.RightExpr = CloneTableExpr(n.RightExpr)\n\tout.Condition = CloneJoinCondition(n.Condition)\n\treturn &out\n}", "func (sd *SelectDataset) NaturalFullJoin(table exp.Expression) *SelectDataset {\n\treturn sd.joinTable(exp.NewUnConditionedJoinExpression(exp.NaturalFullJoinType, table))\n}", "func (sd *SelectDataset) NaturalRightJoin(table exp.Expression) *SelectDataset {\n\treturn sd.joinTable(exp.NewUnConditionedJoinExpression(exp.NaturalRightJoinType, table))\n}", "func (b *Builder) CrossJoin(joinTable string, joinCond interface{}) *Builder {\r\n\treturn b.Join(\"CROSS\", joinTable, joinCond)\r\n}", "func (sd *SelectDataset) FullJoin(table exp.Expression, condition exp.JoinCondition) *SelectDataset {\n\treturn 
sd.joinTable(exp.NewConditionedJoinExpression(exp.FullJoinType, table, condition))\n}", "func (session *Session) Join(joinOperator string, tablename interface{}, condition string, args ...interface{}) *Session {\n\tsession.Statement.Join(joinOperator, tablename, condition, args...)\n\treturn session\n}", "func (sd *SelectDataset) RightOuterJoin(table exp.Expression, condition exp.JoinCondition) *SelectDataset {\n\treturn sd.joinTable(exp.NewConditionedJoinExpression(exp.RightOuterJoinType, table, condition))\n}", "func defaultExprMetaLeftDenotation(\n\tp *parser,\n\trightBindingPower int,\n\tleft ast.Expression,\n) (\n\tresult ast.Expression,\n\terr error,\n\tdone bool,\n) {\n\tleftBindingPower, err := exprLeftBindingPower(p)\n\tif err != nil {\n\t\treturn nil, err, true\n\t}\n\n\tif rightBindingPower >= leftBindingPower {\n\t\treturn left, nil, true\n\t}\n\n\tt := p.current\n\n\tp.next()\n\n\tresult, err = applyExprLeftDenotation(p, t, left)\n\treturn result, err, false\n}", "func RightJoin(lx, rx reflect.Value) reflect.Value { return LeftJoin(rx, lx) }", "func CloneTableExpr(in TableExpr) TableExpr {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tswitch in := in.(type) {\n\tcase *AliasedTableExpr:\n\t\treturn CloneRefOfAliasedTableExpr(in)\n\tcase *JoinTableExpr:\n\t\treturn CloneRefOfJoinTableExpr(in)\n\tcase *ParenTableExpr:\n\t\treturn CloneRefOfParenTableExpr(in)\n\tdefault:\n\t\t// this should never happen\n\t\treturn nil\n\t}\n}", "func (b *Builder) CrossJoin(joinTable, joinCond interface{}) *Builder {\n\treturn b.Join(\"CROSS\", joinTable, joinCond)\n}", "func ColumnLeft(name string) {\n\tidx := colIndex(name)\n\tif idx > 0 {\n\t\tswapCols(idx, idx-1)\n\t}\n}", "func (r1 *csvTable) Join(r2 rel.Relation, zero interface{}) rel.Relation {\n\treturn rel.NewJoin(r1, r2, zero)\n}", "func (left *DataTable) InnerJoin(right *DataTable, on []JoinOn) (*DataTable, error) {\n\treturn newJoinImpl(innerJoin, []*DataTable{left, right}, on).Compute()\n}", "func ExtractJoinEquality(\n\tleftCols, rightCols opt.ColSet, condition opt.ScalarExpr,\n) (ok bool, left, right opt.ColumnID) {\n\tlvar, rvar, ok := isVarEquality(condition)\n\tif !ok {\n\t\treturn false, 0, 0\n\t}\n\n\t// Don't allow mixed types (see #22519).\n\tif !lvar.DataType().Equivalent(rvar.DataType()) {\n\t\treturn false, 0, 0\n\t}\n\n\tif leftCols.Contains(lvar.Col) && rightCols.Contains(rvar.Col) {\n\t\treturn true, lvar.Col, rvar.Col\n\t}\n\tif leftCols.Contains(rvar.Col) && rightCols.Contains(lvar.Col) {\n\t\treturn true, rvar.Col, lvar.Col\n\t}\n\n\treturn false, 0, 0\n}", "func (l Left) WithChildren(children ...sql.Expression) (sql.Expression, error) {\n\tif len(children) != 2 {\n\t\treturn nil, sql.ErrInvalidChildrenNumber.New(l, len(children), 2)\n\t}\n\treturn NewLeft(children[0], children[1]), nil\n}", "func (self Accessor) InnerJoin(expr interface{}) *SelectManager {\n\treturn self.From(self.Relation()).InnerJoin(expr)\n}", "func NewRightJoinOn(table string, from string, to string) JoinQuery {\n\treturn NewJoinWith(\"RIGHT JOIN\", table, from, to)\n}", "func (mySelf SQLJoin) Outer() SQLJoin {\n\tmySelf.outer = true\n\treturn mySelf\n}", "func (m *Machine) Left() {\n\tfmt.Printf(\">> LEFT\\n\")\n\t// If we're at the 0th position, then we need to expand our tape array:\n\tif m.position == 0 {\n\t\tsize := len(m.Tape)\n\t\tm.Tape = append(make([]Cell, size), m.Tape...)\n\t\tm.position += size\n\t}\n\n\tm.position -= 1\n}", "func leftRecursiveDeep(expr ast.Node) ast.Node {\n\tlast := expr\n\tleft := leftRecursive(expr)\n\tfor left != 
nil {\n\t\tlast = left\n\t\tleft = leftRecursive(last)\n\t}\n\treturn last\n}", "func EqualsRefOfJoinTableExpr(a, b *JoinTableExpr) bool {\n\tif a == b {\n\t\treturn true\n\t}\n\tif a == nil || b == nil {\n\t\treturn false\n\t}\n\treturn EqualsTableExpr(a.LeftExpr, b.LeftExpr) &&\n\t\ta.Join == b.Join &&\n\t\tEqualsTableExpr(a.RightExpr, b.RightExpr) &&\n\t\tEqualsJoinCondition(a.Condition, b.Condition)\n}", "func (stmt *statement) Join(table, on string) Statement {\n\tstmt.join(\"JOIN \", table, on)\n\treturn stmt\n}", "func NewUsingJoin(left, right sql.Node, op JoinType, cols []string) *JoinNode {\n\treturn &JoinNode{\n\t\tOp: op,\n\t\tBinaryNode: BinaryNode{left: left, right: right},\n\t\tUsingCols: cols,\n\t}\n}", "func NewRightJoinOn(table string, from string, to string, filter ...FilterQuery) JoinQuery {\n\treturn NewJoinWith(\"RIGHT JOIN\", table, from, to, filter...)\n}", "func TestJoinTableSqlBuilder(t *testing.T) {\n\tmock := NewMockOptimizer(false)\n\n\t// should pass\n\tsqls := []string{\n\t\t\"SELECT N_NAME,N_REGIONKEY FROM NATION join REGION on NATION.N_REGIONKEY = REGION.R_REGIONKEY\",\n\t\t\"SELECT N_NAME, N_REGIONKEY FROM NATION join REGION on NATION.N_REGIONKEY = REGION.R_REGIONKEY WHERE NATION.N_REGIONKEY > 0\",\n\t\t\"SELECT N_NAME, NATION2.R_REGIONKEY FROM NATION2 join REGION using(R_REGIONKEY) WHERE NATION2.R_REGIONKEY > 0\",\n\t\t\"SELECT N_NAME, NATION2.R_REGIONKEY FROM NATION2 NATURAL JOIN REGION WHERE NATION2.R_REGIONKEY > 0\",\n\t\t\"SELECT N_NAME FROM NATION NATURAL JOIN REGION\", //have no same column name but it's ok\n\t\t\"SELECT N_NAME,N_REGIONKEY FROM NATION a join REGION b on a.N_REGIONKEY = b.R_REGIONKEY WHERE a.N_REGIONKEY > 0\", //test alias\n\t\t\"SELECT l.L_ORDERKEY a FROM CUSTOMER c, ORDERS o, LINEITEM l WHERE c.C_CUSTKEY = o.O_CUSTKEY and l.L_ORDERKEY = o.O_ORDERKEY and o.O_ORDERKEY < 10\", //join three tables\n\t\t\"SELECT c.* FROM CUSTOMER c, ORDERS o, LINEITEM l WHERE c.C_CUSTKEY = o.O_CUSTKEY and l.L_ORDERKEY = o.O_ORDERKEY\", //test star\n\t\t\"SELECT * FROM CUSTOMER c, ORDERS o, LINEITEM l WHERE c.C_CUSTKEY = o.O_CUSTKEY and l.L_ORDERKEY = o.O_ORDERKEY\", //test star\n\t\t\"SELECT a.* FROM NATION a join REGION b on a.N_REGIONKEY = b.R_REGIONKEY WHERE a.N_REGIONKEY > 0\", //test star\n\t\t\"SELECT * FROM NATION a join REGION b on a.N_REGIONKEY = b.R_REGIONKEY WHERE a.N_REGIONKEY > 0\",\n\t\t\"SELECT N_NAME, R_REGIONKEY FROM NATION2 join REGION using(R_REGIONKEY)\",\n\t\t\"select nation.n_name from nation join nation2 on nation.n_name !='a' join region on nation.n_regionkey = region.r_regionkey\",\n\t\t\"select * from nation, nation2, region\",\n\t}\n\trunTestShouldPass(mock, t, sqls, false, false)\n\n\t// should error\n\tsqls = []string{\n\t\t\"SELECT N_NAME,N_REGIONKEY FROM NATION join REGION on NATION.N_REGIONKEY = REGION.NotExistColumn\", //column not exist\n\t\t\"SELECT N_NAME, R_REGIONKEY FROM NATION join REGION using(R_REGIONKEY)\", //column not exist\n\t\t\"SELECT N_NAME,N_REGIONKEY FROM NATION a join REGION b on a.N_REGIONKEY = b.R_REGIONKEY WHERE aaaaa.N_REGIONKEY > 0\", //table alias not exist\n\t\t\"select *\", //No table used\n\t\t\"SELECT * FROM NATION a join REGION b on a.N_REGIONKEY = b.R_REGIONKEY WHERE a.N_REGIONKEY > 0 for update\", //Not support\n\t\t\"select * from nation, nation2, region for update\", // Not support\n\t}\n\trunTestShouldError(mock, t, sqls)\n}", "func (q Query) Join(inner Query,\n\touterKeySelector func(interface{}) interface{},\n\tinnerKeySelector func(interface{}) interface{},\n\tresultSelector func(outer 
interface{}, inner interface{}) interface{}) Query {\n\n\treturn Query{\n\t\tIterate: func() Iterator {\n\t\t\touternext := q.Iterate()\n\t\t\tinnernext := inner.Iterate()\n\n\t\t\tinnerLookup := make(map[interface{}][]interface{})\n\t\t\tfor innerItem, ok := innernext(); ok; innerItem, ok = innernext() {\n\t\t\t\tinnerKey := innerKeySelector(innerItem)\n\t\t\t\tinnerLookup[innerKey] = append(innerLookup[innerKey], innerItem)\n\t\t\t}\n\n\t\t\tvar outerItem interface{}\n\t\t\tvar innerGroup []interface{}\n\t\t\tinnerLen, innerIndex := 0, 0\n\n\t\t\treturn func() (item interface{}, ok bool) {\n\t\t\t\tif innerIndex >= innerLen {\n\t\t\t\t\thas := false\n\t\t\t\t\tfor !has {\n\t\t\t\t\t\touterItem, ok = outernext()\n\t\t\t\t\t\tif !ok {\n\t\t\t\t\t\t\treturn\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tinnerGroup, has = innerLookup[outerKeySelector(outerItem)]\n\t\t\t\t\t\tinnerLen = len(innerGroup)\n\t\t\t\t\t\tinnerIndex = 0\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\titem = resultSelector(outerItem, innerGroup[innerIndex])\n\t\t\t\tinnerIndex++\n\t\t\t\treturn item, true\n\t\t\t}\n\t\t},\n\t}\n}", "func JoinWith(handler *model.JoinTableHandler, ne *engine.Engine, source interface{}) error {\n\tne.Scope.ContextValue(source)\n\ttableName := handler.TableName\n\tquotedTableName := Quote(ne, tableName)\n\tvar joinConditions []string\n\tm, err := GetModelStruct(ne, source)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif handler.Source.ModelType == m.ModelType {\n\t\td := reflect.New(handler.Destination.ModelType).Interface()\n\t\tdestinationTableName := QuotedTableName(ne, d)\n\t\tfor _, foreignKey := range handler.Destination.ForeignKeys {\n\t\t\tjoinConditions = append(joinConditions, fmt.Sprintf(\"%v.%v = %v.%v\",\n\t\t\t\tquotedTableName,\n\t\t\t\tQuote(ne, foreignKey.DBName),\n\t\t\t\tdestinationTableName,\n\t\t\t\tQuote(ne, foreignKey.AssociationDBName)))\n\t\t}\n\n\t\tvar foreignDBNames []string\n\t\tvar foreignFieldNames []string\n\t\tfor _, foreignKey := range handler.Source.ForeignKeys {\n\t\t\tforeignDBNames = append(foreignDBNames, foreignKey.DBName)\n\t\t\tif field, ok := FieldByName(ne, source, foreignKey.AssociationDBName); ok == nil {\n\t\t\t\tforeignFieldNames = append(foreignFieldNames, field.Name)\n\t\t\t}\n\t\t}\n\n\t\tforeignFieldValues := util.ColumnAsArray(foreignFieldNames, ne.Scope.ValueOf())\n\n\t\tvar condString string\n\t\tif len(foreignFieldValues) > 0 {\n\t\t\tvar quotedForeignDBNames []string\n\t\t\tfor _, dbName := range foreignDBNames {\n\t\t\t\tquotedForeignDBNames = append(quotedForeignDBNames, tableName+\".\"+dbName)\n\t\t\t}\n\n\t\t\tcondString = fmt.Sprintf(\"%v IN (%v)\",\n\t\t\t\tToQueryCondition(ne, quotedForeignDBNames),\n\t\t\t\tutil.ToQueryMarks(foreignFieldValues))\n\t\t} else {\n\t\t\tcondString = fmt.Sprintf(\"1 <> 1\")\n\t\t}\n\n\t\tsearch.Join(ne,\n\t\t\tfmt.Sprintf(\"INNER JOIN %v ON %v\",\n\t\t\t\tquotedTableName,\n\t\t\t\tstrings.Join(joinConditions, \" AND \")))\n\t\tsearch.Where(ne, condString, util.ToQueryValues(foreignFieldValues)...)\n\t\treturn nil\n\t}\n\treturn errors.New(\"wrong source type for join table handler\")\n}", "func (mp *JoinMultiplicity) JoinFiltersDoNotDuplicateLeftRows() bool {\n\treturn mp.LeftMultiplicity&MultiplicityNotDuplicatedVal != 0\n}", "func (m Matrix3) LeftMultiply(q Matrix3) (result Matrix3) {\n\tconst size = 3\n\tfor r := 0; r < size; r++ {\n\t\tfor c := 0; c < size; c++ {\n\t\t\tresult[r*size+c] = q.GetRow(r).Dot(m.GetCol(c))\n\t\t}\n\t}\n\treturn\n}", "func JoinedNotNil() predicate.User {\n\treturn predicate.User(func(s 
*sql.Selector) {\n\t\ts.Where(sql.NotNull(s.C(FieldJoined)))\n\t})\n}", "func NewJoinOn(table string, from string, to string, filter ...FilterQuery) JoinQuery {\n\treturn NewJoinWith(\"JOIN\", table, from, to, filter...)\n}", "func (l *Label) left(g *Graph) (*Label, *DataError) {\n Assert(nilLabel, l != nil)\n Assert(nilGraph, g != nil)\n Assert(nilLabelStore, g.labelStore != nil)\n \n return g.labelStore.findAllowZero(l.l)\n}", "func NewFullJoinOn(table string, from string, to string, filter ...FilterQuery) JoinQuery {\n\treturn NewJoinWith(\"FULL JOIN\", table, from, to, filter...)\n}", "func NewFullJoinOn(table string, from string, to string) JoinQuery {\n\treturn NewJoinWith(\"FULL JOIN\", table, from, to)\n}", "func (q Query) JoinT(inner Query,\n\touterKeySelectorFn interface{},\n\tinnerKeySelectorFn interface{},\n\tresultSelectorFn interface{}) Query {\n\touterKeySelectorGenericFunc, err := newGenericFunc(\n\t\t\"JoinT\", \"outerKeySelectorFn\", outerKeySelectorFn,\n\t\tsimpleParamValidator(newElemTypeSlice(new(genericType)), newElemTypeSlice(new(genericType))),\n\t)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\touterKeySelectorFunc := func(item interface{}) interface{} {\n\t\treturn outerKeySelectorGenericFunc.Call(item)\n\t}\n\n\tinnerKeySelectorFuncGenericFunc, err := newGenericFunc(\n\t\t\"JoinT\", \"innerKeySelectorFn\",\n\t\tinnerKeySelectorFn,\n\t\tsimpleParamValidator(newElemTypeSlice(new(genericType)), newElemTypeSlice(new(genericType))),\n\t)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tinnerKeySelectorFunc := func(item interface{}) interface{} {\n\t\treturn innerKeySelectorFuncGenericFunc.Call(item)\n\t}\n\n\tresultSelectorGenericFunc, err := newGenericFunc(\n\t\t\"JoinT\", \"resultSelectorFn\", resultSelectorFn,\n\t\tsimpleParamValidator(newElemTypeSlice(new(genericType), new(genericType)), newElemTypeSlice(new(genericType))),\n\t)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tresultSelectorFunc := func(outer interface{}, inner interface{}) interface{} {\n\t\treturn resultSelectorGenericFunc.Call(outer, inner)\n\t}\n\n\treturn q.Join(inner, outerKeySelectorFunc, innerKeySelectorFunc, resultSelectorFunc)\n}", "func (left *DataTable) RightJoin(right *DataTable, on []JoinOn) (*DataTable, error) {\n\treturn newJoinImpl(rightJoin, []*DataTable{left, right}, on).Compute()\n}", "func (session *Session) Join(joinOperator string, tablename interface{}, condition string, args ...interface{}) *Session {\n\tsession.Session = session.Session.Join(joinOperator, tablename, condition, args...)\n\treturn session\n}", "func LeftRotate(n *Node) *Node {\n\tr := n.Right\n\tif r == nil {\n\t\treturn n\n\t}\n\n\tn.Right = r.Left\n\tr.Left = n\n\n\treturn r\n}", "func NewJoinOn(table string, from string, to string) JoinQuery {\n\treturn NewJoinWith(\"JOIN\", table, from, to)\n}", "func (w *Wrapper) RightJoin(table interface{}, condition string) *Wrapper {\n\tw.saveJoin(table, \"RIGHT JOIN\", condition)\n\treturn w\n}", "func (sd *SelectDataset) joinTable(join exp.JoinExpression) *SelectDataset {\n\treturn sd.copy(sd.clauses.JoinsAppend(join))\n}", "func valueLeftOf(row []string, position int) *float64 {\n\tif position <= 0 {\n\t\treturn nil\n\t}\n\n\treturn stringToFloat(row[position-1])\n}", "func (f *predicateSqlizerFactory) addJoinsToSelectBuilder(q sq.SelectBuilder) sq.SelectBuilder {\n\tfor i, alias := range f.joinedTables {\n\t\taliasName := f.aliasName(alias.secondaryTable, i)\n\t\tjoinClause := fmt.Sprintf(\"%s AS %s ON %s = %s\",\n\t\t\tf.db.tableName(alias.secondaryTable), 
pq.QuoteIdentifier(aliasName),\n\t\t\tfullQuoteIdentifier(f.primaryTable, alias.primaryColumn),\n\t\t\tfullQuoteIdentifier(aliasName, alias.secondaryColumn))\n\t\tq = q.LeftJoin(joinClause)\n\t}\n\n\tif len(f.joinedTables) > 0 {\n\t\tq = q.Distinct()\n\t}\n\treturn q\n}", "func EqualsTableExpr(inA, inB TableExpr) bool {\n\tif inA == nil && inB == nil {\n\t\treturn true\n\t}\n\tif inA == nil || inB == nil {\n\t\treturn false\n\t}\n\tswitch a := inA.(type) {\n\tcase *AliasedTableExpr:\n\t\tb, ok := inB.(*AliasedTableExpr)\n\t\tif !ok {\n\t\t\treturn false\n\t\t}\n\t\treturn EqualsRefOfAliasedTableExpr(a, b)\n\tcase *JoinTableExpr:\n\t\tb, ok := inB.(*JoinTableExpr)\n\t\tif !ok {\n\t\t\treturn false\n\t\t}\n\t\treturn EqualsRefOfJoinTableExpr(a, b)\n\tcase *ParenTableExpr:\n\t\tb, ok := inB.(*ParenTableExpr)\n\t\tif !ok {\n\t\t\treturn false\n\t\t}\n\t\treturn EqualsRefOfParenTableExpr(a, b)\n\tdefault:\n\t\t// this should never happen\n\t\treturn false\n\t}\n}", "func (mp *JoinMultiplicity) JoinPreservesRightRows(op opt.Operator) bool {\n\tswitch op {\n\tcase opt.InnerJoinOp, opt.LeftJoinOp:\n\t\tbreak\n\n\tcase opt.FullJoinOp:\n\t\treturn true\n\n\tcase opt.SemiJoinOp:\n\t\tpanic(errors.AssertionFailedf(\"right rows are not included in the output of a %v\", op))\n\n\tdefault:\n\t\tpanic(errors.AssertionFailedf(\"unsupported operator: %v\", op))\n\t}\n\treturn mp.JoinFiltersMatchAllRightRows()\n}", "func TestLogicalJoinProps(t *testing.T) {\n\tevalCtx := tree.MakeTestingEvalContext(cluster.MakeTestingClusterSettings())\n\tf := norm.NewFactory(&evalCtx)\n\n\t// Disable all rules so that the expected operators are constructed.\n\tf.DisableOptimizations()\n\n\tcat := createLogPropsCatalog(t)\n\ta := f.Metadata().AddTable(cat.Table(\"a\"))\n\tb := f.Metadata().AddTable(cat.Table(\"b\"))\n\n\tjoinFunc := func(op opt.Operator, expected string) {\n\t\tt.Helper()\n\n\t\t// (Join (Scan a) (Scan b) (True))\n\t\tleftGroup := f.ConstructScan(f.InternScanOpDef(constructScanOpDef(f.Metadata(), a)))\n\t\trightGroup := f.ConstructScan(f.InternScanOpDef(constructScanOpDef(f.Metadata(), b)))\n\t\tonGroup := f.ConstructTrue()\n\t\toperands := norm.DynamicOperands{\n\t\t\tnorm.DynamicID(leftGroup),\n\t\t\tnorm.DynamicID(rightGroup),\n\t\t\tnorm.DynamicID(onGroup),\n\t\t}\n\t\tjoinGroup := f.DynamicConstruct(op, operands)\n\n\t\tev := memo.MakeNormExprView(f.Memo(), joinGroup)\n\t\ttestLogicalProps(t, f.Metadata(), ev, expected)\n\t}\n\n\tjoinFunc(opt.InnerJoinApplyOp, \"a.x:1(int!null) a.y:2(int) b.x:3(int!null) b.z:4(int!null)\\n\")\n\tjoinFunc(opt.LeftJoinApplyOp, \"a.x:1(int!null) a.y:2(int) b.x:3(int) b.z:4(int)\\n\")\n\tjoinFunc(opt.RightJoinApplyOp, \"a.x:1(int) a.y:2(int) b.x:3(int!null) b.z:4(int!null)\\n\")\n\tjoinFunc(opt.FullJoinApplyOp, \"a.x:1(int) a.y:2(int) b.x:3(int) b.z:4(int)\\n\")\n\tjoinFunc(opt.SemiJoinOp, \"a.x:1(int!null) a.y:2(int)\\n\")\n\tjoinFunc(opt.SemiJoinApplyOp, \"a.x:1(int!null) a.y:2(int)\\n\")\n\tjoinFunc(opt.AntiJoinOp, \"a.x:1(int!null) a.y:2(int)\\n\")\n\tjoinFunc(opt.AntiJoinApplyOp, \"a.x:1(int!null) a.y:2(int)\\n\")\n}", "func (o DashboardSpacingPtrOutput) Left() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *DashboardSpacing) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Left\n\t}).(pulumi.StringPtrOutput)\n}", "func (sd *SelectDataset) RightJoin(table exp.Expression, condition exp.JoinCondition) *SelectDataset {\n\treturn sd.joinTable(exp.NewConditionedJoinExpression(exp.RightJoinType, table, condition))\n}", "func (llrb *LLRB) moveredleft(nd 
*Llrbnode) *Llrbnode {\n\tllrb.flip(nd)\n\tif nd.right.left.isred() {\n\t\tnd.right = llrb.rotateright(nd.right)\n\t\tnd = llrb.rotateleft(nd)\n\t\tllrb.flip(nd)\n\t}\n\treturn nd\n}" ]
[ "0.72299886", "0.70459723", "0.704144", "0.70356774", "0.686112", "0.68354493", "0.67730963", "0.67380315", "0.66573316", "0.65042645", "0.65006894", "0.64356035", "0.64321244", "0.64166707", "0.6370883", "0.6337108", "0.6249756", "0.62033296", "0.60057163", "0.5978834", "0.5748164", "0.5709977", "0.5437819", "0.5316289", "0.5245883", "0.5214563", "0.51760066", "0.5006892", "0.49026057", "0.48613825", "0.47889638", "0.4779042", "0.47654355", "0.47556376", "0.47292498", "0.46929717", "0.466789", "0.46595502", "0.46440268", "0.46431684", "0.45985326", "0.45877194", "0.45818502", "0.45726216", "0.4569246", "0.45676073", "0.4554215", "0.45455393", "0.44836077", "0.44763252", "0.44671452", "0.44540322", "0.4453739", "0.44444734", "0.44297275", "0.44006667", "0.43994716", "0.43927836", "0.4384477", "0.43729746", "0.43726635", "0.43525684", "0.43497646", "0.43126357", "0.43100888", "0.43048084", "0.43047717", "0.42898688", "0.428755", "0.4286909", "0.42842364", "0.42790198", "0.4275073", "0.42640537", "0.42495903", "0.42366827", "0.42337653", "0.42307845", "0.42260367", "0.4223775", "0.4221228", "0.4214643", "0.42087644", "0.42079687", "0.42018682", "0.41969624", "0.41853786", "0.4182167", "0.4180139", "0.41705847", "0.41676247", "0.4159347", "0.41576993", "0.41569486", "0.4151906", "0.41503808", "0.41480076", "0.41440606", "0.4134178", "0.41269094" ]
0.7001835
4
LeftJoin the tables. tables[0] is used as the reference DataTable.
func LeftJoin(tables []*DataTable, on []JoinOn) (*DataTable, error) { return newJoinImpl(leftJoin, tables, on).Compute() }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (stmt *statement) LeftJoin(table, on string) Statement {\n\tstmt.join(\"LEFT JOIN \", table, on)\n\treturn stmt\n}", "func (left *DataTable) LeftJoin(right *DataTable, on []JoinOn) (*DataTable, error) {\n\treturn newJoinImpl(leftJoin, []*DataTable{left, right}, on).Compute()\n}", "func (w *Wrapper) LeftJoin(table interface{}, condition string) *Wrapper {\n\tw.saveJoin(table, \"LEFT JOIN\", condition)\n\treturn w\n}", "func (sd *SelectDataset) LeftJoin(table exp.Expression, condition exp.JoinCondition) *SelectDataset {\n\treturn sd.joinTable(exp.NewConditionedJoinExpression(exp.LeftJoinType, table, condition))\n}", "func (t *Table) LeftJoin(offset int32, count int, crit string, target interface{}) error {\n\tbody, err := t.LeftJoinRaw(offset, count, crit)\n\tif err == nil {\n\t\terr = json.Unmarshal(body, &target)\n\t}\n\treturn err\n}", "func (sd *SelectDataset) LeftOuterJoin(table exp.Expression, condition exp.JoinCondition) *SelectDataset {\n\treturn sd.joinTable(exp.NewConditionedJoinExpression(exp.LeftOuterJoinType, table, condition))\n}", "func LeftJoin(table, on string) QueryOption {\n\treturn newFuncQueryOption(func(wrapper *QueryWrapper) {\n\t\twrapper.joins = append(wrapper.joins, \"LEFT\", \"JOIN\", table, \"ON\", on)\n\t\twrapper.queryLen += 5\n\t})\n}", "func (b *Builder) LeftJoin(joinTable string, joinCond interface{}) *Builder {\r\n\treturn b.Join(\"LEFT\", joinTable, joinCond)\r\n}", "func (b *JoinBuilder) LeftJoin(other *Table) *JoinBuilder {\n\treturn makeJoinBuilder(\"LEFT JOIN\", b, other)\n}", "func (b *Builder) LeftJoin(joinTable, joinCond interface{}) *Builder {\n\treturn b.Join(\"LEFT\", joinTable, joinCond)\n}", "func NewLeftJoinOn(table string, from string, to string, filter ...FilterQuery) JoinQuery {\n\treturn NewJoinWith(\"LEFT JOIN\", table, from, to, filter...)\n}", "func (sd *SelectDataset) NaturalLeftJoin(table exp.Expression) *SelectDataset {\n\treturn sd.joinTable(exp.NewUnConditionedJoinExpression(exp.NaturalLeftJoinType, table))\n}", "func NewLeftJoinOn(table string, from string, to string) JoinQuery {\n\treturn NewJoinWith(\"LEFT JOIN\", table, from, to)\n}", "func (t *Table) LeftJoinRaw(offset int32, count int, crit string) ([]byte, error) {\n\tp := \"https://%s/api/getLeftJoin.sjs?json&object=%s&limit=%d,%d\"\n\tx := fmt.Sprintf(p, t.Host, t.Name, offset, count)\n\tif len(crit) != 0 {\n\t\tx = x + \"&condition=\" + FixCrit(crit)\n\t}\n\t_, body, err := t.Get(x)\n\treturn body, err\n}", "func NewLeftJoin(table string, filter ...FilterQuery) JoinQuery {\n\treturn NewLeftJoinOn(table, \"\", \"\", filter...)\n}", "func NewLeftJoin(table string) JoinQuery {\n\treturn NewLeftJoinOn(table, \"\", \"\")\n}", "func LeftJoin(lx, rx reflect.Value) reflect.Value {\n\tresult := reflect.MakeSlice(reflect.SliceOf(lx.Type().Elem()), 0, lx.Len())\n\trhash := hashSlice(rx)\n\n\tfor i := 0; i < lx.Len(); i++ {\n\t\tv := lx.Index(i)\n\t\t_, ok := rhash[v.Interface()]\n\t\tif !ok {\n\t\t\tresult = reflect.Append(result, v)\n\t\t}\n\t}\n\treturn result\n}", "func (t *Table) LeftJoinMap(offset int32, count int, crit string) ([]map[string]string, error) {\n\tvar a []map[string]string\n\tbody, err := t.LeftJoinRaw(offset, count, crit)\n\ta = unpackGJsonArray(body)\n\treturn a, err\n}", "func (f *predicateSqlizerFactory) createLeftJoin(secondaryTable string, primaryColumn string, secondaryColumn string) string {\n\tnewAlias := joinedTable{secondaryTable, primaryColumn, secondaryColumn}\n\tfor i, alias := range f.joinedTables {\n\t\tif alias.equal(newAlias) 
{\n\t\t\treturn f.aliasName(secondaryTable, i)\n\t\t}\n\t}\n\n\tf.joinedTables = append(f.joinedTables, newAlias)\n\treturn f.aliasName(secondaryTable, len(f.joinedTables)-1)\n}", "func OuterJoin(tables []*DataTable, on []JoinOn) (*DataTable, error) {\n\treturn newJoinImpl(outerJoin, tables, on).Compute()\n}", "func (mp *JoinMultiplicity) JoinPreservesLeftRows(op opt.Operator) bool {\n\tswitch op {\n\tcase opt.InnerJoinOp, opt.SemiJoinOp:\n\t\tbreak\n\n\tcase opt.LeftJoinOp, opt.FullJoinOp:\n\t\treturn true\n\n\tdefault:\n\t\tpanic(errors.AssertionFailedf(\"unsupported operator: %v\", op))\n\t}\n\treturn mp.JoinFiltersMatchAllLeftRows()\n}", "func (left *DataTable) OuterJoin(right *DataTable, on []JoinOn) (*DataTable, error) {\n\treturn newJoinImpl(outerJoin, []*DataTable{left, right}, on).Compute()\n}", "func LeftFromTable(timeTable map[string]map[rune][]uint) (ret []uint) {\n\tfor _, day := range timeTable {\n\t\tfor _, ids := range day {\n\t\t\tfor _, id := range ids {\n\t\t\t\tret = append(ret, id)\n\t\t\t}\n\t\t}\n\t}\n\treturn SliceUniqMap(ret)\n}", "func joinLeft(g []string) string {\n\tif g == nil || len(g) == 0 {\n\t\treturn \"\"\n\t}\n\tvar bf bytes.Buffer\n\tfor i := range g {\n\t\tc := strings.Index(g[i], \"#\")\n\t\tif c == -1 {\n\t\t\tbf.WriteString(g[i])\n\t\t} else {\n\t\t\tbf.WriteString(g[i][0:c])\n\t\t\tbreak\n\t\t}\n\t}\n\treturn string(bf.Bytes())\n}", "func (mp *JoinMultiplicity) JoinDoesNotDuplicateLeftRows(op opt.Operator) bool {\n\tswitch op {\n\tcase opt.InnerJoinOp, opt.LeftJoinOp, opt.FullJoinOp:\n\t\tbreak\n\n\tcase opt.SemiJoinOp:\n\t\treturn true\n\n\tdefault:\n\t\tpanic(errors.AssertionFailedf(\"unsupported operator: %v\", op))\n\t}\n\treturn mp.JoinFiltersDoNotDuplicateLeftRows()\n}", "func (stmt *statement) FullJoin(table, on string) Statement {\n\tstmt.join(\"FULL JOIN \", table, on)\n\treturn stmt\n}", "func (sd *SelectDataset) FullOuterJoin(table exp.Expression, condition exp.JoinCondition) *SelectDataset {\n\treturn sd.joinTable(exp.NewConditionedJoinExpression(exp.FullOuterJoinType, table, condition))\n}", "func TestAggregateLeftJoin(t *testing.T) {\n\tmcmp, closer := start(t)\n\tdefer closer()\n\n\tmcmp.Exec(\"insert into t1(t1_id, name, value, shardKey) values (11, 'r', 'r', 1), (3, 'r', 'r', 0)\")\n\tmcmp.Exec(\"insert into t2(id, shardKey) values (11, 1)\")\n\n\tmcmp.AssertMatchesNoOrder(\"SELECT t1.shardkey FROM t1 LEFT JOIN t2 ON t1.t1_id = t2.id\", `[[INT64(1)] [INT64(0)]]`)\n\tmcmp.AssertMatches(\"SELECT count(t1.shardkey) FROM t1 LEFT JOIN t2 ON t1.t1_id = t2.id\", `[[INT64(2)]]`)\n\tmcmp.AssertMatches(\"SELECT count(t2.shardkey) FROM t1 LEFT JOIN t2 ON t1.t1_id = t2.id\", `[[INT64(1)]]`)\n\tmcmp.AssertMatches(\"SELECT count(*) FROM t1 LEFT JOIN t2 ON t1.t1_id = t2.id\", `[[INT64(2)]]`)\n\tmcmp.AssertMatches(\"SELECT sum(t1.shardkey) FROM t1 LEFT JOIN t2 ON t1.t1_id = t2.id\", `[[DECIMAL(1)]]`)\n\tmcmp.AssertMatches(\"SELECT sum(t2.shardkey) FROM t1 LEFT JOIN t2 ON t1.t1_id = t2.id\", `[[DECIMAL(1)]]`)\n\tmcmp.AssertMatches(\"SELECT count(*) FROM t2 LEFT JOIN t1 ON t1.t1_id = t2.id WHERE IFNULL(t1.name, 'NOTSET') = 'r'\", `[[INT64(1)]]`)\n}", "func (sd *SelectDataset) FullJoin(table exp.Expression, condition exp.JoinCondition) *SelectDataset {\n\treturn sd.joinTable(exp.NewConditionedJoinExpression(exp.FullJoinType, table, condition))\n}", "func (mySelf SQLJoin) Outer() SQLJoin {\n\tmySelf.outer = true\n\treturn mySelf\n}", "func OuterJoin(lx, rx reflect.Value) reflect.Value {\n\tljoin := LeftJoin(lx, rx)\n\trjoin := RightJoin(lx, 
rx)\n\n\tresult := reflect.MakeSlice(reflect.SliceOf(lx.Type().Elem()), ljoin.Len()+rjoin.Len(), ljoin.Len()+rjoin.Len())\n\tfor i := 0; i < ljoin.Len(); i++ {\n\t\tresult.Index(i).Set(ljoin.Index(i))\n\t}\n\tfor i := 0; i < rjoin.Len(); i++ {\n\t\tresult.Index(ljoin.Len() + i).Set(rjoin.Index(i))\n\t}\n\n\treturn result\n}", "func (sd *SelectDataset) CrossJoin(table exp.Expression) *SelectDataset {\n\treturn sd.joinTable(exp.NewUnConditionedJoinExpression(exp.CrossJoinType, table))\n}", "func (mp *JoinMultiplicity) JoinFiltersMatchAllLeftRows() bool {\n\treturn mp.LeftMultiplicity&MultiplicityPreservedVal != 0\n}", "func (m Matrix3) LeftMultiply(q Matrix3) (result Matrix3) {\n\tconst size = 3\n\tfor r := 0; r < size; r++ {\n\t\tfor c := 0; c < size; c++ {\n\t\t\tresult[r*size+c] = q.GetRow(r).Dot(m.GetCol(c))\n\t\t}\n\t}\n\treturn\n}", "func (sd *SelectDataset) NaturalFullJoin(table exp.Expression) *SelectDataset {\n\treturn sd.joinTable(exp.NewUnConditionedJoinExpression(exp.NaturalFullJoinType, table))\n}", "func (mp *JoinMultiplicity) JoinFiltersDoNotDuplicateLeftRows() bool {\n\treturn mp.LeftMultiplicity&MultiplicityNotDuplicatedVal != 0\n}", "func (b *Builder) CrossJoin(joinTable string, joinCond interface{}) *Builder {\r\n\treturn b.Join(\"CROSS\", joinTable, joinCond)\r\n}", "func (b *Builder) FullJoin(joinTable string, joinCond interface{}) *Builder {\r\n\treturn b.Join(\"FULL\", joinTable, joinCond)\r\n}", "func NewFullJoinOn(table string, from string, to string) JoinQuery {\n\treturn NewJoinWith(\"FULL JOIN\", table, from, to)\n}", "func NewFullJoinOn(table string, from string, to string, filter ...FilterQuery) JoinQuery {\n\treturn NewJoinWith(\"FULL JOIN\", table, from, to, filter...)\n}", "func LeftOf(x ...interface{}) Either {\n\treturn newEither(false, x...)\n}", "func (m *Machine) Left() {\n\tfmt.Printf(\">> LEFT\\n\")\n\t// If we're at the 0th position, then we need to expand our tape array:\n\tif m.position == 0 {\n\t\tsize := len(m.Tape)\n\t\tm.Tape = append(make([]Cell, size), m.Tape...)\n\t\tm.position += size\n\t}\n\n\tm.position -= 1\n}", "func (stmt *statement) Join(table, on string) Statement {\n\tstmt.join(\"JOIN \", table, on)\n\treturn stmt\n}", "func (self Accessor) OuterJoin(expr interface{}) *SelectManager {\n\treturn self.From(self.Relation()).OuterJoin(expr)\n}", "func (b *Builder) CrossJoin(joinTable, joinCond interface{}) *Builder {\n\treturn b.Join(\"CROSS\", joinTable, joinCond)\n}", "func (w *Wrapper) NaturalJoin(table interface{}, condition string) *Wrapper {\n\tw.saveJoin(table, \"NATURAL JOIN\", condition)\n\treturn w\n}", "func (b *Builder) FullJoin(joinTable, joinCond interface{}) *Builder {\n\treturn b.Join(\"FULL\", joinTable, joinCond)\n}", "func (r1 *csvTable) Join(r2 rel.Relation, zero interface{}) rel.Relation {\n\treturn rel.NewJoin(r1, r2, zero)\n}", "func (sd *SelectDataset) joinTable(join exp.JoinExpression) *SelectDataset {\n\treturn sd.copy(sd.clauses.JoinsAppend(join))\n}", "func (sd *SelectDataset) NaturalJoin(table exp.Expression) *SelectDataset {\n\treturn sd.joinTable(exp.NewUnConditionedJoinExpression(exp.NaturalJoinType, table))\n}", "func PadLeft(orig []byte, pad byte, length int) []byte {\n\tif len(orig) >= length {\n\t\treturn orig\n\t}\n\tb := bytes.NewBuffer([]byte{})\n\ttoAdd := length - len(orig)\n\tfor i := 0; i < toAdd; i++ {\n\t\tb.WriteByte(pad)\n\t}\n\tb.Write(orig)\n\treturn b.Bytes()\n}", "func main() {\n\t// bigger table\n\tfmt.Printf(\"%X \\n\", JoinExample(\"./t/r0.tbl\", \"./t/r0.tbl\", []int{0}, []int{1})) 
// 767636031\n\tfmt.Printf(\"%X \\n\", JoinExample(\"./t/r0.tbl\", \"./t/r1.tbl\", []int{0}, []int{0})) // 49082128576\n\tfmt.Printf(\"%X \\n\", JoinExample(\"./t/r0.tbl\", \"./t/r1.tbl\", []int{1}, []int{1})) // 85306117839070\n\tfmt.Printf(\"%X \\n\", JoinExample(\"./t/r0.tbl\", \"./t/r2.tbl\", []int{0}, []int{0})) // 48860100254\n\tfmt.Printf(\"%X \\n\", JoinExample(\"./t/r0.tbl\", \"./t/r1.tbl\", []int{0, 1}, []int{0, 1})) //5552101\n\tfmt.Printf(\"%X \\n\", JoinExample(\"./t/r1.tbl\", \"./t/r2.tbl\", []int{0}, []int{0})) // 6331038719880\n\tfmt.Printf(\"%X \\n\", JoinExample(\"./t/r2.tbl\", \"./t/r2.tbl\", []int{0, 1}, []int{0, 1})) // 42056985375886\n}", "func FullJoin(table, on string) QueryOption {\n\treturn newFuncQueryOption(func(wrapper *QueryWrapper) {\n\t\twrapper.joins = append(wrapper.joins, \"FULL\", \"JOIN\", table, \"ON\", on)\n\t\twrapper.queryLen += 5\n\t})\n}", "func (node *TruncateTable) GetFromTables() TableNames {\n\treturn nil\n}", "func LeftPad(s string, padStr string, overallLen int) string {\n\tvar padCountInt = 1 + ((overallLen - len(padStr)) / len(padStr))\n\tvar retStr = strings.Repeat(padStr, padCountInt) + s\n\treturn retStr[(len(retStr) - overallLen):]\n}", "func NewFullJoin(table string) JoinQuery {\n\treturn NewFullJoinOn(table, \"\", \"\")\n}", "func (th *TableHandler) EmptyTables(c echo.Context) (err error) {\n\treqID := c.Response().Header().Get(echo.HeaderXRequestID)\n\t// Query database\n\terr = th.dbSvc.EmptyTables(c.Request().Context())\n\tif err != nil {\n\t\t// Error while querying database\n\t\treturn c.JSON(http.StatusInternalServerError, presenter.ErrResp(reqID, err))\n\t}\n\t// Return ok\n\treturn c.JSON(http.StatusOK, \"Tables emptied!\")\n}", "func (node *CreateTable) GetFromTables() TableNames {\n\treturn nil\n}", "func NewFullJoin(table string, filter ...FilterQuery) JoinQuery {\n\treturn NewFullJoinOn(table, \"\", \"\", filter...)\n}", "func JoinedIsNil() predicate.User {\n\treturn predicate.User(func(s *sql.Selector) {\n\t\ts.Where(sql.IsNull(s.C(FieldJoined)))\n\t})\n}", "func FillAliases(a *Aliases, tables []drivers.Table) {\n\tif a.Tables == nil {\n\t\ta.Tables = make(map[string]TableAlias)\n\t}\n\n\tfor _, t := range tables {\n\t\tif t.IsJoinTable {\n\t\t\tjt, ok := a.Tables[t.Name]\n\t\t\tif !ok {\n\t\t\t\ta.Tables[t.Name] = TableAlias{Relationships: make(map[string]RelationshipAlias)}\n\t\t\t} else if jt.Relationships == nil {\n\t\t\t\tjt.Relationships = make(map[string]RelationshipAlias)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\ttable := a.Tables[t.Name]\n\n\t\tif len(table.UpPlural) == 0 {\n\t\t\ttable.UpPlural = strmangle.TitleCase(strmangle.Plural(t.Name))\n\t\t}\n\t\tif len(table.UpSingular) == 0 {\n\t\t\ttable.UpSingular = strmangle.TitleCase(strmangle.Singular(t.Name))\n\t\t}\n\t\tif len(table.DownPlural) == 0 {\n\t\t\ttable.DownPlural = strmangle.CamelCase(strmangle.Plural(t.Name))\n\t\t}\n\t\tif len(table.DownSingular) == 0 {\n\t\t\ttable.DownSingular = strmangle.CamelCase(strmangle.Singular(t.Name))\n\t\t}\n\n\t\tif table.Columns == nil {\n\t\t\ttable.Columns = make(map[string]string)\n\t\t}\n\t\tif table.Relationships == nil {\n\t\t\ttable.Relationships = make(map[string]RelationshipAlias)\n\t\t}\n\n\t\tfor _, c := range t.Columns {\n\t\t\tif _, ok := table.Columns[c.Name]; !ok {\n\t\t\t\ttable.Columns[c.Name] = strmangle.TitleCase(c.Name)\n\t\t\t}\n\n\t\t\tr, _ := utf8.DecodeRuneInString(table.Columns[c.Name])\n\t\t\tif unicode.IsNumber(r) {\n\t\t\t\ttable.Columns[c.Name] = \"C\" + 
table.Columns[c.Name]\n\t\t\t}\n\t\t}\n\n\t\ta.Tables[t.Name] = table\n\n\t\tfor _, k := range t.FKeys {\n\t\t\tr := table.Relationships[k.Name]\n\t\t\tif len(r.Local) != 0 && len(r.Foreign) != 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tlocal, foreign := txtNameToOne(k)\n\t\t\tif len(r.Local) == 0 {\n\t\t\t\tr.Local = local\n\t\t\t}\n\t\t\tif len(r.Foreign) == 0 {\n\t\t\t\tr.Foreign = foreign\n\t\t\t}\n\n\t\t\ttable.Relationships[k.Name] = r\n\t\t}\n\n\t}\n\n\tfor _, t := range tables {\n\t\tif !t.IsJoinTable {\n\t\t\tcontinue\n\t\t}\n\n\t\ttable := a.Tables[t.Name]\n\n\t\tlhs := t.FKeys[0]\n\t\trhs := t.FKeys[1]\n\n\t\tlhsAlias, lhsOK := table.Relationships[lhs.Name]\n\t\trhsAlias, rhsOK := table.Relationships[rhs.Name]\n\n\t\tif lhsOK && len(lhsAlias.Local) != 0 && len(lhsAlias.Foreign) != 0 &&\n\t\t\trhsOK && len(rhsAlias.Local) != 0 && len(rhsAlias.Foreign) != 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\t// Here we actually reverse the meaning of local/foreign to be\n\t\t// consistent with the way normal one-to-many relationships are done.\n\t\t// That's to say local = the side with the foreign key. Now in a many-to-many\n\t\t// if we were able to not have a join table our foreign key say \"videos_id\"\n\t\t// would be on the tags table. Hence the relationships should look like:\n\t\t// videos_tags.relationships.fk_video_id.local = \"Tags\"\n\t\t// videos_tags.relationships.fk_video_id.foreign = \"Videos\"\n\t\t// Consistent, yes. Confusing? Also yes.\n\n\t\tlhsName, rhsName := txtNameToMany(lhs, rhs)\n\n\t\tif len(lhsAlias.Local) != 0 {\n\t\t\trhsName = lhsAlias.Local\n\t\t} else if len(rhsAlias.Local) != 0 {\n\t\t\tlhsName = rhsAlias.Local\n\t\t}\n\n\t\tif len(lhsAlias.Foreign) != 0 {\n\t\t\tlhsName = lhsAlias.Foreign\n\t\t} else if len(rhsAlias.Foreign) != 0 {\n\t\t\trhsName = rhsAlias.Foreign\n\t\t}\n\n\t\tif len(lhsAlias.Local) == 0 {\n\t\t\tlhsAlias.Local = rhsName\n\t\t}\n\t\tif len(lhsAlias.Foreign) == 0 {\n\t\t\tlhsAlias.Foreign = lhsName\n\t\t}\n\t\tif len(rhsAlias.Local) == 0 {\n\t\t\trhsAlias.Local = lhsName\n\t\t}\n\t\tif len(rhsAlias.Foreign) == 0 {\n\t\t\trhsAlias.Foreign = rhsName\n\t\t}\n\n\t\ttable.Relationships[lhs.Name] = lhsAlias\n\t\ttable.Relationships[rhs.Name] = rhsAlias\n\t}\n}", "func PadLeft(s string, padStr string, totalLen int) string {\n\tvar padCountInt int\n\tpadCountInt = 1 + ((totalLen - len(padStr)) / len(padStr))\n\tvar retStr = strings.Repeat(padStr, padCountInt) + s\n\treturn retStr[(len(retStr) - totalLen):]\n}", "func Using(fields ...string) []JoinOn {\n\tvar jon []JoinOn\n\tfor _, f := range fields {\n\t\tjon = append(jon, JoinOn{Table: \"*\", Field: f})\n\t}\n\treturn jon\n}", "func LeftPad(s string, padStr string, pLen int) string {\n\treturn strings.Repeat(padStr, pLen) + s\n}", "func (sd *SelectDataset) Join(table exp.Expression, condition exp.JoinCondition) *SelectDataset {\n\treturn sd.InnerJoin(table, condition)\n}", "func (tm *Term) FixLeft() error {\n\ttm.FixCols = ints.MaxInt(tm.FixCols-1, 0)\n\treturn tm.Draw()\n}", "func emptyTargetTable() TargetTable {\n\treturn TargetTable{}\n}", "func (g Game) Left() []Player {\n\tvar players []Player\n\tif isEmptyPlayer(g.LeftPlayerTwo) {\n\t\tplayers = make([]Player, 1)\n\t\tplayers[0] = g.LeftPlayerOne.Player\n\t} else {\n\t\tplayers = make([]Player, 2)\n\t\tplayers[0] = g.LeftPlayerOne.Player\n\t\tplayers[1] = g.LeftPlayerTwo.Player\n\t}\n\treturn players\n}", "func (tm *Term) ScrollLeft() error {\n\ttm.ColSt = ints.MaxInt(tm.ColSt-1, 0)\n\treturn tm.Draw()\n}", "func buildJoin(joins []Join, baseTable 
string) string {\n\tif len(joins) == 0 {\n\t\treturn \"\"\n\t}\n\n\tjoin := \"\"\n\tfor _, j := range joins {\n\t\tjoin += fmt.Sprintf(\n\t\t\t\" JOIN %s ON (%s.%s %s %s.%s)\",\n\t\t\tquote(j.table),\n\t\t\tquote(baseTable),\n\t\t\tquote(j.on.field),\n\t\t\tstring(j.on.comparison),\n\t\t\tquote(j.table),\n\t\t\tquote(j.on.value.(string)),\n\t\t)\n\t}\n\treturn join\n}", "func (fn *formulaFuncs) LEFT(argsList *list.List) formulaArg {\n\treturn fn.leftRight(\"LEFT\", argsList)\n}", "func PadLeft(str string, padStr string, padLen int) string {\n\treturn buildPadStr(str, padStr, padLen, true, false)\n}", "func (this *Dao) Join (joinType string, joinTable string, joinOn string) *Dao {\n\tjoin := fmt.Sprintf(\"%s JOIN %s ON %s\", strings.ToUpper(joinType), _table(joinTable), joinOn)\n\n\tthis.queryJoins = append(this.queryJoins, join)\n\treturn this\n}", "func PadLeft(src, padding string, length int) (string, error) {\n\n\tif length <= len(src) {\n\t\treturn \"\", errors.New(\"Target length must be greater than the length\" +\n\t\t\t\" of the original string.\")\n\t}\n\n\tif len(padding) != 1 {\n\t\treturn \"\", errors.New(\"Padding must be a single character.\")\n\t}\n\n\treturn strings.Repeat(padding, length-len(src)) + src, nil\n\n}", "func (t *largeFlatTable) Prefetch(ctx context.Context) {\n\tfor _, s := range t.srcTables {\n\t\ts.Prefetch(ctx)\n\t}\n}", "func ColumnLeft(name string) {\n\tidx := colIndex(name)\n\tif idx > 0 {\n\t\tswapCols(idx, idx-1)\n\t}\n}", "func PadLeft(s string, padStr string, lenStr int) string {\n\tvar padCount int\n\tpadCount = I.MaxOf(lenStr-len(s), 0)\n\treturn strings.Repeat(padStr, padCount) + s\n}", "func clearTables() {\n\tmodel.Tables = make([]*model.Table, 0)\n}", "func NewJoinOn(table string, from string, to string, filter ...FilterQuery) JoinQuery {\n\treturn NewJoinWith(\"JOIN\", table, from, to, filter...)\n}", "func (w *Wrapper) JoinWhere(table interface{}, args ...interface{}) *Wrapper {\n\tw.saveJoinCondition(\"AND\", table, args...)\n\treturn w\n}", "func JoinTableValues(cols ...string) string {\n\tcols = strings_.SliceTrimEmpty(cols...)\n\tif len(cols) == 0 {\n\t\t// https://dev.mysql.com/doc/refman/5.7/en/data-type-defaults.html\n\t\t// DEFAULT\n\t\treturn \"\"\n\t}\n\treturn strings.Join(TableValues(cols...), \",\")\n}", "func (l *levelHandler) initTables(tables []*table.Table) {\n\tl.Lock()\n\tdefer l.Unlock()\n\n\tl.tables = tables\n\n\t// Now that we have the tables setup,\n\tl.totalSize = 0\n\tfor _, t := range tables {\n\t\tl.totalSize += t.Size()\n\t}\n\n\tif l.level == 0 {\n\t\t// Key range will overlap. 
Just sort by fileID in ascending order because newer tables are at the end of\n\t\t// level 0.\n\t\tsort.Slice(l.tables, func(i, j int) bool {\n\t\t\treturn l.tables[i].FileId() < l.tables[j].FileId()\n\t\t})\n\t} else {\n\t\t// Sort tables by keys.\n\t\tsort.Slice(l.tables, func(i, j int) bool {\n\t\t\treturn z.CompareKeys(l.tables[i].Smallest(), l.tables[j].Smallest()) < 0\n\t\t})\n\t}\n}", "func NewJoinOn(table string, from string, to string) JoinQuery {\n\treturn NewJoinWith(\"JOIN\", table, from, to)\n}", "func PadLeft(s, pad string, width int) string {\n\tgap := width - DisplayWidth(s)\n\tif gap > 0 {\n\t\treturn strings.Repeat(string(pad), gap) + s\n\t}\n\treturn s\n}", "func PadLeft(s, pad string, width int) string {\n\tgap := width - DisplayWidth(s)\n\tif gap > 0 {\n\t\treturn strings.Repeat(string(pad), gap) + s\n\t}\n\treturn s\n}", "func JoinDatamart(joinLeft *JoinSpec, joinRight *JoinSpec, rightOrigin *model.DatasetOrigin) (string, *apiModel.FilteredData, error) {\n\tpipelineDesc, err := description.CreateDatamartAugmentPipeline(\"Join Preview\",\n\t\t\"Join to be reviewed by user\", rightOrigin.SearchResult, rightOrigin.Provenance)\n\tif err != nil {\n\t\treturn \"\", nil, err\n\t}\n\tdatasetLeftURI := env.ResolvePath(joinLeft.DatasetSource, joinLeft.DatasetPath)\n\n\treturn join(joinLeft, joinRight, \"\", pipelineDesc, []string{datasetLeftURI}, defaultSubmitter{}, false)\n}", "func prefixJoin(prefix string, array []string, separator string) (result string) {\n\tif len(array) == 0 {\n\t\treturn\n\t}\n\tfor index, val := range array {\n\t\tif index == 0 {\n\t\t\tresult = val\n\t\t} else {\n\t\t\tresult = join(result, concat(prefix, val), separator)\n\t\t}\n\t}\n\treturn\n}", "func leftRotate(arr []int, d int, n int) {\n\tif d == 0 {\n\t\treturn\n\t}\n\treverseArray(arr, 0, d-1)\n\treverseArray(arr, d, n-1)\n\treverseArray(arr, 0, n-1)\n}", "func (b *TestDriver) LeftFlip() (err error) {\n\tb.Publish(Rolling, true)\n\treturn nil\n}", "func resetTables() {\n\ttables := []string{\"time_sheets\", \"employee_reports\", \"pay_periods\"}\n\n\tfor _, table := range tables {\n\t\tapp.DB.Exec(fmt.Sprintf(\"DELETE FROM %s\", table))\n\t}\n}", "func (o JobCopyOutput) SourceTables() JobCopySourceTableArrayOutput {\n\treturn o.ApplyT(func(v JobCopy) []JobCopySourceTable { return v.SourceTables }).(JobCopySourceTableArrayOutput)\n}", "func (stmt *statement) RightJoin(table, on string) Statement {\n\tstmt.join(\"RIGHT JOIN \", table, on)\n\treturn stmt\n}", "func (o DashboardSpacingPtrOutput) Left() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *DashboardSpacing) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Left\n\t}).(pulumi.StringPtrOutput)\n}", "func (sopsTxtJustify TextJustify) Left() TextJustify {\n\n\tlockStrOpsTextJustify.Lock()\n\n\tdefer lockStrOpsTextJustify.Unlock()\n\n\treturn TextJustify(1)\n}", "func leftPad(s string, padStr string, pLen int) string {\n\tr := pLen - len(s)\n\tif r > 0 {\n\t\treturn strings.Repeat(padStr, pLen-len(s)) + s\n\t}\n\treturn s\n}", "func (n *nodeHeader) leftTrimPrefix(l uint16) {\n\tif l < 1 {\n\t\treturn\n\t}\n\tpLen, pBytes := n.prefixFields()\n\tif l > *pLen {\n\t\tl = *pLen\n\t}\n\tnewLen := *pLen - uint16(l)\n\tcopy(pBytes[0:newLen], pBytes[l:*pLen])\n\t*pLen = newLen\n}", "func JoinColumns(cols ...string) string {\n\treturn JoinTableColumns(\"\", cols...)\n}", "func (tm *TableModel) SelectNoJoin() string {\n\tcache := SELECT_NOJOIN_CACHE.Get(tm.CacheName)\n\tquery_str, ok := cache.(string)\n\tif !ok {\n\t\tqueries := 
[]string{}\n\t\tpos := 1\n\t\tfor idx, field := range tm.Fields {\n\t\t\tif field.Hide || field.SqlHide {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tswitch field.Key {\n\t\t\tcase `password`:\n\t\t\t\tcontinue\n\t\t\tcase `id`:\n\t\t\t\tquery := `x1.id::TEXT \"id\"`\n\t\t\t\tqueries = append(queries, query)\n\t\t\t\ttm.Fields[idx].SqlColPos = pos\n\t\t\t\tpos += 1\n\t\t\t\tcontinue\n\t\t\tcase `created_at`, `modified_at`, `updated_at`, `deleted_at`, `restored_at`:\n\t\t\t\ttm.Fields[idx].Label = S.ToTitle(S.Replace(field.Key, `_`, ` `))\n\t\t\t\ttm.Fields[idx].Type = `datetime`\n\t\t\t}\n\t\t\tif field.CustomQuery != `` {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tquery := field.SqlColumn() + ` ` + ZZ(field.Key)\n\t\t\tqueries = append(queries, query)\n\t\t\ttm.Fields[idx].SqlColPos = pos\n\t\t\tpos += 1\n\t\t}\n\t\tquery_str = A.StrJoin(queries, \"\\n, \")\n\t\tSELECT_NOJOIN_CACHE.Set(tm.CacheName, query_str)\n\t}\n\treturn query_str\n}", "func (l *Label) rotateLeft(g *Graph) uint16 {\n Assert(nilLabel, l != nil)\n Assert(nilGraph, g != nil)\n Assert(nilLabelStore, g.labelStore != nil)\n Assert(nilLabelWriteMap, g.labelStore.writes != nil)\n \n // perform the rotation\n right, _ := l.right(g) // TODO do not ignore error\n l.r = right.l\n right.l = l.Id\n \n l.setHeight(g)\n right.setHeight(g)\n \n // make sure the changes are written\n g.labelStore.writes[l.Id] = l\n g.labelStore.writes[right.Id] = right\n \n return right.Id\n}", "func (o DashboardSpacingOutput) Left() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v DashboardSpacing) *string { return v.Left }).(pulumi.StringPtrOutput)\n}" ]
[ "0.7873163", "0.76610047", "0.7514192", "0.7485381", "0.7484619", "0.7296211", "0.7200829", "0.7158153", "0.7113473", "0.71128154", "0.66189706", "0.6592026", "0.65634686", "0.6533419", "0.6475337", "0.6456779", "0.64426446", "0.63812834", "0.6122921", "0.60929805", "0.607487", "0.604373", "0.5660946", "0.55513275", "0.54756564", "0.52410173", "0.51612407", "0.5080959", "0.50247854", "0.4961187", "0.4923821", "0.48008248", "0.4789282", "0.47611392", "0.47508404", "0.47489274", "0.47383466", "0.47342694", "0.47029877", "0.46947727", "0.46906537", "0.46875104", "0.46666095", "0.46634036", "0.46472603", "0.46340537", "0.46194407", "0.46098074", "0.45880744", "0.45793363", "0.4564216", "0.45465615", "0.45221367", "0.449071", "0.44836226", "0.44800872", "0.44487268", "0.44410983", "0.44144097", "0.4401392", "0.43798673", "0.4378062", "0.43626633", "0.43482876", "0.4325313", "0.4322908", "0.43213147", "0.43150502", "0.43093693", "0.43089575", "0.43088415", "0.43053895", "0.42981717", "0.42939538", "0.42805606", "0.42766178", "0.42343327", "0.42222986", "0.41884091", "0.41866896", "0.4182063", "0.41781607", "0.41726115", "0.4164084", "0.4164084", "0.41639537", "0.41635606", "0.41563445", "0.41459137", "0.41400716", "0.4136468", "0.4128594", "0.41153583", "0.41083694", "0.4106543", "0.40784147", "0.40766752", "0.40706512", "0.40603593", "0.4059418" ]
0.8205081
0
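Several of the negatives in the record above build JOIN clauses by plain string concatenation (the buildJoin helper, the statement join methods, JoinTableColumns). As a minimal self-contained sketch of that pattern: the join/on structs below are hypothetical stand-ins inferred from the buildJoin snippet in the negatives, not a confirmed API.

package main

import (
	"fmt"
	"strings"
)

// Hypothetical stand-ins for the shapes buildJoin consumes in the snippet above.
type on struct {
	field      string
	comparison string
	value      string
}

type join struct {
	table string
	on    on
}

// buildJoinSketch mirrors the quoted-identifier builder from the negatives:
// each entry renders as ` JOIN "t" ON ("base"."field" cmp "t"."value")`.
func buildJoinSketch(joins []join, baseTable string) string {
	var b strings.Builder
	for _, j := range joins {
		fmt.Fprintf(&b, " JOIN %q ON (%q.%q %s %q.%q)",
			j.table, baseTable, j.on.field, j.on.comparison, j.table, j.on.value)
	}
	return b.String()
}

func main() {
	clause := buildJoinSketch([]join{{table: "t2", on: on{"id", "=", "t1_id"}}}, "t1")
	fmt.Println(clause) // prints: JOIN "t2" ON ("t1"."id" = "t2"."t1_id")
}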
RightJoin returns all records from the right table (table2) and the matched records from the left table (table1). The result is NULL on the left side when there is no match. RightJoin transforms an expr column to a raw column.
func (left *DataTable) RightJoin(right *DataTable, on []JoinOn) (*DataTable, error) { return newJoinImpl(rightJoin, []*DataTable{left, right}, on).Compute() }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (stmt *statement) RightJoin(table, on string) Statement {\n\tstmt.join(\"RIGHT JOIN \", table, on)\n\treturn stmt\n}", "func (w *Wrapper) RightJoin(table interface{}, condition string) *Wrapper {\n\tw.saveJoin(table, \"RIGHT JOIN\", condition)\n\treturn w\n}", "func (sd *SelectDataset) RightJoin(table exp.Expression, condition exp.JoinCondition) *SelectDataset {\n\treturn sd.joinTable(exp.NewConditionedJoinExpression(exp.RightJoinType, table, condition))\n}", "func RightJoin(tables []*DataTable, on []JoinOn) (*DataTable, error) {\n\treturn newJoinImpl(rightJoin, tables, on).Compute()\n}", "func (b *Builder) RightJoin(joinTable string, joinCond interface{}) *Builder {\r\n\treturn b.Join(\"RIGHT\", joinTable, joinCond)\r\n}", "func (b *Builder) RightJoin(joinTable, joinCond interface{}) *Builder {\n\treturn b.Join(\"RIGHT\", joinTable, joinCond)\n}", "func (sd *SelectDataset) RightOuterJoin(table exp.Expression, condition exp.JoinCondition) *SelectDataset {\n\treturn sd.joinTable(exp.NewConditionedJoinExpression(exp.RightOuterJoinType, table, condition))\n}", "func (b *JoinBuilder) RightJoin(other *Table) *JoinBuilder {\n\treturn makeJoinBuilder(\"RIGHT JOIN\", b, other)\n}", "func RightJoin(lx, rx reflect.Value) reflect.Value { return LeftJoin(rx, lx) }", "func (sd *SelectDataset) NaturalRightJoin(table exp.Expression) *SelectDataset {\n\treturn sd.joinTable(exp.NewUnConditionedJoinExpression(exp.NaturalRightJoinType, table))\n}", "func RightJoin(table, on string) QueryOption {\n\treturn newFuncQueryOption(func(wrapper *QueryWrapper) {\n\t\twrapper.joins = append(wrapper.joins, \"RIGHT\", \"JOIN\", table, \"ON\", on)\n\t\twrapper.queryLen += 5\n\t})\n}", "func NewRightJoin(table string) JoinQuery {\n\treturn NewRightJoinOn(table, \"\", \"\")\n}", "func NewRightJoin(table string, filter ...FilterQuery) JoinQuery {\n\treturn NewRightJoinOn(table, \"\", \"\", filter...)\n}", "func NewRightJoinOn(table string, from string, to string, filter ...FilterQuery) JoinQuery {\n\treturn NewJoinWith(\"RIGHT JOIN\", table, from, to, filter...)\n}", "func NewRightJoinOn(table string, from string, to string) JoinQuery {\n\treturn NewJoinWith(\"RIGHT JOIN\", table, from, to)\n}", "func (mySelf SQLJoin) Right() SQLJoin {\n\tmySelf.right = true\n\treturn mySelf\n}", "func (mp *JoinMultiplicity) JoinPreservesRightRows(op opt.Operator) bool {\n\tswitch op {\n\tcase opt.InnerJoinOp, opt.LeftJoinOp:\n\t\tbreak\n\n\tcase opt.FullJoinOp:\n\t\treturn true\n\n\tcase opt.SemiJoinOp:\n\t\tpanic(errors.AssertionFailedf(\"right rows are not included in the output of a %v\", op))\n\n\tdefault:\n\t\tpanic(errors.AssertionFailedf(\"unsupported operator: %v\", op))\n\t}\n\treturn mp.JoinFiltersMatchAllRightRows()\n}", "func (dr *DataRow) joinOnColumnIndexRight(rightRow DataRow, onColumnIndexRight int) DataRow {\n\toutItems := make([]DataItem, 0, len(dr.Items)+len(rightRow.Items)-1)\n\t// append left row\n\toutItems = append(outItems, dr.Items...)\n\t// append right row except on column\n\toutItems = append(outItems, rightRow.Items[:onColumnIndexRight]...)\n\toutItems = append(outItems, rightRow.Items[onColumnIndexRight+1:]...)\n\n\treturn DataRow{\n\t\tItems: outItems,\n\t}\n}", "func (fn *formulaFuncs) RIGHT(argsList *list.List) formulaArg {\n\treturn fn.leftRight(\"RIGHT\", argsList)\n}", "func (mp *JoinMultiplicity) JoinDoesNotDuplicateRightRows(op opt.Operator) bool {\n\tswitch op {\n\tcase opt.InnerJoinOp, opt.LeftJoinOp, opt.FullJoinOp:\n\t\tbreak\n\n\tcase 
opt.SemiJoinOp:\n\t\tpanic(errors.AssertionFailedf(\"right rows are not included in the output of a %v\", op))\n\n\tdefault:\n\t\tpanic(errors.AssertionFailedf(\"unsupported operator: %v\", op))\n\t}\n\treturn mp.JoinFiltersDoNotDuplicateRightRows()\n}", "func (left *DataTable) InnerJoin(right *DataTable, on []JoinOn) (*DataTable, error) {\n\treturn newJoinImpl(innerJoin, []*DataTable{left, right}, on).Compute()\n}", "func (left *DataTable) LeftJoin(right *DataTable, on []JoinOn) (*DataTable, error) {\n\treturn newJoinImpl(leftJoin, []*DataTable{left, right}, on).Compute()\n}", "func (llrb *LLRB) moveredright(nd *Llrbnode) *Llrbnode {\n\tllrb.flip(nd)\n\tif nd.left.left.isred() {\n\t\tnd = llrb.rotateright(nd)\n\t\tllrb.flip(nd)\n\t}\n\treturn nd\n}", "func (r1 *csvTable) Join(r2 rel.Relation, zero interface{}) rel.Relation {\n\treturn rel.NewJoin(r1, r2, zero)\n}", "func (t *TableExpr) JoinSQL() string {\n\tif t.JoinConditions != \"\" && t.JoinType != \"\" {\n\t\treturn \" \" + t.JoinType + \" \" + t.Table.SQL() + \" on \" +\n\t\t\tt.JoinConditions\n\t}\n\treturn \", \" + t.Table.SQL()\n}", "func (stmt *statement) LeftJoin(table, on string) Statement {\n\tstmt.join(\"LEFT JOIN \", table, on)\n\treturn stmt\n}", "func (stmt *statement) FullJoin(table, on string) Statement {\n\tstmt.join(\"FULL JOIN \", table, on)\n\treturn stmt\n}", "func (self Accessor) InnerJoin(expr interface{}) *SelectManager {\n\treturn self.From(self.Relation()).InnerJoin(expr)\n}", "func valueRightOf(row []string, position int) *float64 {\n\tif position >= len(row)-1 {\n\t\treturn nil\n\t}\n\n\treturn stringToFloat(row[position+1])\n}", "func (m *Machine) Right() {\n\tfmt.Printf(\">> RIGHT\\n\")\n\t// If we're at the last position, then we need to expand our tape array:\n\tif m.position == (len(m.Tape) - 1) {\n\t\tsize := len(m.Tape)\n\t\tm.Tape = append(m.Tape, make([]Cell, size)...)\n\t}\n\n\tm.position += 1\n}", "func (b *Builder) FullJoin(joinTable string, joinCond interface{}) *Builder {\r\n\treturn b.Join(\"FULL\", joinTable, joinCond)\r\n}", "func RightOf(x ...interface{}) Either {\n\treturn newEither(true, x...)\n}", "func (b *Builder) FullJoin(joinTable, joinCond interface{}) *Builder {\n\treturn b.Join(\"FULL\", joinTable, joinCond)\n}", "func (bo BinaryOperator) EvaluateLeftAndRight(vars map[string]interface{}, ctx interface{}, funcs FunctionMap, quotes []string) (map[string]interface{}, interface{}, interface{}, error) {\n\n\tvars, lv, err := bo.Left.Evaluate(vars, ctx, funcs, quotes)\n\tif err != nil {\n\t\treturn vars, false, false, err\n\t}\n\tvars, rv, err := bo.Right.Evaluate(vars, ctx, funcs, quotes)\n\tif err != nil {\n\t\treturn vars, false, false, err\n\t}\n\treturn vars, lv, rv, nil\n}", "func JoinDistil(dataStorage apiModel.DataStorage, joinLeft *JoinSpec, joinRight *JoinSpec, joinPairs []*JoinPair, joinType string, returnRaw bool) (string, *apiModel.FilteredData, error) {\n\tif !isValidJoinType(joinType) {\n\t\treturn \"\", nil, errors.Errorf(\"unsupported join type\")\n\t}\n\tisKey := false\n\tvarsLeftMapUpdated := mapDistilJoinVars(joinLeft.UpdatedVariables)\n\tvarsRightMapUpdated := mapDistilJoinVars(joinRight.UpdatedVariables)\n\tjoins := make([]*description.Join, len(joinPairs))\n\trightVars := make([]*model.Variable, len(joinPairs))\n\tfor i := range joinPairs {\n\t\tjoins[i] = &description.Join{\n\t\t\tLeft: varsLeftMapUpdated[joinPairs[i].Left],\n\t\t\tRight: varsRightMapUpdated[joinPairs[i].Right],\n\t\t\tAccuracy: joinPairs[i].Accuracy,\n\t\t\tAbsolute: 
joinPairs[i].AbsoluteAccuracy,\n\t\t}\n\t\trightVars[i] = varsRightMapUpdated[joinPairs[i].Right]\n\n\t\t// assume groupings are valid keys for the join\n\t\tif joins[i].Right.IsGrouping() {\n\t\t\tisKey = true\n\t\t}\n\t}\n\tvar err error\n\tif !isKey {\n\t\tisKey, err = dataStorage.IsKey(joinRight.DatasetID, joinRight.ExistingMetadata.StorageName, rightVars)\n\t\tif err != nil {\n\t\t\treturn \"\", nil, err\n\t\t}\n\t}\n\tif !isKey {\n\t\treturn \"\", nil, errors.Errorf(\"specified right join columns do not specify a unique key\")\n\t}\n\n\trightExcludes := generateRightExcludes(joinLeft.UpdatedVariables, joinRight.UpdatedVariables, joinPairs)\n\tjoinInfo := &description.JoinDescription{\n\t\tJoins: joins,\n\t\tLeftExcludes: []*model.Variable{},\n\t\tLeftVariables: joinLeft.UpdatedVariables,\n\t\tRightExcludes: rightExcludes,\n\t\tRightVariables: joinRight.UpdatedVariables,\n\t\tType: joinType,\n\t}\n\tpipelineDesc, err := description.CreateJoinPipeline(\"Joiner\", \"Join existing data\", joinInfo)\n\tif err != nil {\n\t\treturn \"\", nil, err\n\t}\n\n\tdatasetLeftURI := joinLeft.DatasetPath\n\tdatasetRightURI := joinRight.DatasetPath\n\n\treturn join(joinLeft, joinRight, \"\", pipelineDesc, []string{datasetLeftURI, datasetRightURI}, defaultSubmitter{}, returnRaw)\n}", "func (o DashboardSpacingPtrOutput) Right() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *DashboardSpacing) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Right\n\t}).(pulumi.StringPtrOutput)\n}", "func (f *predicateSqlizerFactory) createLeftJoin(secondaryTable string, primaryColumn string, secondaryColumn string) string {\n\tnewAlias := joinedTable{secondaryTable, primaryColumn, secondaryColumn}\n\tfor i, alias := range f.joinedTables {\n\t\tif alias.equal(newAlias) {\n\t\t\treturn f.aliasName(secondaryTable, i)\n\t\t}\n\t}\n\n\tf.joinedTables = append(f.joinedTables, newAlias)\n\treturn f.aliasName(secondaryTable, len(f.joinedTables)-1)\n}", "func (h *joinPlanningHelper) remapOnExpr(\n\tplanCtx *PlanningCtx, onCond tree.TypedExpr,\n) (execinfrapb.Expression, error) {\n\tif onCond == nil {\n\t\treturn execinfrapb.Expression{}, nil\n\t}\n\n\tjoinColMap := make([]int, h.numLeftOutCols+h.numRightOutCols)\n\tidx := 0\n\tleftCols := 0\n\tfor i := 0; i < h.numLeftOutCols; i++ {\n\t\tjoinColMap[idx] = h.leftPlanToStreamColMap[i]\n\t\tif h.leftPlanToStreamColMap[i] != -1 {\n\t\t\tleftCols++\n\t\t}\n\t\tidx++\n\t}\n\tfor i := 0; i < h.numRightOutCols; i++ {\n\t\tjoinColMap[idx] = leftCols + h.rightPlanToStreamColMap[i]\n\t\tidx++\n\t}\n\n\treturn physicalplan.MakeExpression(onCond, planCtx, joinColMap)\n}", "func rotateRight(n *rbnode) *rbnode {\n\tif n.left == nil {\n\t\treturn n\n\t}\n\tl := n.left\n\tconnectLeft(n, l.right)\n\treplaceChild(n, l)\n\tconnectRight(l, n)\n\tn.c, l.c = l.c, n.c\n\treturn l\n}", "func (w *Wrapper) LeftJoin(table interface{}, condition string) *Wrapper {\n\tw.saveJoin(table, \"LEFT JOIN\", condition)\n\treturn w\n}", "func (fn *formulaFuncs) RIGHTB(argsList *list.List) formulaArg {\n\treturn fn.leftRight(\"RIGHTB\", argsList)\n}", "func (matrix Matrix4) Right() vector.Vector {\n\treturn vector.Vector{\n\t\tmatrix[0][0],\n\t\tmatrix[0][1],\n\t\tmatrix[0][2],\n\t}.Unit()\n}", "func (sd *SelectDataset) FullJoin(table exp.Expression, condition exp.JoinCondition) *SelectDataset {\n\treturn sd.joinTable(exp.NewConditionedJoinExpression(exp.FullJoinType, table, condition))\n}", "func (ubt *ubtTree) Right() UnboundBinaryTree {\n\tif ubt.right == nil {\n\t\tubt.right = 
&ubtTree{}\n\t}\n\treturn ubt.right\n}", "func (mp *JoinMultiplicity) JoinFiltersMatchAllRightRows() bool {\n\treturn mp.RightMultiplicity&MultiplicityPreservedVal != 0\n}", "func (sd *SelectDataset) Join(table exp.Expression, condition exp.JoinCondition) *SelectDataset {\n\treturn sd.InnerJoin(table, condition)\n}", "func (w *Wrapper) InnerJoin(table interface{}, condition string) *Wrapper {\n\tw.saveJoin(table, \"INNER JOIN\", condition)\n\treturn w\n}", "func decendRight(root *SnailfishNumber) *SnailfishNumber {\n\tfor root.rhs != nil {\n\t\troot = root.rhs\n\t}\n\treturn root\n}", "func ColumnRight(name string) {\n\tidx := colIndex(name)\n\tif idx < len(GlobalColumns)-1 {\n\t\tswapCols(idx, idx+1)\n\t}\n}", "func BreakExpressionInLHSandRHS(\n\tctx *plancontext.PlanningContext,\n\texpr sqlparser.Expr,\n\tlhs semantics.TableSet,\n) (col JoinColumn, err error) {\n\trewrittenExpr := sqlparser.CopyOnRewrite(expr, nil, func(cursor *sqlparser.CopyOnWriteCursor) {\n\t\tnode, ok := cursor.Node().(*sqlparser.ColName)\n\t\tif !ok {\n\t\t\treturn\n\t\t}\n\t\tdeps := ctx.SemTable.RecursiveDeps(node)\n\t\tif deps.IsEmpty() {\n\t\t\terr = vterrors.VT13001(\"unknown column. has the AST been copied?\")\n\t\t\tcursor.StopTreeWalk()\n\t\t\treturn\n\t\t}\n\t\tif !deps.IsSolvedBy(lhs) {\n\t\t\treturn\n\t\t}\n\n\t\tnode.Qualifier.Qualifier = sqlparser.NewIdentifierCS(\"\")\n\t\tcol.LHSExprs = append(col.LHSExprs, node)\n\t\tbvName := node.CompliantName()\n\t\tcol.BvNames = append(col.BvNames, bvName)\n\t\targ := sqlparser.NewArgument(bvName)\n\t\t// we are replacing one of the sides of the comparison with an argument,\n\t\t// but we don't want to lose the type information we have, so we copy it over\n\t\tctx.SemTable.CopyExprInfo(node, arg)\n\t\tcursor.Replace(arg)\n\t}, nil).(sqlparser.Expr)\n\n\tif err != nil {\n\t\treturn JoinColumn{}, err\n\t}\n\tctx.JoinPredicates[expr] = append(ctx.JoinPredicates[expr], rewrittenExpr)\n\tcol.RHSExpr = rewrittenExpr\n\treturn\n}", "func (o DashboardSpacingOutput) Right() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v DashboardSpacing) *string { return v.Right }).(pulumi.StringPtrOutput)\n}", "func (b *Bound) Right() float64 {\n\treturn b.ne[0]\n}", "func RightRotate(n *Node) *Node {\n\tl := n.Left\n\tif l == nil {\n\t\treturn n\n\t}\n\n\tn.Left = l.Right\n\tl.Right = n\n\n\treturn l\n}", "func (c *expression) extendRight(n *expression) *expression {\n\n\tc.right = n\n\tn.parent = c\n\n\tfmt.Printf(\"++++++++++++++++++++++++++ extendRight FROM %s -> [%s] \\n\", c.opr, n.opr)\n\treturn n\n}", "func trimRight(r *syntax.Regexp) (bool, *syntax.Regexp) {\n\tif eqSuffixAnyRegex(r, patDotStar, patNullEndDotStar) {\n\t\ti := len(r.Sub) - 1\n\t\ttmp := *r\n\t\ttmp.Sub = tmp.Sub[0:i]\n\t\treturn true, &tmp\n\t}\n\n\treturn false, r\n}", "func (b *Builder) LeftJoin(joinTable, joinCond interface{}) *Builder {\n\treturn b.Join(\"LEFT\", joinTable, joinCond)\n}", "func ExtractJoinEquality(\n\tleftCols, rightCols opt.ColSet, condition opt.ScalarExpr,\n) (ok bool, left, right opt.ColumnID) {\n\tlvar, rvar, ok := isVarEquality(condition)\n\tif !ok {\n\t\treturn false, 0, 0\n\t}\n\n\t// Don't allow mixed types (see #22519).\n\tif !lvar.DataType().Equivalent(rvar.DataType()) {\n\t\treturn false, 0, 0\n\t}\n\n\tif leftCols.Contains(lvar.Col) && rightCols.Contains(rvar.Col) {\n\t\treturn true, lvar.Col, rvar.Col\n\t}\n\tif leftCols.Contains(rvar.Col) && rightCols.Contains(lvar.Col) {\n\t\treturn true, rvar.Col, lvar.Col\n\t}\n\n\treturn false, 0, 0\n}", "func (b *JoinBuilder) 
LeftJoin(other *Table) *JoinBuilder {\n\treturn makeJoinBuilder(\"LEFT JOIN\", b, other)\n}", "func (l *Label) right(g *Graph) (*Label, *DataError) {\n Assert (nilLabel, l != nil)\n Assert(nilGraph, g != nil)\n Assert(nilLabelStore, g.labelStore != nil)\n \n return g.labelStore.findAllowZero(l.r)\n}", "func (sd *SelectDataset) joinTable(join exp.JoinExpression) *SelectDataset {\n\treturn sd.copy(sd.clauses.JoinsAppend(join))\n}", "func (c hashChainer) chainInnerRight(seed []byte, proof [][]byte, index int64) []byte {\n\tfor i, h := range proof {\n\t\tif (index>>uint(i))&1 == 1 {\n\t\t\tseed = c.hasher.HashChildren(h, seed)\n\t\t}\n\t}\n\treturn seed\n}", "func (l *Label) rotateRight(g *Graph) uint16 {\n Assert(nilLabel, l != nil)\n Assert(nilGraph, g != nil)\n Assert(nilLabelStore, g.labelStore != nil)\n Assert(nilLabelWriteMap, g.labelStore.writes != nil)\n \n // perform the rotation\n left, _ := l.left(g) // TODO do not ignore error\n l.l = left.r\n left.r = l.Id\n \n\n l.setHeight(g)\n left.setHeight(g)\n \n // make sure the changes are written\n g.labelStore.writes[l.Id] = l\n g.labelStore.writes[left.Id] = left\n \n return left.Id\n}", "func InnerJoin(tables []*DataTable, on []JoinOn) (*DataTable, error) {\n\treturn newJoinImpl(innerJoin, tables, on).Compute()\n}", "func (b *Builder) LeftJoin(joinTable string, joinCond interface{}) *Builder {\r\n\treturn b.Join(\"LEFT\", joinTable, joinCond)\r\n}", "func (tm *Term) FixRight() error {\n\ttm.FixCols++ // no obvious max\n\treturn tm.Draw()\n}", "func (mp *JoinMultiplicity) JoinFiltersDoNotDuplicateRightRows() bool {\n\treturn mp.RightMultiplicity&MultiplicityNotDuplicatedVal != 0\n}", "func LeftJoin(tables []*DataTable, on []JoinOn) (*DataTable, error) {\n\treturn newJoinImpl(leftJoin, tables, on).Compute()\n}", "func JoinTableValues(cols ...string) string {\n\tcols = strings_.SliceTrimEmpty(cols...)\n\tif len(cols) == 0 {\n\t\t// https://dev.mysql.com/doc/refman/5.7/en/data-type-defaults.html\n\t\t// DEFAULT\n\t\treturn \"\"\n\t}\n\treturn strings.Join(TableValues(cols...), \",\")\n}", "func (board *Board) Right() *Board {\n\tblankPosition := board.PositionOfBlank()\n\tif (blankPosition+1)%board.Dimension == 0 {\n\t\treturn nil\n\t}\n\n\tclone := board.Clone()\n\tclone.move = RIGHT\n\ttile := clone.GetTileAt(blankPosition + 1)\n\tclone.SetTileAt(blankPosition+1, BLANK)\n\tclone.SetTileAt(blankPosition, tile)\n\tclone.cost = clone.g + clone.Cost()\n\treturn clone\n}", "func right(x uint, n uint) uint {\n\tif level(x) == 0 {\n\t\treturn x\n\t}\n\n\tr := x ^ (0x03 << (level(x) - 1))\n\tfor r > 2*(n-1) {\n\t\tr = left(r)\n\t}\n\treturn r\n}", "func ExtractRemainingJoinFilters(on FiltersExpr, leftEq, rightEq opt.ColList) FiltersExpr {\n\tif len(leftEq) != len(rightEq) {\n\t\tpanic(errors.AssertionFailedf(\"leftEq and rightEq have different lengths\"))\n\t}\n\tif len(leftEq) == 0 {\n\t\treturn on\n\t}\n\tvar newFilters FiltersExpr\n\tfor i := range on {\n\t\tleftVar, rightVar, ok := isVarEquality(on[i].Condition)\n\t\tif ok {\n\t\t\ta, b := leftVar.Col, rightVar.Col\n\t\t\tfound := false\n\t\t\tfor j := range leftEq {\n\t\t\t\tif (a == leftEq[j] && b == rightEq[j]) ||\n\t\t\t\t\t(a == rightEq[j] && b == leftEq[j]) {\n\t\t\t\t\tfound = true\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tif found {\n\t\t\t\t// Skip this condition.\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\tif newFilters == nil {\n\t\t\tnewFilters = make(FiltersExpr, 0, len(on)-i)\n\t\t}\n\t\tnewFilters = append(newFilters, on[i])\n\t}\n\treturn newFilters\n}", "func (e Equation) 
Right() Type {\n\treturn e.right\n}", "func (t *Table) LeftJoinRaw(offset int32, count int, crit string) ([]byte, error) {\n\tp := \"https://%s/api/getLeftJoin.sjs?json&object=%s&limit=%d,%d\"\n\tx := fmt.Sprintf(p, t.Host, t.Name, offset, count)\n\tif len(crit) != 0 {\n\t\tx = x + \"&condition=\" + FixCrit(crit)\n\t}\n\t_, body, err := t.Get(x)\n\treturn body, err\n}", "func ExtractJoinEqualityFilter(\n\tleftCol, rightCol opt.ColumnID, leftCols, rightCols opt.ColSet, on FiltersExpr,\n) FiltersItem {\n\tfor i := range on {\n\t\tcondition := on[i].Condition\n\t\tok, left, right := ExtractJoinEquality(leftCols, rightCols, condition)\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\t\tif left == leftCol && right == rightCol {\n\t\t\treturn on[i]\n\t\t}\n\t}\n\tpanic(errors.AssertionFailedf(\"could not find equality between columns %d and %d in filters %s\",\n\t\tleftCol, rightCol, on.String(),\n\t))\n}", "func (w *Wrapper) NaturalJoin(table interface{}, condition string) *Wrapper {\n\tw.saveJoin(table, \"NATURAL JOIN\", condition)\n\treturn w\n}", "func TrimRight(cutset string) MapFunc {\n\treturn func(s string) string { return strings.TrimRight(s, cutset) }\n}", "func TestAggregateLeftJoin(t *testing.T) {\n\tmcmp, closer := start(t)\n\tdefer closer()\n\n\tmcmp.Exec(\"insert into t1(t1_id, name, value, shardKey) values (11, 'r', 'r', 1), (3, 'r', 'r', 0)\")\n\tmcmp.Exec(\"insert into t2(id, shardKey) values (11, 1)\")\n\n\tmcmp.AssertMatchesNoOrder(\"SELECT t1.shardkey FROM t1 LEFT JOIN t2 ON t1.t1_id = t2.id\", `[[INT64(1)] [INT64(0)]]`)\n\tmcmp.AssertMatches(\"SELECT count(t1.shardkey) FROM t1 LEFT JOIN t2 ON t1.t1_id = t2.id\", `[[INT64(2)]]`)\n\tmcmp.AssertMatches(\"SELECT count(t2.shardkey) FROM t1 LEFT JOIN t2 ON t1.t1_id = t2.id\", `[[INT64(1)]]`)\n\tmcmp.AssertMatches(\"SELECT count(*) FROM t1 LEFT JOIN t2 ON t1.t1_id = t2.id\", `[[INT64(2)]]`)\n\tmcmp.AssertMatches(\"SELECT sum(t1.shardkey) FROM t1 LEFT JOIN t2 ON t1.t1_id = t2.id\", `[[DECIMAL(1)]]`)\n\tmcmp.AssertMatches(\"SELECT sum(t2.shardkey) FROM t1 LEFT JOIN t2 ON t1.t1_id = t2.id\", `[[DECIMAL(1)]]`)\n\tmcmp.AssertMatches(\"SELECT count(*) FROM t2 LEFT JOIN t1 ON t1.t1_id = t2.id WHERE IFNULL(t1.name, 'NOTSET') = 'r'\", `[[INT64(1)]]`)\n}", "func (sd *SelectDataset) LeftJoin(table exp.Expression, condition exp.JoinCondition) *SelectDataset {\n\treturn sd.joinTable(exp.NewConditionedJoinExpression(exp.LeftJoinType, table, condition))\n}", "func (sd *SelectDataset) InnerJoin(table exp.Expression, condition exp.JoinCondition) *SelectDataset {\n\treturn sd.joinTable(exp.NewConditionedJoinExpression(exp.InnerJoinType, table, condition))\n}", "func (left *DataTable) OuterJoin(right *DataTable, on []JoinOn) (*DataTable, error) {\n\treturn newJoinImpl(outerJoin, []*DataTable{left, right}, on).Compute()\n}", "func AlignRight(cols ...int) TableOption {\n\treturn func(opts *options) {\n\t\tfor _, col := range cols {\n\t\t\tif col < 0 || col >= len(opts.columnAlignment) {\n\t\t\t\topts.errors = append(opts.errors, &ColumnIndexIsOutOfBoundsError{col})\n\t\t\t} else {\n\t\t\t\topts.columnAlignment[col] = Right\n\t\t\t}\n\t\t}\n\t}\n}", "func (b *Builder) InnerJoin(joinTable, joinCond interface{}) *Builder {\n\treturn b.Join(\"INNER\", joinTable, joinCond)\n}", "func InnerJoin(clause string, args ...interface{}) QueryMod {\n\treturn func(q *queries.Query) {\n\t\tqueries.AppendInnerJoin(q, clause, args...)\n\t}\n}", "func (q Query) Join(inner Query,\n\touterKeySelector func(interface{}) interface{},\n\tinnerKeySelector func(interface{}) 
interface{},\n\tresultSelector func(outer interface{}, inner interface{}) interface{}) Query {\n\n\treturn Query{\n\t\tIterate: func() Iterator {\n\t\t\touternext := q.Iterate()\n\t\t\tinnernext := inner.Iterate()\n\n\t\t\tinnerLookup := make(map[interface{}][]interface{})\n\t\t\tfor innerItem, ok := innernext(); ok; innerItem, ok = innernext() {\n\t\t\t\tinnerKey := innerKeySelector(innerItem)\n\t\t\t\tinnerLookup[innerKey] = append(innerLookup[innerKey], innerItem)\n\t\t\t}\n\n\t\t\tvar outerItem interface{}\n\t\t\tvar innerGroup []interface{}\n\t\t\tinnerLen, innerIndex := 0, 0\n\n\t\t\treturn func() (item interface{}, ok bool) {\n\t\t\t\tif innerIndex >= innerLen {\n\t\t\t\t\thas := false\n\t\t\t\t\tfor !has {\n\t\t\t\t\t\touterItem, ok = outernext()\n\t\t\t\t\t\tif !ok {\n\t\t\t\t\t\t\treturn\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tinnerGroup, has = innerLookup[outerKeySelector(outerItem)]\n\t\t\t\t\t\tinnerLen = len(innerGroup)\n\t\t\t\t\t\tinnerIndex = 0\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\titem = resultSelector(outerItem, innerGroup[innerIndex])\n\t\t\t\tinnerIndex++\n\t\t\t\treturn item, true\n\t\t\t}\n\t\t},\n\t}\n}", "func (root *mTreap) rotateRight(y *treapNode) {\n\t// p -> (y (x a b) c)\n\tp := y.parent\n\tx, c := y.left, y.right\n\ta, b := x.left, x.right\n\n\tx.left = a\n\tif a != nil {\n\t\ta.parent = x\n\t}\n\tx.right = y\n\ty.parent = x\n\ty.left = b\n\tif b != nil {\n\t\tb.parent = y\n\t}\n\ty.right = c\n\tif c != nil {\n\t\tc.parent = y\n\t}\n\n\tx.parent = p\n\tif p == nil {\n\t\troot.treap = x\n\t} else if p.left == y {\n\t\tp.left = x\n\t} else {\n\t\tif p.right != y {\n\t\t\tthrow(\"large span treap rotateRight\")\n\t\t}\n\t\tp.right = x\n\t}\n\n\ty.updateInvariants()\n\tx.updateInvariants()\n}", "func LeftJoin(lx, rx reflect.Value) reflect.Value {\n\tresult := reflect.MakeSlice(reflect.SliceOf(lx.Type().Elem()), 0, lx.Len())\n\trhash := hashSlice(rx)\n\n\tfor i := 0; i < lx.Len(); i++ {\n\t\tv := lx.Index(i)\n\t\t_, ok := rhash[v.Interface()]\n\t\tif !ok {\n\t\t\tresult = reflect.Append(result, v)\n\t\t}\n\t}\n\treturn result\n}", "func (sd *SelectDataset) NaturalFullJoin(table exp.Expression) *SelectDataset {\n\treturn sd.joinTable(exp.NewUnConditionedJoinExpression(exp.NaturalFullJoinType, table))\n}", "func RotateRight(t TermT, n uint32) TermT {\n\treturn TermT(C.yices_rotate_right(C.term_t(t), C.uint32_t(n)))\n}", "func Right(str string, length int, pad string) string {\n\treturn str + times(pad, length-len(str))\n}", "func (b *Builder) InnerJoin(joinTable string, joinCond interface{}) *Builder {\r\n\treturn b.Join(\"INNER\", joinTable, joinCond)\r\n}", "func (sd *SelectDataset) NaturalJoin(table exp.Expression) *SelectDataset {\n\treturn sd.joinTable(exp.NewUnConditionedJoinExpression(exp.NaturalJoinType, table))\n}", "func (e *Tree) SetRight(replacement *Tree) { e.right = replacement }", "func (sb *SQLBuilder) JoinRaw(join string, values ...interface{}) *SQLBuilder {\n\tvar buf strings.Builder\n\n\tbuf.WriteString(sb._join)\n\tif buf.Len() != 0 {\n\t\tbuf.WriteString(\" \")\n\t}\n\tbuf.WriteString(join)\n\n\tsb._join = buf.String()\n\n\tfor _, value := range values {\n\t\tsb._joinParams = append(sb._joinParams, value)\n\t}\n\n\treturn sb\n}", "func (c hashChainer) chainBorderRight(seed []byte, proof [][]byte) []byte {\n\tfor _, h := range proof {\n\t\tseed = c.hasher.HashChildren(h, seed)\n\t}\n\treturn seed\n}", "func (q Quat) Right() Vec3f {\n\treturn q.RotateVec(Vec3f{1, 0, 0})\n}", "func (t *Table) LeftJoin(offset int32, count int, crit string, target interface{}) 
error {\n\tbody, err := t.LeftJoinRaw(offset, count, crit)\n\tif err == nil {\n\t\terr = json.Unmarshal(body, &target)\n\t}\n\treturn err\n}", "func EqualsRefOfJoinTableExpr(a, b *JoinTableExpr) bool {\n\tif a == b {\n\t\treturn true\n\t}\n\tif a == nil || b == nil {\n\t\treturn false\n\t}\n\treturn EqualsTableExpr(a.LeftExpr, b.LeftExpr) &&\n\t\ta.Join == b.Join &&\n\t\tEqualsTableExpr(a.RightExpr, b.RightExpr) &&\n\t\tEqualsJoinCondition(a.Condition, b.Condition)\n}", "func (this *Tuple) Right(n int) *Tuple {\n\tlength := this.Len()\n\tn = max(0, length-n)\n\treturn this.Slice(n, length)\n}", "func (n *Node) rotateRight(c *Node) {\n\tl := c.Left\n\tc.Left = l.Right\n\tl.Right = c\n\tif c == n.Left {\n\t\tn.Left = l\n\t} else {\n\t\tn.Right = l\n\t}\n\tc.bal = 0\n\tl.bal = 0\n}", "func (sd *SelectDataset) FullOuterJoin(table exp.Expression, condition exp.JoinCondition) *SelectDataset {\n\treturn sd.joinTable(exp.NewConditionedJoinExpression(exp.FullOuterJoinType, table, condition))\n}" ]
[ "0.7628548", "0.7389646", "0.7321759", "0.72971517", "0.7130319", "0.7081261", "0.7040699", "0.69593596", "0.69572276", "0.6703606", "0.6688393", "0.6547884", "0.65122604", "0.64553106", "0.6415866", "0.60450983", "0.5878921", "0.58000296", "0.50920373", "0.5038833", "0.500085", "0.49687022", "0.4731131", "0.47088453", "0.46893412", "0.46870908", "0.46862704", "0.46770886", "0.4632343", "0.46294218", "0.46233425", "0.4617011", "0.4602727", "0.45388338", "0.4534692", "0.4530391", "0.45289522", "0.4526295", "0.45174593", "0.45169902", "0.4500771", "0.44847006", "0.44757393", "0.4473496", "0.44724604", "0.44678876", "0.44651076", "0.44646564", "0.44483402", "0.44425213", "0.44376975", "0.44355166", "0.4428949", "0.44248205", "0.43802", "0.4379054", "0.43660408", "0.4363462", "0.43434042", "0.4342158", "0.43382016", "0.4332483", "0.4328479", "0.43171203", "0.4312828", "0.4282029", "0.4281956", "0.42783803", "0.42732418", "0.42610782", "0.4250525", "0.42366856", "0.4234621", "0.4232955", "0.4230592", "0.42245632", "0.42207024", "0.42151964", "0.4213592", "0.4210548", "0.4200873", "0.4197253", "0.41925237", "0.41911215", "0.4190371", "0.418906", "0.4187964", "0.41876385", "0.41819873", "0.418143", "0.4178405", "0.41735974", "0.41616184", "0.41605067", "0.41578954", "0.415394", "0.41523007", "0.4150712", "0.41490313", "0.41483024" ]
0.7296182
4
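The document in the record above only shows a one-line delegation to newJoinImpl. As a behavioral sketch of the semantics its query describes (every right-hand row preserved, NULL on the left side when unmatched), here is a self-contained toy right join over maps; it is illustrative only and not the DataTable implementation.

package main

import "fmt"

// rightJoinIDs keeps every right-hand row and leaves the left side nil
// (standing in for NULL) when no left row shares the id.
func rightJoinIDs(left, right map[int]string) map[int][2]*string {
	out := make(map[int][2]*string)
	for id, rv := range right {
		r := rv // copy before taking the address
		row := [2]*string{nil, &r}
		if lv, ok := left[id]; ok {
			l := lv
			row[0] = &l
		}
		out[id] = row
	}
	return out
}

func main() {
	left := map[int]string{1: "a"}
	right := map[int]string{1: "x", 2: "y"}
	for id, row := range rightJoinIDs(left, right) {
		l := "NULL"
		if row[0] != nil {
			l = *row[0]
		}
		fmt.Println(id, l, *row[1]) // id 2 prints NULL on the left side
	}
}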
RightJoin the tables. tables[0] is used as the reference DataTable.
func RightJoin(tables []*DataTable, on []JoinOn) (*DataTable, error) { return newJoinImpl(rightJoin, tables, on).Compute() }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (stmt *statement) RightJoin(table, on string) Statement {\n\tstmt.join(\"RIGHT JOIN \", table, on)\n\treturn stmt\n}", "func (left *DataTable) RightJoin(right *DataTable, on []JoinOn) (*DataTable, error) {\n\treturn newJoinImpl(rightJoin, []*DataTable{left, right}, on).Compute()\n}", "func (w *Wrapper) RightJoin(table interface{}, condition string) *Wrapper {\n\tw.saveJoin(table, \"RIGHT JOIN\", condition)\n\treturn w\n}", "func (sd *SelectDataset) RightJoin(table exp.Expression, condition exp.JoinCondition) *SelectDataset {\n\treturn sd.joinTable(exp.NewConditionedJoinExpression(exp.RightJoinType, table, condition))\n}", "func (b *Builder) RightJoin(joinTable string, joinCond interface{}) *Builder {\r\n\treturn b.Join(\"RIGHT\", joinTable, joinCond)\r\n}", "func (b *JoinBuilder) RightJoin(other *Table) *JoinBuilder {\n\treturn makeJoinBuilder(\"RIGHT JOIN\", b, other)\n}", "func (b *Builder) RightJoin(joinTable, joinCond interface{}) *Builder {\n\treturn b.Join(\"RIGHT\", joinTable, joinCond)\n}", "func RightJoin(table, on string) QueryOption {\n\treturn newFuncQueryOption(func(wrapper *QueryWrapper) {\n\t\twrapper.joins = append(wrapper.joins, \"RIGHT\", \"JOIN\", table, \"ON\", on)\n\t\twrapper.queryLen += 5\n\t})\n}", "func (sd *SelectDataset) RightOuterJoin(table exp.Expression, condition exp.JoinCondition) *SelectDataset {\n\treturn sd.joinTable(exp.NewConditionedJoinExpression(exp.RightOuterJoinType, table, condition))\n}", "func RightJoin(lx, rx reflect.Value) reflect.Value { return LeftJoin(rx, lx) }", "func NewRightJoinOn(table string, from string, to string, filter ...FilterQuery) JoinQuery {\n\treturn NewJoinWith(\"RIGHT JOIN\", table, from, to, filter...)\n}", "func NewRightJoinOn(table string, from string, to string) JoinQuery {\n\treturn NewJoinWith(\"RIGHT JOIN\", table, from, to)\n}", "func (sd *SelectDataset) NaturalRightJoin(table exp.Expression) *SelectDataset {\n\treturn sd.joinTable(exp.NewUnConditionedJoinExpression(exp.NaturalRightJoinType, table))\n}", "func NewRightJoin(table string) JoinQuery {\n\treturn NewRightJoinOn(table, \"\", \"\")\n}", "func (mySelf SQLJoin) Right() SQLJoin {\n\tmySelf.right = true\n\treturn mySelf\n}", "func NewRightJoin(table string, filter ...FilterQuery) JoinQuery {\n\treturn NewRightJoinOn(table, \"\", \"\", filter...)\n}", "func (dr *DataRow) joinOnColumnIndexRight(rightRow DataRow, onColumnIndexRight int) DataRow {\n\toutItems := make([]DataItem, 0, len(dr.Items)+len(rightRow.Items)-1)\n\t// append left row\n\toutItems = append(outItems, dr.Items...)\n\t// append right row except on column\n\toutItems = append(outItems, rightRow.Items[:onColumnIndexRight]...)\n\toutItems = append(outItems, rightRow.Items[onColumnIndexRight+1:]...)\n\n\treturn DataRow{\n\t\tItems: outItems,\n\t}\n}", "func (mp *JoinMultiplicity) JoinPreservesRightRows(op opt.Operator) bool {\n\tswitch op {\n\tcase opt.InnerJoinOp, opt.LeftJoinOp:\n\t\tbreak\n\n\tcase opt.FullJoinOp:\n\t\treturn true\n\n\tcase opt.SemiJoinOp:\n\t\tpanic(errors.AssertionFailedf(\"right rows are not included in the output of a %v\", op))\n\n\tdefault:\n\t\tpanic(errors.AssertionFailedf(\"unsupported operator: %v\", op))\n\t}\n\treturn mp.JoinFiltersMatchAllRightRows()\n}", "func (stmt *statement) FullJoin(table, on string) Statement {\n\tstmt.join(\"FULL JOIN \", table, on)\n\treturn stmt\n}", "func (b *Builder) FullJoin(joinTable string, joinCond interface{}) *Builder {\r\n\treturn b.Join(\"FULL\", joinTable, joinCond)\r\n}", "func (b *Builder) FullJoin(joinTable, joinCond 
interface{}) *Builder {\n\treturn b.Join(\"FULL\", joinTable, joinCond)\n}", "func (sd *SelectDataset) FullJoin(table exp.Expression, condition exp.JoinCondition) *SelectDataset {\n\treturn sd.joinTable(exp.NewConditionedJoinExpression(exp.FullJoinType, table, condition))\n}", "func AlignRight(cols ...int) TableOption {\n\treturn func(opts *options) {\n\t\tfor _, col := range cols {\n\t\t\tif col < 0 || col >= len(opts.columnAlignment) {\n\t\t\t\topts.errors = append(opts.errors, &ColumnIndexIsOutOfBoundsError{col})\n\t\t\t} else {\n\t\t\t\topts.columnAlignment[col] = Right\n\t\t\t}\n\t\t}\n\t}\n}", "func (r1 *csvTable) Join(r2 rel.Relation, zero interface{}) rel.Relation {\n\treturn rel.NewJoin(r1, r2, zero)\n}", "func (mp *JoinMultiplicity) JoinDoesNotDuplicateRightRows(op opt.Operator) bool {\n\tswitch op {\n\tcase opt.InnerJoinOp, opt.LeftJoinOp, opt.FullJoinOp:\n\t\tbreak\n\n\tcase opt.SemiJoinOp:\n\t\tpanic(errors.AssertionFailedf(\"right rows are not included in the output of a %v\", op))\n\n\tdefault:\n\t\tpanic(errors.AssertionFailedf(\"unsupported operator: %v\", op))\n\t}\n\treturn mp.JoinFiltersDoNotDuplicateRightRows()\n}", "func (left *DataTable) LeftJoin(right *DataTable, on []JoinOn) (*DataTable, error) {\n\treturn newJoinImpl(leftJoin, []*DataTable{left, right}, on).Compute()\n}", "func JoinTableValues(cols ...string) string {\n\tcols = strings_.SliceTrimEmpty(cols...)\n\tif len(cols) == 0 {\n\t\t// https://dev.mysql.com/doc/refman/5.7/en/data-type-defaults.html\n\t\t// DEFAULT\n\t\treturn \"\"\n\t}\n\treturn strings.Join(TableValues(cols...), \",\")\n}", "func (sd *SelectDataset) joinTable(join exp.JoinExpression) *SelectDataset {\n\treturn sd.copy(sd.clauses.JoinsAppend(join))\n}", "func JoinDistil(dataStorage apiModel.DataStorage, joinLeft *JoinSpec, joinRight *JoinSpec, joinPairs []*JoinPair, joinType string, returnRaw bool) (string, *apiModel.FilteredData, error) {\n\tif !isValidJoinType(joinType) {\n\t\treturn \"\", nil, errors.Errorf(\"unsupported join type\")\n\t}\n\tisKey := false\n\tvarsLeftMapUpdated := mapDistilJoinVars(joinLeft.UpdatedVariables)\n\tvarsRightMapUpdated := mapDistilJoinVars(joinRight.UpdatedVariables)\n\tjoins := make([]*description.Join, len(joinPairs))\n\trightVars := make([]*model.Variable, len(joinPairs))\n\tfor i := range joinPairs {\n\t\tjoins[i] = &description.Join{\n\t\t\tLeft: varsLeftMapUpdated[joinPairs[i].Left],\n\t\t\tRight: varsRightMapUpdated[joinPairs[i].Right],\n\t\t\tAccuracy: joinPairs[i].Accuracy,\n\t\t\tAbsolute: joinPairs[i].AbsoluteAccuracy,\n\t\t}\n\t\trightVars[i] = varsRightMapUpdated[joinPairs[i].Right]\n\n\t\t// assume groupings are valid keys for the join\n\t\tif joins[i].Right.IsGrouping() {\n\t\t\tisKey = true\n\t\t}\n\t}\n\tvar err error\n\tif !isKey {\n\t\tisKey, err = dataStorage.IsKey(joinRight.DatasetID, joinRight.ExistingMetadata.StorageName, rightVars)\n\t\tif err != nil {\n\t\t\treturn \"\", nil, err\n\t\t}\n\t}\n\tif !isKey {\n\t\treturn \"\", nil, errors.Errorf(\"specified right join columns do not specify a unique key\")\n\t}\n\n\trightExcludes := generateRightExcludes(joinLeft.UpdatedVariables, joinRight.UpdatedVariables, joinPairs)\n\tjoinInfo := &description.JoinDescription{\n\t\tJoins: joins,\n\t\tLeftExcludes: []*model.Variable{},\n\t\tLeftVariables: joinLeft.UpdatedVariables,\n\t\tRightExcludes: rightExcludes,\n\t\tRightVariables: joinRight.UpdatedVariables,\n\t\tType: joinType,\n\t}\n\tpipelineDesc, err := description.CreateJoinPipeline(\"Joiner\", \"Join existing data\", joinInfo)\n\tif err != 
nil {\n\t\treturn \"\", nil, err\n\t}\n\n\tdatasetLeftURI := joinLeft.DatasetPath\n\tdatasetRightURI := joinRight.DatasetPath\n\n\treturn join(joinLeft, joinRight, \"\", pipelineDesc, []string{datasetLeftURI, datasetRightURI}, defaultSubmitter{}, returnRaw)\n}", "func (left *DataTable) InnerJoin(right *DataTable, on []JoinOn) (*DataTable, error) {\n\treturn newJoinImpl(innerJoin, []*DataTable{left, right}, on).Compute()\n}", "func (c hashChainer) chainBorderRight(seed []byte, proof [][]byte) []byte {\n\tfor _, h := range proof {\n\t\tseed = c.hasher.HashChildren(h, seed)\n\t}\n\treturn seed\n}", "func FullJoin(table, on string) QueryOption {\n\treturn newFuncQueryOption(func(wrapper *QueryWrapper) {\n\t\twrapper.joins = append(wrapper.joins, \"FULL\", \"JOIN\", table, \"ON\", on)\n\t\twrapper.queryLen += 5\n\t})\n}", "func JoinStringsReversed(separator string, stringArray ...string) string {\n\n\tvar buffer bytes.Buffer\n\n\tfor vi := len(stringArray) - 1; vi >= 0; vi-- {\n\t\tbuffer.WriteString(stringArray[vi])\n\t\tif vi > 0 {\n\t\t\tbuffer.WriteString(separator)\n\t\t}\n\t}\n\n\treturn buffer.String()\n\n}", "func (c hashChainer) chainInnerRight(seed []byte, proof [][]byte, index int64) []byte {\n\tfor i, h := range proof {\n\t\tif (index>>uint(i))&1 == 1 {\n\t\t\tseed = c.hasher.HashChildren(h, seed)\n\t\t}\n\t}\n\treturn seed\n}", "func InnerJoin(tables []*DataTable, on []JoinOn) (*DataTable, error) {\n\treturn newJoinImpl(innerJoin, tables, on).Compute()\n}", "func JoinTableColumns(table string, cols ...string) string {\n\t//cols = strings_.SliceTrimEmpty(cols...)\n\treturn strings.Join(TableColumns(table, cols...), \",\")\n}", "func (fn *formulaFuncs) RIGHT(argsList *list.List) formulaArg {\n\treturn fn.leftRight(\"RIGHT\", argsList)\n}", "func (m *Machine) Right() {\n\tfmt.Printf(\">> RIGHT\\n\")\n\t// If we're at the last position, then we need to expand our tape array:\n\tif m.position == (len(m.Tape) - 1) {\n\t\tsize := len(m.Tape)\n\t\tm.Tape = append(m.Tape, make([]Cell, size)...)\n\t}\n\n\tm.position += 1\n}", "func LeftJoin(tables []*DataTable, on []JoinOn) (*DataTable, error) {\n\treturn newJoinImpl(leftJoin, tables, on).Compute()\n}", "func (b *Builder) CrossJoin(joinTable, joinCond interface{}) *Builder {\n\treturn b.Join(\"CROSS\", joinTable, joinCond)\n}", "func (sd *SelectDataset) FullOuterJoin(table exp.Expression, condition exp.JoinCondition) *SelectDataset {\n\treturn sd.joinTable(exp.NewConditionedJoinExpression(exp.FullOuterJoinType, table, condition))\n}", "func (mp *JoinMultiplicity) JoinFiltersMatchAllRightRows() bool {\n\treturn mp.RightMultiplicity&MultiplicityPreservedVal != 0\n}", "func (b *Builder) CrossJoin(joinTable string, joinCond interface{}) *Builder {\r\n\treturn b.Join(\"CROSS\", joinTable, joinCond)\r\n}", "func RightOf(x ...interface{}) Either {\n\treturn newEither(true, x...)\n}", "func (tm *Term) FixRight() error {\n\ttm.FixCols++ // no obvious max\n\treturn tm.Draw()\n}", "func (mp *JoinMultiplicity) JoinFiltersDoNotDuplicateRightRows() bool {\n\treturn mp.RightMultiplicity&MultiplicityNotDuplicatedVal != 0\n}", "func JoinColumns(cols ...string) string {\n\treturn JoinTableColumns(\"\", cols...)\n}", "func (stmt *statement) LeftJoin(table, on string) Statement {\n\tstmt.join(\"LEFT JOIN \", table, on)\n\treturn stmt\n}", "func main() {\n\t// bigger table\n\tfmt.Printf(\"%X \\n\", JoinExample(\"./t/r0.tbl\", \"./t/r0.tbl\", []int{0}, []int{1})) // 767636031\n\tfmt.Printf(\"%X \\n\", JoinExample(\"./t/r0.tbl\", \"./t/r1.tbl\", []int{0}, 
[]int{0})) // 49082128576\n\tfmt.Printf(\"%X \\n\", JoinExample(\"./t/r0.tbl\", \"./t/r1.tbl\", []int{1}, []int{1})) // 85306117839070\n\tfmt.Printf(\"%X \\n\", JoinExample(\"./t/r0.tbl\", \"./t/r2.tbl\", []int{0}, []int{0})) // 48860100254\n\tfmt.Printf(\"%X \\n\", JoinExample(\"./t/r0.tbl\", \"./t/r1.tbl\", []int{0, 1}, []int{0, 1})) //5552101\n\tfmt.Printf(\"%X \\n\", JoinExample(\"./t/r1.tbl\", \"./t/r2.tbl\", []int{0}, []int{0})) // 6331038719880\n\tfmt.Printf(\"%X \\n\", JoinExample(\"./t/r2.tbl\", \"./t/r2.tbl\", []int{0, 1}, []int{0, 1})) // 42056985375886\n}", "func RotateRight(t TermT, n uint32) TermT {\n\treturn TermT(C.yices_rotate_right(C.term_t(t), C.uint32_t(n)))\n}", "func buildJoin(joins []Join, baseTable string) string {\n\tif len(joins) == 0 {\n\t\treturn \"\"\n\t}\n\n\tjoin := \"\"\n\tfor _, j := range joins {\n\t\tjoin += fmt.Sprintf(\n\t\t\t\" JOIN %s ON (%s.%s %s %s.%s)\",\n\t\t\tquote(j.table),\n\t\t\tquote(baseTable),\n\t\t\tquote(j.on.field),\n\t\t\tstring(j.on.comparison),\n\t\t\tquote(j.table),\n\t\t\tquote(j.on.value.(string)),\n\t\t)\n\t}\n\treturn join\n}", "func (matrix Matrix4) Right() vector.Vector {\n\treturn vector.Vector{\n\t\tmatrix[0][0],\n\t\tmatrix[0][1],\n\t\tmatrix[0][2],\n\t}.Unit()\n}", "func (stmt *statement) Join(table, on string) Statement {\n\tstmt.join(\"JOIN \", table, on)\n\treturn stmt\n}", "func JoinTableColumnsValues(cmp string, table string, cols ...string) string {\n\t//cols = strings_.SliceTrimEmpty(cols...)\n\treturn strings.Join(TableColumnsValues(cmp, table, cols...), \",\")\n}", "func reverse(rs []*histRecord) {\n\tfor i := 0; i < len(rs)/2; i++ {\n\t\trs[i], rs[len(rs)-i-1] = rs[len(rs)-i-1], rs[i]\n\t}\n}", "func (sb *SQLBuilder) JoinRaw(join string, values ...interface{}) *SQLBuilder {\n\tvar buf strings.Builder\n\n\tbuf.WriteString(sb._join)\n\tif buf.Len() != 0 {\n\t\tbuf.WriteString(\" \")\n\t}\n\tbuf.WriteString(join)\n\n\tsb._join = buf.String()\n\n\tfor _, value := range values {\n\t\tsb._joinParams = append(sb._joinParams, value)\n\t}\n\n\treturn sb\n}", "func (suite *PopTestSuite) Truncate(tables []string) error {\n\t// Truncate the specified tables.\n\tfor _, table := range tables {\n\t\tsql := fmt.Sprintf(\"TRUNCATE TABLE %s CASCADE\", table)\n\t\tif err := suite.highPrivConn.RawQuery(sql).Exec(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func FillAliases(a *Aliases, tables []drivers.Table) {\n\tif a.Tables == nil {\n\t\ta.Tables = make(map[string]TableAlias)\n\t}\n\n\tfor _, t := range tables {\n\t\tif t.IsJoinTable {\n\t\t\tjt, ok := a.Tables[t.Name]\n\t\t\tif !ok {\n\t\t\t\ta.Tables[t.Name] = TableAlias{Relationships: make(map[string]RelationshipAlias)}\n\t\t\t} else if jt.Relationships == nil {\n\t\t\t\tjt.Relationships = make(map[string]RelationshipAlias)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\ttable := a.Tables[t.Name]\n\n\t\tif len(table.UpPlural) == 0 {\n\t\t\ttable.UpPlural = strmangle.TitleCase(strmangle.Plural(t.Name))\n\t\t}\n\t\tif len(table.UpSingular) == 0 {\n\t\t\ttable.UpSingular = strmangle.TitleCase(strmangle.Singular(t.Name))\n\t\t}\n\t\tif len(table.DownPlural) == 0 {\n\t\t\ttable.DownPlural = strmangle.CamelCase(strmangle.Plural(t.Name))\n\t\t}\n\t\tif len(table.DownSingular) == 0 {\n\t\t\ttable.DownSingular = strmangle.CamelCase(strmangle.Singular(t.Name))\n\t\t}\n\n\t\tif table.Columns == nil {\n\t\t\ttable.Columns = make(map[string]string)\n\t\t}\n\t\tif table.Relationships == nil {\n\t\t\ttable.Relationships = make(map[string]RelationshipAlias)\n\t\t}\n\n\t\tfor _, c := 
range t.Columns {\n\t\t\tif _, ok := table.Columns[c.Name]; !ok {\n\t\t\t\ttable.Columns[c.Name] = strmangle.TitleCase(c.Name)\n\t\t\t}\n\n\t\t\tr, _ := utf8.DecodeRuneInString(table.Columns[c.Name])\n\t\t\tif unicode.IsNumber(r) {\n\t\t\t\ttable.Columns[c.Name] = \"C\" + table.Columns[c.Name]\n\t\t\t}\n\t\t}\n\n\t\ta.Tables[t.Name] = table\n\n\t\tfor _, k := range t.FKeys {\n\t\t\tr := table.Relationships[k.Name]\n\t\t\tif len(r.Local) != 0 && len(r.Foreign) != 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tlocal, foreign := txtNameToOne(k)\n\t\t\tif len(r.Local) == 0 {\n\t\t\t\tr.Local = local\n\t\t\t}\n\t\t\tif len(r.Foreign) == 0 {\n\t\t\t\tr.Foreign = foreign\n\t\t\t}\n\n\t\t\ttable.Relationships[k.Name] = r\n\t\t}\n\n\t}\n\n\tfor _, t := range tables {\n\t\tif !t.IsJoinTable {\n\t\t\tcontinue\n\t\t}\n\n\t\ttable := a.Tables[t.Name]\n\n\t\tlhs := t.FKeys[0]\n\t\trhs := t.FKeys[1]\n\n\t\tlhsAlias, lhsOK := table.Relationships[lhs.Name]\n\t\trhsAlias, rhsOK := table.Relationships[rhs.Name]\n\n\t\tif lhsOK && len(lhsAlias.Local) != 0 && len(lhsAlias.Foreign) != 0 &&\n\t\t\trhsOK && len(rhsAlias.Local) != 0 && len(rhsAlias.Foreign) != 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\t// Here we actually reverse the meaning of local/foreign to be\n\t\t// consistent with the way normal one-to-many relationships are done.\n\t\t// That's to say local = the side with the foreign key. Now in a many-to-many\n\t\t// if we were able to not have a join table our foreign key say \"videos_id\"\n\t\t// would be on the tags table. Hence the relationships should look like:\n\t\t// videos_tags.relationships.fk_video_id.local = \"Tags\"\n\t\t// videos_tags.relationships.fk_video_id.foreign = \"Videos\"\n\t\t// Consistent, yes. Confusing? Also yes.\n\n\t\tlhsName, rhsName := txtNameToMany(lhs, rhs)\n\n\t\tif len(lhsAlias.Local) != 0 {\n\t\t\trhsName = lhsAlias.Local\n\t\t} else if len(rhsAlias.Local) != 0 {\n\t\t\tlhsName = rhsAlias.Local\n\t\t}\n\n\t\tif len(lhsAlias.Foreign) != 0 {\n\t\t\tlhsName = lhsAlias.Foreign\n\t\t} else if len(rhsAlias.Foreign) != 0 {\n\t\t\trhsName = rhsAlias.Foreign\n\t\t}\n\n\t\tif len(lhsAlias.Local) == 0 {\n\t\t\tlhsAlias.Local = rhsName\n\t\t}\n\t\tif len(lhsAlias.Foreign) == 0 {\n\t\t\tlhsAlias.Foreign = lhsName\n\t\t}\n\t\tif len(rhsAlias.Local) == 0 {\n\t\t\trhsAlias.Local = lhsName\n\t\t}\n\t\tif len(rhsAlias.Foreign) == 0 {\n\t\t\trhsAlias.Foreign = rhsName\n\t\t}\n\n\t\ttable.Relationships[lhs.Name] = lhsAlias\n\t\ttable.Relationships[rhs.Name] = rhsAlias\n\t}\n}", "func Right(arr []int, n int) {\n\tmisc.Reverse(arr[n:])\n\tmisc.Reverse(arr[:n])\n\tmisc.Reverse(arr[:])\n}", "func mergeTable(table1 *Table, table2 Table) {\n\tfor _, column := range table2.columns {\n\t\tif containsString(convertColumnsToColumnNames(table1.columns), column.name) {\n\t\t\ttable1.columns = append(table1.columns, column)\n\t\t}\n\t}\n\n\tfor _, index := range table2.indexes {\n\t\tif containsString(convertIndexesToIndexNames(table1.indexes), index.name) {\n\t\t\ttable1.indexes = append(table1.indexes, index)\n\t\t}\n\t}\n}", "func (c *Client) ZDORoutingTable(ctx context.Context, dstAddr uint16, startIndex uint8) error {\n\tdataIn := NewBuffer(nil)\n\tdataIn.WriteUint16(dstAddr) // dstAddr\n\tdataIn.WriteUint8(startIndex) // StartIndex\n\n\tresponse, err := c.CallWithResultSREQ(ctx, dataIn.Frame(0x25, 0x32))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdataOut := response.DataAsBuffer()\n\tif dataOut.Len() == 0 {\n\t\treturn errors.New(\"failure\")\n\t}\n\n\tif status := dataOut.ReadCommandStatus(); status != 
CommandStatusSuccess {\n\t\treturn status\n\t}\n\n\treturn nil\n}", "func ColumnRight(name string) {\n\tidx := colIndex(name)\n\tif idx < len(GlobalColumns)-1 {\n\t\tswapCols(idx, idx+1)\n\t}\n}", "func (w *Wrapper) saveJoin(table interface{}, typ string, condition string) {\n\tswitch v := table.(type) {\n\t// Sub query joining.\n\tcase *Wrapper:\n\t\tw.joins[v.query] = &join{\n\t\t\ttyp: typ,\n\t\t\ttable: table,\n\t\t\tcondition: condition,\n\t\t}\n\t// Common table joining.\n\tcase string:\n\t\tw.joins[v] = &join{\n\t\t\ttyp: typ,\n\t\t\ttable: table,\n\t\t\tcondition: condition,\n\t\t}\n\t}\n}", "func (t *ProcessTable) Underlying() sql.Table {\n\treturn t.Table\n}", "func (n *Node) rotateRight(c *Node) {\n\tl := c.Left\n\tc.Left = l.Right\n\tl.Right = c\n\tif c == n.Left {\n\t\tn.Left = l\n\t} else {\n\t\tn.Right = l\n\t}\n\tc.bal = 0\n\tl.bal = 0\n}", "func (tm *Term) ScrollRight() error {\n\ttm.ColSt++ // no obvious max\n\treturn tm.Draw()\n}", "func (w *Wrapper) InnerJoin(table interface{}, condition string) *Wrapper {\n\tw.saveJoin(table, \"INNER JOIN\", condition)\n\treturn w\n}", "func (b *TestDriver) RightFlip() (err error) {\n\tb.Publish(Rolling, true)\n\treturn nil\n}", "func (a *PixelSubArray) flipAllRight(x, y int) {\n\txBit := uint(x % 8)\n\txByte := x/8 - a.xStartByte\n\tyRow := y - a.yStart\n\n\tfor i := xBit; i < 8; i++ {\n\t\ta.bytes[yRow][xByte] ^= (1 << i)\n\t}\n\n\tfor i := xByte + 1; i < len(a.bytes[0]); i++ {\n\t\ta.bytes[yRow][i] ^= 0xFF\n\t}\n}", "func PadRight(s string, padStr string, lenStr int) string {\n\tvar padCount int\n\tpadCount = I.MaxOf(lenStr-len(s), 0)\n\treturn s + strings.Repeat(padStr, padCount)\n}", "func (left *DataTable) OuterJoin(right *DataTable, on []JoinOn) (*DataTable, error) {\n\treturn newJoinImpl(outerJoin, []*DataTable{left, right}, on).Compute()\n}", "func joinCols(cols []string) string {\n\treturn fmt.Sprintf(\"| %s |\", strings.Join(cols, \" | \"))\n}", "func NewFullJoin(table string) JoinQuery {\n\treturn NewFullJoinOn(table, \"\", \"\")\n}", "func (c *DBCleaner) TruncateTablesExclude(excludedTables ...string) error {\n\ttables, err := c.getTables()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\thelper, err := FindHelper(c.driver)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttables = utils.SubtractStringArray(tables, excludedTables)\n\n\tvar wg sync.WaitGroup\n\twg.Add(len(tables))\n\n\tfor _, table := range tables {\n\t\tgo func(tbl string) {\n\t\t\tcmd := helper.TruncateTableCommand(tbl)\n\t\t\tif _, err := c.db.Exec(cmd); err != nil {\n\t\t\t\tlog.Fatalf(\"Failed to truncate table %s. 
Error: %s\", tbl, err.Error())\n\t\t\t}\n\t\t\twg.Done()\n\t\t}(table)\n\t}\n\n\twg.Wait()\n\n\treturn nil\n}", "func (l *Label) rotateRight(g *Graph) uint16 {\n Assert(nilLabel, l != nil)\n Assert(nilGraph, g != nil)\n Assert(nilLabelStore, g.labelStore != nil)\n Assert(nilLabelWriteMap, g.labelStore.writes != nil)\n \n // perform the rotation\n left, _ := l.left(g) // TODO do not ignore error\n l.l = left.r\n left.r = l.Id\n \n\n l.setHeight(g)\n left.setHeight(g)\n \n // make sure the changes are written\n g.labelStore.writes[l.Id] = l\n g.labelStore.writes[left.Id] = left\n \n return left.Id\n}", "func rotateRight(n *rbnode) *rbnode {\n\tif n.left == nil {\n\t\treturn n\n\t}\n\tl := n.left\n\tconnectLeft(n, l.right)\n\treplaceChild(n, l)\n\tconnectRight(l, n)\n\tn.c, l.c = l.c, n.c\n\treturn l\n}", "func (sd *SelectDataset) CrossJoin(table exp.Expression) *SelectDataset {\n\treturn sd.joinTable(exp.NewUnConditionedJoinExpression(exp.CrossJoinType, table))\n}", "func (this *Tuple) Right(n int) *Tuple {\n\tlength := this.Len()\n\tn = max(0, length-n)\n\treturn this.Slice(n, length)\n}", "func RightPad(s string, padStr string, overallLen int) string {\n\tvar padCountInt = 1 + ((overallLen - len(padStr)) / len(padStr))\n\tvar retStr = s + strings.Repeat(padStr, padCountInt)\n\treturn retStr[:overallLen]\n}", "func (s *State) RotateRight() {\n\tif s.robotLost {\n\t\treturn\n\t}\n\tswitch s.direction {\n\tcase North:\n\t\ts.direction = East\n\t\tbreak\n\tcase South:\n\t\ts.direction = West\n\t\tbreak\n\tcase West:\n\t\ts.direction = North\n\t\tbreak\n\tcase East:\n\t\ts.direction = South\n\t\tbreak\n\t}\n}", "func ExampleTable() {\n\tuser := q.T(\"user\", \"usr\")\n\tpost := q.T(\"post\", \"pst\")\n\t// user.id -> post.user_id\n\tuser.InnerJoin(post, q.Eq(user.C(\"id\"), post.C(\"user_id\")))\n\tfmt.Println(\"Short:\", user)\n\n\tpostTag := q.T(\"posttag\", \"rel\")\n\ttag := q.T(\"tag\", \"tg\")\n\t// post.id -> posttag.post_id\n\tpost.InnerJoin(postTag, q.Eq(post.C(\"id\"), postTag.C(\"post_id\")))\n\t// posttag.tag_id -> tag.id\n\tpostTag.InnerJoin(tag, q.Eq(postTag.C(\"tag_id\"), tag.C(\"id\")))\n\tfmt.Println(\"Long: \", user)\n\t// Output:\n\t// Short: \"user\" AS \"usr\" INNER JOIN \"post\" AS \"pst\" ON \"usr\".\"id\" = \"pst\".\"user_id\" []\n\t// Long: \"user\" AS \"usr\" INNER JOIN (\"post\" AS \"pst\" INNER JOIN (\"posttag\" AS \"rel\" INNER JOIN \"tag\" AS \"tg\" ON \"rel\".\"tag_id\" = \"tg\".\"id\") ON \"pst\".\"id\" = \"rel\".\"post_id\") ON \"usr\".\"id\" = \"pst\".\"user_id\" []\n}", "func (this *Dao) Join (joinType string, joinTable string, joinOn string) *Dao {\n\tjoin := fmt.Sprintf(\"%s JOIN %s ON %s\", strings.ToUpper(joinType), _table(joinTable), joinOn)\n\n\tthis.queryJoins = append(this.queryJoins, join)\n\treturn this\n}", "func (sd *SelectDataset) NaturalFullJoin(table exp.Expression) *SelectDataset {\n\treturn sd.joinTable(exp.NewUnConditionedJoinExpression(exp.NaturalFullJoinType, table))\n}", "func (w *Wrapper) JoinOrWhere(table interface{}, args ...interface{}) *Wrapper {\n\tw.saveJoinCondition(\"OR\", table, args...)\n\treturn w\n}", "func (sd *SelectDataset) Join(table exp.Expression, condition exp.JoinCondition) *SelectDataset {\n\treturn sd.InnerJoin(table, condition)\n}", "func (w *Wrapper) LeftJoin(table interface{}, condition string) *Wrapper {\n\tw.saveJoin(table, \"LEFT JOIN\", condition)\n\treturn w\n}", "func NewFullJoin(table string, filter ...FilterQuery) JoinQuery {\n\treturn NewFullJoinOn(table, \"\", \"\", filter...)\n}", "func rightPad(s 
string, padStr string, pLen int) string {\n\treturn s + strings.Repeat(padStr, pLen)\n}", "func JoinDatamart(joinLeft *JoinSpec, joinRight *JoinSpec, rightOrigin *model.DatasetOrigin) (string, *apiModel.FilteredData, error) {\n\tpipelineDesc, err := description.CreateDatamartAugmentPipeline(\"Join Preview\",\n\t\t\"Join to be reviewed by user\", rightOrigin.SearchResult, rightOrigin.Provenance)\n\tif err != nil {\n\t\treturn \"\", nil, err\n\t}\n\tdatasetLeftURI := env.ResolvePath(joinLeft.DatasetSource, joinLeft.DatasetPath)\n\n\treturn join(joinLeft, joinRight, \"\", pipelineDesc, []string{datasetLeftURI}, defaultSubmitter{}, false)\n}", "func (w *Wrapper) buildJoin() (query string) {\n\tif len(w.joins) == 0 {\n\t\treturn\n\t}\n\n\tfor _, v := range w.joins {\n\t\t// The join type (ex: LEFT JOIN, RIGHT JOIN, INNER JOIN).\n\t\tquery += fmt.Sprintf(\"%s \", v.typ)\n\t\tswitch d := v.table.(type) {\n\t\t// Sub query.\n\t\tcase *Wrapper:\n\t\t\tquery += fmt.Sprintf(\"%s AS %s ON \", w.bindParam(d), d.alias)\n\t\t// Table name.\n\t\tcase string:\n\t\t\tquery += fmt.Sprintf(\"%s ON \", d)\n\t\t}\n\n\t\tif len(v.conditions) == 0 {\n\t\t\tquery += fmt.Sprintf(\"(%s) \", v.condition)\n\t\t} else {\n\t\t\tconditionsQuery := strings.TrimSpace(w.buildConditions(v.conditions))\n\t\t\tquery += fmt.Sprintf(\"(%s %s %s) \", v.condition, v.conditions[0].connector, conditionsQuery)\n\t\t}\n\t}\n\treturn\n}", "func (q Quat) Right() Vec3f {\n\treturn q.RotateVec(Vec3f{1, 0, 0})\n}", "func (llrb *LLRB) moveredright(nd *Llrbnode) *Llrbnode {\n\tllrb.flip(nd)\n\tif nd.left.left.isred() {\n\t\tnd = llrb.rotateright(nd)\n\t\tllrb.flip(nd)\n\t}\n\treturn nd\n}", "func JoinTableColumnsWithAs(table string, cols ...string) string {\n\t//cols = strings_.SliceTrimEmpty(cols...)\n\treturn strings.Join(ExpandAsColumns(TableColumns(table, cols...)...), \",\")\n}", "func (root *mTreap) rotateRight(y *treapNode) {\n\t// p -> (y (x a b) c)\n\tp := y.parent\n\tx, c := y.left, y.right\n\ta, b := x.left, x.right\n\n\tx.left = a\n\tif a != nil {\n\t\ta.parent = x\n\t}\n\tx.right = y\n\ty.parent = x\n\ty.left = b\n\tif b != nil {\n\t\tb.parent = y\n\t}\n\ty.right = c\n\tif c != nil {\n\t\tc.parent = y\n\t}\n\n\tx.parent = p\n\tif p == nil {\n\t\troot.treap = x\n\t} else if p.left == y {\n\t\tp.left = x\n\t} else {\n\t\tif p.right != y {\n\t\t\tthrow(\"large span treap rotateRight\")\n\t\t}\n\t\tp.right = x\n\t}\n\n\ty.updateInvariants()\n\tx.updateInvariants()\n}", "func ShiftRight0(t TermT, n uint32) TermT {\n\treturn TermT(C.yices_shift_right0(C.term_t(t), C.uint32_t(n)))\n}", "func (e unionNodeEmitDistinct) emitRight(b []byte) bool {\n\t_, ok := e[string(b)]\n\te[string(b)] = 1\n\treturn !ok\n}", "func PadRight(str string, padStr string, padLen int) string {\n\treturn buildPadStr(str, padStr, padLen, false, true)\n}", "func ReverseCreateTableStmts(nodes interface{ DDLNodes() []ast.DDLNode }) string {\n\tvar buffer = bytes.NewBuffer(nil)\n\tfor _, ddl := range nodes.DDLNodes() {\n\t\tif create, ok := ddl.(*ast.CreateTableStmt); ok {\n\t\t\tname := create.Table.Name.String()\n\t\t\tbuffer.WriteString(\"DROP TABLE IF EXISTS \" + name + \";\\n\")\n\t\t}\n\t}\n\n\treturn buffer.String()\n}", "func (t *Table) LeftJoinRaw(offset int32, count int, crit string) ([]byte, error) {\n\tp := \"https://%s/api/getLeftJoin.sjs?json&object=%s&limit=%d,%d\"\n\tx := fmt.Sprintf(p, t.Host, t.Name, offset, count)\n\tif len(crit) != 0 {\n\t\tx = x + \"&condition=\" + FixCrit(crit)\n\t}\n\t_, body, err := t.Get(x)\n\treturn body, err\n}", "func (s 
*BasePlSqlParserListener) ExitTable_ref_aux_internal_two(ctx *Table_ref_aux_internal_twoContext) {\n}" ]
[ "0.78055537", "0.7420944", "0.741076", "0.7111573", "0.7041436", "0.69916314", "0.6970857", "0.68913996", "0.68234277", "0.6683218", "0.6154301", "0.61236024", "0.61003006", "0.6057328", "0.6012464", "0.6009745", "0.5808238", "0.5495906", "0.53983474", "0.51418346", "0.5133363", "0.49576372", "0.4944325", "0.48702252", "0.4838444", "0.48095226", "0.48031813", "0.47838217", "0.4767201", "0.4741974", "0.47074887", "0.46650997", "0.46540526", "0.46428844", "0.46326226", "0.46293697", "0.45576698", "0.45526305", "0.4548451", "0.45047998", "0.45012274", "0.4484232", "0.4474799", "0.4472152", "0.44656923", "0.44632703", "0.44494006", "0.44492993", "0.4441505", "0.4398289", "0.4392055", "0.4391623", "0.43903705", "0.43730345", "0.43599296", "0.43435553", "0.43119723", "0.43106875", "0.42892152", "0.42738503", "0.42705652", "0.42681408", "0.42587444", "0.42517373", "0.42468762", "0.4234652", "0.42278534", "0.42270875", "0.4220499", "0.42113864", "0.4209405", "0.42015657", "0.4201148", "0.4198487", "0.41953966", "0.4193562", "0.4181252", "0.41803285", "0.41750956", "0.41626787", "0.41586095", "0.41517907", "0.41420028", "0.41342986", "0.413158", "0.41264957", "0.41249332", "0.41125312", "0.4111617", "0.40994588", "0.40973997", "0.40965348", "0.40727863", "0.40720174", "0.40688533", "0.4051072", "0.40476036", "0.40434256", "0.40376475", "0.4032858" ]
0.7857376
0
OuterJoin returns all records when there is a match in either the left or right table. OuterJoin transforms an expr column to a raw column.
func (left *DataTable) OuterJoin(right *DataTable, on []JoinOn) (*DataTable, error) { return newJoinImpl(outerJoin, []*DataTable{left, right}, on).Compute() }
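A minimal usage sketch for the OuterJoin document above, stated with assumptions: the NewDataTable constructor and the JoinOn fields (Table, Field) are hypothetical names for illustration only; the one thing taken from the record itself is the OuterJoin(right *DataTable, on []JoinOn) (*DataTable, error) signature.

package main

import "log"

func main() {
	left := NewDataTable("users")   // hypothetical constructor, not in the record
	right := NewDataTable("orders") // hypothetical constructor, not in the record

	// Join on a single column pair; JoinOn's field names are assumed here.
	joined, err := left.OuterJoin(right, []JoinOn{{Table: "orders", Field: "user_id"}})
	if err != nil {
		log.Fatal(err)
	}
	_ = joined // holds every row that matched in either the left or the right table
}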
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (self Accessor) OuterJoin(expr interface{}) *SelectManager {\n\treturn self.From(self.Relation()).OuterJoin(expr)\n}", "func OuterJoin(tables []*DataTable, on []JoinOn) (*DataTable, error) {\n\treturn newJoinImpl(outerJoin, tables, on).Compute()\n}", "func OuterJoin(lx, rx reflect.Value) reflect.Value {\n\tljoin := LeftJoin(lx, rx)\n\trjoin := RightJoin(lx, rx)\n\n\tresult := reflect.MakeSlice(reflect.SliceOf(lx.Type().Elem()), ljoin.Len()+rjoin.Len(), ljoin.Len()+rjoin.Len())\n\tfor i := 0; i < ljoin.Len(); i++ {\n\t\tresult.Index(i).Set(ljoin.Index(i))\n\t}\n\tfor i := 0; i < rjoin.Len(); i++ {\n\t\tresult.Index(ljoin.Len() + i).Set(rjoin.Index(i))\n\t}\n\n\treturn result\n}", "func (mySelf SQLJoin) Outer() SQLJoin {\n\tmySelf.outer = true\n\treturn mySelf\n}", "func (sd *SelectDataset) LeftOuterJoin(table exp.Expression, condition exp.JoinCondition) *SelectDataset {\n\treturn sd.joinTable(exp.NewConditionedJoinExpression(exp.LeftOuterJoinType, table, condition))\n}", "func (sd *SelectDataset) FullOuterJoin(table exp.Expression, condition exp.JoinCondition) *SelectDataset {\n\treturn sd.joinTable(exp.NewConditionedJoinExpression(exp.FullOuterJoinType, table, condition))\n}", "func (s *BasePlSqlParserListener) EnterOuter_join_sign(ctx *Outer_join_signContext) {}", "func (t *Table) LeftJoinRaw(offset int32, count int, crit string) ([]byte, error) {\n\tp := \"https://%s/api/getLeftJoin.sjs?json&object=%s&limit=%d,%d\"\n\tx := fmt.Sprintf(p, t.Host, t.Name, offset, count)\n\tif len(crit) != 0 {\n\t\tx = x + \"&condition=\" + FixCrit(crit)\n\t}\n\t_, body, err := t.Get(x)\n\treturn body, err\n}", "func (s *BasePlSqlParserListener) EnterOuter_join_type(ctx *Outer_join_typeContext) {}", "func (h *joinPlanningHelper) remapOnExpr(\n\tplanCtx *PlanningCtx, onCond tree.TypedExpr,\n) (execinfrapb.Expression, error) {\n\tif onCond == nil {\n\t\treturn execinfrapb.Expression{}, nil\n\t}\n\n\tjoinColMap := make([]int, h.numLeftOutCols+h.numRightOutCols)\n\tidx := 0\n\tleftCols := 0\n\tfor i := 0; i < h.numLeftOutCols; i++ {\n\t\tjoinColMap[idx] = h.leftPlanToStreamColMap[i]\n\t\tif h.leftPlanToStreamColMap[i] != -1 {\n\t\t\tleftCols++\n\t\t}\n\t\tidx++\n\t}\n\tfor i := 0; i < h.numRightOutCols; i++ {\n\t\tjoinColMap[idx] = leftCols + h.rightPlanToStreamColMap[i]\n\t\tidx++\n\t}\n\n\treturn physicalplan.MakeExpression(onCond, planCtx, joinColMap)\n}", "func (self Accessor) InnerJoin(expr interface{}) *SelectManager {\n\treturn self.From(self.Relation()).InnerJoin(expr)\n}", "func (stmt *statement) LeftJoin(table, on string) Statement {\n\tstmt.join(\"LEFT JOIN \", table, on)\n\treturn stmt\n}", "func (w *Wrapper) LeftJoin(table interface{}, condition string) *Wrapper {\n\tw.saveJoin(table, \"LEFT JOIN\", condition)\n\treturn w\n}", "func (sd *SelectDataset) NaturalLeftJoin(table exp.Expression) *SelectDataset {\n\treturn sd.joinTable(exp.NewUnConditionedJoinExpression(exp.NaturalLeftJoinType, table))\n}", "func (t *Table) LeftJoin(offset int32, count int, crit string, target interface{}) error {\n\tbody, err := t.LeftJoinRaw(offset, count, crit)\n\tif err == nil {\n\t\terr = json.Unmarshal(body, &target)\n\t}\n\treturn err\n}", "func (w *Wrapper) NaturalJoin(table interface{}, condition string) *Wrapper {\n\tw.saveJoin(table, \"NATURAL JOIN\", condition)\n\treturn w\n}", "func JoinWithLeftAssociativeOp(op OpCode, a Expr, b Expr) Expr {\n\t// \"(a, b) op c\" => \"a, b op c\"\n\tif comma, ok := a.Data.(*EBinary); ok && comma.Op == BinOpComma {\n\t\tcomma.Right = JoinWithLeftAssociativeOp(op, 
comma.Right, b)\n\t\treturn a\n\t}\n\n\t// \"a op (b op c)\" => \"(a op b) op c\"\n\t// \"a op (b op (c op d))\" => \"((a op b) op c) op d\"\n\tif binary, ok := b.Data.(*EBinary); ok && binary.Op == op {\n\t\treturn JoinWithLeftAssociativeOp(\n\t\t\top,\n\t\t\tJoinWithLeftAssociativeOp(op, a, binary.Left),\n\t\t\tbinary.Right,\n\t\t)\n\t}\n\n\t// \"a op b\" => \"a op b\"\n\t// \"(a op b) op c\" => \"(a op b) op c\"\n\treturn Expr{Loc: a.Loc, Data: &EBinary{Op: op, Left: a, Right: b}}\n}", "func (o *outerJoinEliminator) tryToEliminateOuterJoin(p *LogicalJoin, aggCols []*expression.Column, parentCols []*expression.Column, opt *logicalOptimizeOp) (LogicalPlan, bool, error) {\n\tvar innerChildIdx int\n\tswitch p.JoinType {\n\tcase LeftOuterJoin:\n\t\tinnerChildIdx = 1\n\tcase RightOuterJoin:\n\t\tinnerChildIdx = 0\n\tdefault:\n\t\treturn p, false, nil\n\t}\n\n\touterPlan := p.children[1^innerChildIdx]\n\tinnerPlan := p.children[innerChildIdx]\n\touterUniqueIDs := set.NewInt64Set()\n\tfor _, outerCol := range outerPlan.Schema().Columns {\n\t\touterUniqueIDs.Insert(outerCol.UniqueID)\n\t}\n\tmatched := IsColsAllFromOuterTable(parentCols, outerUniqueIDs)\n\tif !matched {\n\t\treturn p, false, nil\n\t}\n\t// outer join elimination with duplicate agnostic aggregate functions\n\tmatched = IsColsAllFromOuterTable(aggCols, outerUniqueIDs)\n\tif matched {\n\t\tappendOuterJoinEliminateAggregationTraceStep(p, outerPlan, aggCols, opt)\n\t\treturn outerPlan, true, nil\n\t}\n\t// outer join elimination without duplicate agnostic aggregate functions\n\tinnerJoinKeys := o.extractInnerJoinKeys(p, innerChildIdx)\n\tcontain, err := o.isInnerJoinKeysContainUniqueKey(innerPlan, innerJoinKeys)\n\tif err != nil {\n\t\treturn p, false, err\n\t}\n\tif contain {\n\t\tappendOuterJoinEliminateTraceStep(p, outerPlan, parentCols, innerJoinKeys, opt)\n\t\treturn outerPlan, true, nil\n\t}\n\tcontain, err = o.isInnerJoinKeysContainIndex(innerPlan, innerJoinKeys)\n\tif err != nil {\n\t\treturn p, false, err\n\t}\n\tif contain {\n\t\tappendOuterJoinEliminateTraceStep(p, outerPlan, parentCols, innerJoinKeys, opt)\n\t\treturn outerPlan, true, nil\n\t}\n\n\treturn p, false, nil\n}", "func (s *BaseMySqlParserListener) EnterOuterJoin(ctx *OuterJoinContext) {}", "func ExtractJoinEqualityColumns(\n\tleftCols, rightCols opt.ColSet, on FiltersExpr,\n) (leftEq opt.ColList, rightEq opt.ColList) {\n\tfor i := range on {\n\t\tcondition := on[i].Condition\n\t\tok, left, right := ExtractJoinEquality(leftCols, rightCols, condition)\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\t\t// Don't allow any column to show up twice.\n\t\t// TODO(radu): need to figure out the right thing to do in cases\n\t\t// like: left.a = right.a AND left.a = right.b\n\t\tduplicate := false\n\t\tfor i := range leftEq {\n\t\t\tif leftEq[i] == left || rightEq[i] == right {\n\t\t\t\tduplicate = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !duplicate {\n\t\t\tleftEq = append(leftEq, left)\n\t\t\trightEq = append(rightEq, right)\n\t\t}\n\t}\n\treturn leftEq, rightEq\n}", "func ExtractJoinEqualityFilter(\n\tleftCol, rightCol opt.ColumnID, leftCols, rightCols opt.ColSet, on FiltersExpr,\n) FiltersItem {\n\tfor i := range on {\n\t\tcondition := on[i].Condition\n\t\tok, left, right := ExtractJoinEquality(leftCols, rightCols, condition)\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\t\tif left == leftCol && right == rightCol {\n\t\t\treturn on[i]\n\t\t}\n\t}\n\tpanic(errors.AssertionFailedf(\"could not find equality between columns %d and %d in filters %s\",\n\t\tleftCol, rightCol, 
on.String(),\n\t))\n}", "func (sd *SelectDataset) NaturalJoin(table exp.Expression) *SelectDataset {\n\treturn sd.joinTable(exp.NewUnConditionedJoinExpression(exp.NaturalJoinType, table))\n}", "func TestAggregateLeftJoin(t *testing.T) {\n\tmcmp, closer := start(t)\n\tdefer closer()\n\n\tmcmp.Exec(\"insert into t1(t1_id, name, value, shardKey) values (11, 'r', 'r', 1), (3, 'r', 'r', 0)\")\n\tmcmp.Exec(\"insert into t2(id, shardKey) values (11, 1)\")\n\n\tmcmp.AssertMatchesNoOrder(\"SELECT t1.shardkey FROM t1 LEFT JOIN t2 ON t1.t1_id = t2.id\", `[[INT64(1)] [INT64(0)]]`)\n\tmcmp.AssertMatches(\"SELECT count(t1.shardkey) FROM t1 LEFT JOIN t2 ON t1.t1_id = t2.id\", `[[INT64(2)]]`)\n\tmcmp.AssertMatches(\"SELECT count(t2.shardkey) FROM t1 LEFT JOIN t2 ON t1.t1_id = t2.id\", `[[INT64(1)]]`)\n\tmcmp.AssertMatches(\"SELECT count(*) FROM t1 LEFT JOIN t2 ON t1.t1_id = t2.id\", `[[INT64(2)]]`)\n\tmcmp.AssertMatches(\"SELECT sum(t1.shardkey) FROM t1 LEFT JOIN t2 ON t1.t1_id = t2.id\", `[[DECIMAL(1)]]`)\n\tmcmp.AssertMatches(\"SELECT sum(t2.shardkey) FROM t1 LEFT JOIN t2 ON t1.t1_id = t2.id\", `[[DECIMAL(1)]]`)\n\tmcmp.AssertMatches(\"SELECT count(*) FROM t2 LEFT JOIN t1 ON t1.t1_id = t2.id WHERE IFNULL(t1.name, 'NOTSET') = 'r'\", `[[INT64(1)]]`)\n}", "func LeftJoin(tables []*DataTable, on []JoinOn) (*DataTable, error) {\n\treturn newJoinImpl(leftJoin, tables, on).Compute()\n}", "func (q Query) Join(inner Query,\n\touterKeySelector func(interface{}) interface{},\n\tinnerKeySelector func(interface{}) interface{},\n\tresultSelector func(outer interface{}, inner interface{}) interface{}) Query {\n\n\treturn Query{\n\t\tIterate: func() Iterator {\n\t\t\touternext := q.Iterate()\n\t\t\tinnernext := inner.Iterate()\n\n\t\t\tinnerLookup := make(map[interface{}][]interface{})\n\t\t\tfor innerItem, ok := innernext(); ok; innerItem, ok = innernext() {\n\t\t\t\tinnerKey := innerKeySelector(innerItem)\n\t\t\t\tinnerLookup[innerKey] = append(innerLookup[innerKey], innerItem)\n\t\t\t}\n\n\t\t\tvar outerItem interface{}\n\t\t\tvar innerGroup []interface{}\n\t\t\tinnerLen, innerIndex := 0, 0\n\n\t\t\treturn func() (item interface{}, ok bool) {\n\t\t\t\tif innerIndex >= innerLen {\n\t\t\t\t\thas := false\n\t\t\t\t\tfor !has {\n\t\t\t\t\t\touterItem, ok = outernext()\n\t\t\t\t\t\tif !ok {\n\t\t\t\t\t\t\treturn\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tinnerGroup, has = innerLookup[outerKeySelector(outerItem)]\n\t\t\t\t\t\tinnerLen = len(innerGroup)\n\t\t\t\t\t\tinnerIndex = 0\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\titem = resultSelector(outerItem, innerGroup[innerIndex])\n\t\t\t\tinnerIndex++\n\t\t\t\treturn item, true\n\t\t\t}\n\t\t},\n\t}\n}", "func ExtractJoinEquality(\n\tleftCols, rightCols opt.ColSet, condition opt.ScalarExpr,\n) (ok bool, left, right opt.ColumnID) {\n\tlvar, rvar, ok := isVarEquality(condition)\n\tif !ok {\n\t\treturn false, 0, 0\n\t}\n\n\t// Don't allow mixed types (see #22519).\n\tif !lvar.DataType().Equivalent(rvar.DataType()) {\n\t\treturn false, 0, 0\n\t}\n\n\tif leftCols.Contains(lvar.Col) && rightCols.Contains(rvar.Col) {\n\t\treturn true, lvar.Col, rvar.Col\n\t}\n\tif leftCols.Contains(rvar.Col) && rightCols.Contains(lvar.Col) {\n\t\treturn true, rvar.Col, lvar.Col\n\t}\n\n\treturn false, 0, 0\n}", "func (mp *JoinMultiplicity) JoinPreservesLeftRows(op opt.Operator) bool {\n\tswitch op {\n\tcase opt.InnerJoinOp, opt.SemiJoinOp:\n\t\tbreak\n\n\tcase opt.LeftJoinOp, opt.FullJoinOp:\n\t\treturn true\n\n\tdefault:\n\t\tpanic(errors.AssertionFailedf(\"unsupported operator: %v\", op))\n\t}\n\treturn 
mp.JoinFiltersMatchAllLeftRows()\n}", "func (t *TableExpr) JoinSQL() string {\n\tif t.JoinConditions != \"\" && t.JoinType != \"\" {\n\t\treturn \" \" + t.JoinType + \" \" + t.Table.SQL() + \" on \" +\n\t\t\tt.JoinConditions\n\t}\n\treturn \", \" + t.Table.SQL()\n}", "func (left *DataTable) LeftJoin(right *DataTable, on []JoinOn) (*DataTable, error) {\n\treturn newJoinImpl(leftJoin, []*DataTable{left, right}, on).Compute()\n}", "func (w *Wrapper) InnerJoin(table interface{}, condition string) *Wrapper {\n\tw.saveJoin(table, \"INNER JOIN\", condition)\n\treturn w\n}", "func TestJoinTableSqlBuilder(t *testing.T) {\n\tmock := NewMockOptimizer(false)\n\n\t// should pass\n\tsqls := []string{\n\t\t\"SELECT N_NAME,N_REGIONKEY FROM NATION join REGION on NATION.N_REGIONKEY = REGION.R_REGIONKEY\",\n\t\t\"SELECT N_NAME, N_REGIONKEY FROM NATION join REGION on NATION.N_REGIONKEY = REGION.R_REGIONKEY WHERE NATION.N_REGIONKEY > 0\",\n\t\t\"SELECT N_NAME, NATION2.R_REGIONKEY FROM NATION2 join REGION using(R_REGIONKEY) WHERE NATION2.R_REGIONKEY > 0\",\n\t\t\"SELECT N_NAME, NATION2.R_REGIONKEY FROM NATION2 NATURAL JOIN REGION WHERE NATION2.R_REGIONKEY > 0\",\n\t\t\"SELECT N_NAME FROM NATION NATURAL JOIN REGION\", //have no same column name but it's ok\n\t\t\"SELECT N_NAME,N_REGIONKEY FROM NATION a join REGION b on a.N_REGIONKEY = b.R_REGIONKEY WHERE a.N_REGIONKEY > 0\", //test alias\n\t\t\"SELECT l.L_ORDERKEY a FROM CUSTOMER c, ORDERS o, LINEITEM l WHERE c.C_CUSTKEY = o.O_CUSTKEY and l.L_ORDERKEY = o.O_ORDERKEY and o.O_ORDERKEY < 10\", //join three tables\n\t\t\"SELECT c.* FROM CUSTOMER c, ORDERS o, LINEITEM l WHERE c.C_CUSTKEY = o.O_CUSTKEY and l.L_ORDERKEY = o.O_ORDERKEY\", //test star\n\t\t\"SELECT * FROM CUSTOMER c, ORDERS o, LINEITEM l WHERE c.C_CUSTKEY = o.O_CUSTKEY and l.L_ORDERKEY = o.O_ORDERKEY\", //test star\n\t\t\"SELECT a.* FROM NATION a join REGION b on a.N_REGIONKEY = b.R_REGIONKEY WHERE a.N_REGIONKEY > 0\", //test star\n\t\t\"SELECT * FROM NATION a join REGION b on a.N_REGIONKEY = b.R_REGIONKEY WHERE a.N_REGIONKEY > 0\",\n\t\t\"SELECT N_NAME, R_REGIONKEY FROM NATION2 join REGION using(R_REGIONKEY)\",\n\t\t\"select nation.n_name from nation join nation2 on nation.n_name !='a' join region on nation.n_regionkey = region.r_regionkey\",\n\t\t\"select * from nation, nation2, region\",\n\t}\n\trunTestShouldPass(mock, t, sqls, false, false)\n\n\t// should error\n\tsqls = []string{\n\t\t\"SELECT N_NAME,N_REGIONKEY FROM NATION join REGION on NATION.N_REGIONKEY = REGION.NotExistColumn\", //column not exist\n\t\t\"SELECT N_NAME, R_REGIONKEY FROM NATION join REGION using(R_REGIONKEY)\", //column not exist\n\t\t\"SELECT N_NAME,N_REGIONKEY FROM NATION a join REGION b on a.N_REGIONKEY = b.R_REGIONKEY WHERE aaaaa.N_REGIONKEY > 0\", //table alias not exist\n\t\t\"select *\", //No table used\n\t\t\"SELECT * FROM NATION a join REGION b on a.N_REGIONKEY = b.R_REGIONKEY WHERE a.N_REGIONKEY > 0 for update\", //Not support\n\t\t\"select * from nation, nation2, region for update\", // Not support\n\t}\n\trunTestShouldError(mock, t, sqls)\n}", "func (t *Table) LeftJoinMap(offset int32, count int, crit string) ([]map[string]string, error) {\n\tvar a []map[string]string\n\tbody, err := t.LeftJoinRaw(offset, count, crit)\n\ta = unpackGJsonArray(body)\n\treturn a, err\n}", "func (f *predicateSqlizerFactory) createLeftJoin(secondaryTable string, primaryColumn string, secondaryColumn string) string {\n\tnewAlias := joinedTable{secondaryTable, primaryColumn, secondaryColumn}\n\tfor i, alias := range f.joinedTables {\n\t\tif 
alias.equal(newAlias) {\n\t\t\treturn f.aliasName(secondaryTable, i)\n\t\t}\n\t}\n\n\tf.joinedTables = append(f.joinedTables, newAlias)\n\treturn f.aliasName(secondaryTable, len(f.joinedTables)-1)\n}", "func NewLeftJoinOn(table string, from string, to string, filter ...FilterQuery) JoinQuery {\n\treturn NewJoinWith(\"LEFT JOIN\", table, from, to, filter...)\n}", "func (b *Builder) LeftJoin(joinTable string, joinCond interface{}) *Builder {\r\n\treturn b.Join(\"LEFT\", joinTable, joinCond)\r\n}", "func (sd *SelectDataset) LeftJoin(table exp.Expression, condition exp.JoinCondition) *SelectDataset {\n\treturn sd.joinTable(exp.NewConditionedJoinExpression(exp.LeftJoinType, table, condition))\n}", "func TestLogicalJoinProps(t *testing.T) {\n\tevalCtx := tree.MakeTestingEvalContext(cluster.MakeTestingClusterSettings())\n\tf := norm.NewFactory(&evalCtx)\n\n\t// Disable all rules so that the expected operators are constructed.\n\tf.DisableOptimizations()\n\n\tcat := createLogPropsCatalog(t)\n\ta := f.Metadata().AddTable(cat.Table(\"a\"))\n\tb := f.Metadata().AddTable(cat.Table(\"b\"))\n\n\tjoinFunc := func(op opt.Operator, expected string) {\n\t\tt.Helper()\n\n\t\t// (Join (Scan a) (Scan b) (True))\n\t\tleftGroup := f.ConstructScan(f.InternScanOpDef(constructScanOpDef(f.Metadata(), a)))\n\t\trightGroup := f.ConstructScan(f.InternScanOpDef(constructScanOpDef(f.Metadata(), b)))\n\t\tonGroup := f.ConstructTrue()\n\t\toperands := norm.DynamicOperands{\n\t\t\tnorm.DynamicID(leftGroup),\n\t\t\tnorm.DynamicID(rightGroup),\n\t\t\tnorm.DynamicID(onGroup),\n\t\t}\n\t\tjoinGroup := f.DynamicConstruct(op, operands)\n\n\t\tev := memo.MakeNormExprView(f.Memo(), joinGroup)\n\t\ttestLogicalProps(t, f.Metadata(), ev, expected)\n\t}\n\n\tjoinFunc(opt.InnerJoinApplyOp, \"a.x:1(int!null) a.y:2(int) b.x:3(int!null) b.z:4(int!null)\\n\")\n\tjoinFunc(opt.LeftJoinApplyOp, \"a.x:1(int!null) a.y:2(int) b.x:3(int) b.z:4(int)\\n\")\n\tjoinFunc(opt.RightJoinApplyOp, \"a.x:1(int) a.y:2(int) b.x:3(int!null) b.z:4(int!null)\\n\")\n\tjoinFunc(opt.FullJoinApplyOp, \"a.x:1(int) a.y:2(int) b.x:3(int) b.z:4(int)\\n\")\n\tjoinFunc(opt.SemiJoinOp, \"a.x:1(int!null) a.y:2(int)\\n\")\n\tjoinFunc(opt.SemiJoinApplyOp, \"a.x:1(int!null) a.y:2(int)\\n\")\n\tjoinFunc(opt.AntiJoinOp, \"a.x:1(int!null) a.y:2(int)\\n\")\n\tjoinFunc(opt.AntiJoinApplyOp, \"a.x:1(int!null) a.y:2(int)\\n\")\n}", "func NewLeftJoinOn(table string, from string, to string) JoinQuery {\n\treturn NewJoinWith(\"LEFT JOIN\", table, from, to)\n}", "func ExtractJoinEqualityFilters(leftCols, rightCols opt.ColSet, on FiltersExpr) FiltersExpr {\n\t// We want to avoid allocating a new slice unless strictly necessary.\n\tvar newFilters FiltersExpr\n\tfor i := range on {\n\t\tcondition := on[i].Condition\n\t\tok, _, _ := ExtractJoinEquality(leftCols, rightCols, condition)\n\t\tif ok {\n\t\t\tif newFilters != nil {\n\t\t\t\tnewFilters = append(newFilters, on[i])\n\t\t\t}\n\t\t} else {\n\t\t\tif newFilters == nil {\n\t\t\t\tnewFilters = make(FiltersExpr, i, len(on)-1)\n\t\t\t\tcopy(newFilters, on[:i])\n\t\t\t}\n\t\t}\n\t}\n\tif newFilters != nil {\n\t\treturn newFilters\n\t}\n\treturn on\n}", "func (sd *SelectDataset) Join(table exp.Expression, condition exp.JoinCondition) *SelectDataset {\n\treturn sd.InnerJoin(table, condition)\n}", "func (sd *SelectDataset) RightOuterJoin(table exp.Expression, condition exp.JoinCondition) *SelectDataset {\n\treturn sd.joinTable(exp.NewConditionedJoinExpression(exp.RightOuterJoinType, table, condition))\n}", "func (sd *SelectDataset) 
NaturalFullJoin(table exp.Expression) *SelectDataset {\n\treturn sd.joinTable(exp.NewUnConditionedJoinExpression(exp.NaturalFullJoinType, table))\n}", "func (b *Builder) LeftJoin(joinTable, joinCond interface{}) *Builder {\n\treturn b.Join(\"LEFT\", joinTable, joinCond)\n}", "func (d *dbBasePostgres) GenerateOperatorLeftCol(fi *fieldInfo, operator string, leftCol *string) {\n\tswitch operator {\n\tcase \"contains\", \"startswith\", \"endswith\":\n\t\t*leftCol = fmt.Sprintf(\"%s::text\", *leftCol)\n\tcase \"iexact\", \"icontains\", \"istartswith\", \"iendswith\":\n\t\t*leftCol = fmt.Sprintf(\"UPPER(%s::text)\", *leftCol)\n\t}\n}", "func (session *Session) Join(joinOperator string, tablename interface{}, condition string, args ...interface{}) *Session {\n\tsession.Statement.Join(joinOperator, tablename, condition, args...)\n\treturn session\n}", "func (pb *primitiveBuilder) expandStar(inrcs []*resultColumn, expr *sqlparser.StarExpr) (outrcs []*resultColumn, expanded bool, err error) {\n\ttables := pb.st.AllTables()\n\tif tables == nil {\n\t\t// no table metadata available.\n\t\treturn inrcs, false, nil\n\t}\n\tif expr.TableName.IsEmpty() {\n\t\tfor _, t := range tables {\n\t\t\t// All tables must have authoritative column lists.\n\t\t\tif !t.isAuthoritative {\n\t\t\t\treturn inrcs, false, nil\n\t\t\t}\n\t\t}\n\t\tsingleTable := false\n\t\tif len(tables) == 1 {\n\t\t\tsingleTable = true\n\t\t}\n\t\tfor _, t := range tables {\n\t\t\tfor _, col := range t.columnNames {\n\t\t\t\tvar expr *sqlparser.AliasedExpr\n\t\t\t\tif singleTable {\n\t\t\t\t\t// If there's only one table, we use unqualified column names.\n\t\t\t\t\texpr = &sqlparser.AliasedExpr{\n\t\t\t\t\t\tExpr: &sqlparser.ColName{\n\t\t\t\t\t\t\tMetadata: t.columns[col.Lowered()],\n\t\t\t\t\t\t\tName: col,\n\t\t\t\t\t\t},\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\t// If a and b have id as their column, then\n\t\t\t\t\t// select * from a join b should result in\n\t\t\t\t\t// select a.id as id, b.id as id from a join b.\n\t\t\t\t\texpr = &sqlparser.AliasedExpr{\n\t\t\t\t\t\tExpr: &sqlparser.ColName{\n\t\t\t\t\t\t\tMetadata: t.columns[col.Lowered()],\n\t\t\t\t\t\t\tName: col,\n\t\t\t\t\t\t\tQualifier: t.alias,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tAs: col,\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tnewBuilder, rc, _, err := planProjection(pb, pb.plan, expr, t.Origin())\n\t\t\t\tif err != nil {\n\t\t\t\t\t// Unreachable because PushSelect won't fail on ColName.\n\t\t\t\t\treturn inrcs, false, err\n\t\t\t\t}\n\t\t\t\tpb.plan = newBuilder\n\t\t\t\tinrcs = append(inrcs, rc)\n\t\t\t}\n\t\t}\n\t\treturn inrcs, true, nil\n\t}\n\n\t// Expression qualified with table name.\n\tt, err := pb.st.FindTable(expr.TableName)\n\tif err != nil {\n\t\treturn inrcs, false, err\n\t}\n\tif !t.isAuthoritative {\n\t\treturn inrcs, false, nil\n\t}\n\tfor _, col := range t.columnNames {\n\t\texpr := &sqlparser.AliasedExpr{\n\t\t\tExpr: &sqlparser.ColName{\n\t\t\t\tMetadata: t.columns[col.Lowered()],\n\t\t\t\tName: col,\n\t\t\t\tQualifier: expr.TableName,\n\t\t\t},\n\t\t}\n\t\tnewBuilder, rc, _, err := planProjection(pb, pb.plan, expr, t.Origin())\n\t\tif err != nil {\n\t\t\t// Unreachable because PushSelect won't fail on ColName.\n\t\t\treturn inrcs, false, err\n\t\t}\n\t\tpb.plan = newBuilder\n\t\tinrcs = append(inrcs, rc)\n\t}\n\treturn inrcs, true, nil\n}", "func Outer(a, b Tensor, opts ...FuncOpt) (retVal Tensor, err error) {\n\tif a.Dtype() != b.Dtype() {\n\t\terr = errors.Errorf(dtypeMismatch, a.Dtype(), b.Dtype())\n\t\treturn\n\t}\n\n\tswitch at := a.(type) {\n\tcase 
*Dense:\n\t\tbt := b.(*Dense)\n\t\treturn at.Outer(bt, opts...)\n\t}\n\tpanic(\"Unreachable\")\n}", "func TestBaseColExpNode_Intersect(t *testing.T) {\n\tcol := Column(myTb, \"Col\")\n\tarr := Array(3, 50, 70, 80)\n\tassert.Equal(t, fmt.Sprintf(`\"%s\".\"Col\" && ARRAY[3, 50, 70, 80]`, myTbTable),\n\t\tAstToSQL(col.Intersect(arr)))\n}", "func (q Query) JoinT(inner Query,\n\touterKeySelectorFn interface{},\n\tinnerKeySelectorFn interface{},\n\tresultSelectorFn interface{}) Query {\n\touterKeySelectorGenericFunc, err := newGenericFunc(\n\t\t\"JoinT\", \"outerKeySelectorFn\", outerKeySelectorFn,\n\t\tsimpleParamValidator(newElemTypeSlice(new(genericType)), newElemTypeSlice(new(genericType))),\n\t)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\touterKeySelectorFunc := func(item interface{}) interface{} {\n\t\treturn outerKeySelectorGenericFunc.Call(item)\n\t}\n\n\tinnerKeySelectorFuncGenericFunc, err := newGenericFunc(\n\t\t\"JoinT\", \"innerKeySelectorFn\",\n\t\tinnerKeySelectorFn,\n\t\tsimpleParamValidator(newElemTypeSlice(new(genericType)), newElemTypeSlice(new(genericType))),\n\t)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tinnerKeySelectorFunc := func(item interface{}) interface{} {\n\t\treturn innerKeySelectorFuncGenericFunc.Call(item)\n\t}\n\n\tresultSelectorGenericFunc, err := newGenericFunc(\n\t\t\"JoinT\", \"resultSelectorFn\", resultSelectorFn,\n\t\tsimpleParamValidator(newElemTypeSlice(new(genericType), new(genericType)), newElemTypeSlice(new(genericType))),\n\t)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tresultSelectorFunc := func(outer interface{}, inner interface{}) interface{} {\n\t\treturn resultSelectorGenericFunc.Call(outer, inner)\n\t}\n\n\treturn q.Join(inner, outerKeySelectorFunc, innerKeySelectorFunc, resultSelectorFunc)\n}", "func InnerJoin(lx, rx reflect.Value) reflect.Value {\n\tresult := reflect.MakeSlice(reflect.SliceOf(lx.Type().Elem()), 0, lx.Len()+rx.Len())\n\trhash := hashSlice(rx)\n\tlhash := make(map[interface{}]struct{}, lx.Len())\n\n\tfor i := 0; i < lx.Len(); i++ {\n\t\tv := lx.Index(i)\n\t\t_, ok := rhash[v.Interface()]\n\t\t_, alreadyExists := lhash[v.Interface()]\n\t\tif ok && !alreadyExists {\n\t\t\tlhash[v.Interface()] = struct{}{}\n\t\t\tresult = reflect.Append(result, v)\n\t\t}\n\t}\n\treturn result\n}", "func (stmt *statement) FullJoin(table, on string) Statement {\n\tstmt.join(\"FULL JOIN \", table, on)\n\treturn stmt\n}", "func (filter *JoinFilter) JoinClause(structMap TableAndColumnLocater, dialect gorp.Dialect, startBindIdx int) (string, []interface{}, error) {\n\tjoin := \" inner join \" + filter.QuotedJoinTable\n\ton, args, err := filter.AndFilter.Where(structMap, dialect, startBindIdx)\n\tif err != nil {\n\t\treturn \"\", nil, err\n\t}\n\tif on != \"\" {\n\t\tjoin += \" on \" + on\n\t}\n\treturn join, args, nil\n}", "func LeftJoin(lx, rx reflect.Value) reflect.Value {\n\tresult := reflect.MakeSlice(reflect.SliceOf(lx.Type().Elem()), 0, lx.Len())\n\trhash := hashSlice(rx)\n\n\tfor i := 0; i < lx.Len(); i++ {\n\t\tv := lx.Index(i)\n\t\t_, ok := rhash[v.Interface()]\n\t\tif !ok {\n\t\t\tresult = reflect.Append(result, v)\n\t\t}\n\t}\n\treturn result\n}", "func JoinWith(handler *model.JoinTableHandler, ne *engine.Engine, source interface{}) error {\n\tne.Scope.ContextValue(source)\n\ttableName := handler.TableName\n\tquotedTableName := Quote(ne, tableName)\n\tvar joinConditions []string\n\tm, err := GetModelStruct(ne, source)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif handler.Source.ModelType == m.ModelType {\n\t\td := 
reflect.New(handler.Destination.ModelType).Interface()\n\t\tdestinationTableName := QuotedTableName(ne, d)\n\t\tfor _, foreignKey := range handler.Destination.ForeignKeys {\n\t\t\tjoinConditions = append(joinConditions, fmt.Sprintf(\"%v.%v = %v.%v\",\n\t\t\t\tquotedTableName,\n\t\t\t\tQuote(ne, foreignKey.DBName),\n\t\t\t\tdestinationTableName,\n\t\t\t\tQuote(ne, foreignKey.AssociationDBName)))\n\t\t}\n\n\t\tvar foreignDBNames []string\n\t\tvar foreignFieldNames []string\n\t\tfor _, foreignKey := range handler.Source.ForeignKeys {\n\t\t\tforeignDBNames = append(foreignDBNames, foreignKey.DBName)\n\t\t\tif field, ok := FieldByName(ne, source, foreignKey.AssociationDBName); ok == nil {\n\t\t\t\tforeignFieldNames = append(foreignFieldNames, field.Name)\n\t\t\t}\n\t\t}\n\n\t\tforeignFieldValues := util.ColumnAsArray(foreignFieldNames, ne.Scope.ValueOf())\n\n\t\tvar condString string\n\t\tif len(foreignFieldValues) > 0 {\n\t\t\tvar quotedForeignDBNames []string\n\t\t\tfor _, dbName := range foreignDBNames {\n\t\t\t\tquotedForeignDBNames = append(quotedForeignDBNames, tableName+\".\"+dbName)\n\t\t\t}\n\n\t\t\tcondString = fmt.Sprintf(\"%v IN (%v)\",\n\t\t\t\tToQueryCondition(ne, quotedForeignDBNames),\n\t\t\t\tutil.ToQueryMarks(foreignFieldValues))\n\t\t} else {\n\t\t\tcondString = fmt.Sprintf(\"1 <> 1\")\n\t\t}\n\n\t\tsearch.Join(ne,\n\t\t\tfmt.Sprintf(\"INNER JOIN %v ON %v\",\n\t\t\t\tquotedTableName,\n\t\t\t\tstrings.Join(joinConditions, \" AND \")))\n\t\tsearch.Where(ne, condString, util.ToQueryValues(foreignFieldValues)...)\n\t\treturn nil\n\t}\n\treturn errors.New(\"wrong source type for join table handler\")\n}", "func LeftJoin(table, on string) QueryOption {\n\treturn newFuncQueryOption(func(wrapper *QueryWrapper) {\n\t\twrapper.joins = append(wrapper.joins, \"LEFT\", \"JOIN\", table, \"ON\", on)\n\t\twrapper.queryLen += 5\n\t})\n}", "func (session *Session) Join(joinOperator string, tablename interface{}, condition string, args ...interface{}) *Session {\n\tsession.Session = session.Session.Join(joinOperator, tablename, condition, args...)\n\treturn session\n}", "func (f *predicateSqlizerFactory) addJoinsToSelectBuilder(q sq.SelectBuilder) sq.SelectBuilder {\n\tfor i, alias := range f.joinedTables {\n\t\taliasName := f.aliasName(alias.secondaryTable, i)\n\t\tjoinClause := fmt.Sprintf(\"%s AS %s ON %s = %s\",\n\t\t\tf.db.tableName(alias.secondaryTable), pq.QuoteIdentifier(aliasName),\n\t\t\tfullQuoteIdentifier(f.primaryTable, alias.primaryColumn),\n\t\t\tfullQuoteIdentifier(aliasName, alias.secondaryColumn))\n\t\tq = q.LeftJoin(joinClause)\n\t}\n\n\tif len(f.joinedTables) > 0 {\n\t\tq = q.Distinct()\n\t}\n\treturn q\n}", "func NewInnerJoinOn(table string, from string, to string, filter ...FilterQuery) JoinQuery {\n\treturn NewJoinWith(\"INNER JOIN\", table, from, to, filter...)\n}", "func (s *BasePlSqlParserListener) ExitOuter_join_type(ctx *Outer_join_typeContext) {}", "func (t *Dense) Outer(other Tensor, opts ...FuncOpt) (retVal *Dense, err error) {\n\t// check both are vectors\n\tif !t.Shape().IsVector() || !other.Shape().IsVector() {\n\t\terr = errors.Errorf(\"Outer only works when there are two vectors. t's shape: %v. 
other's shape: %v\", t.Shape(), other.Shape())\n\t\treturn\n\t}\n\n\tm := t.Size()\n\tn := other.Size()\n\n\t// check whether retVal has the same size as the resulting matrix would be: mxn\n\texpectedShape := Shape{m, n}\n\n\tfo := ParseFuncOpts(opts...)\n\tdefer returnOpOpt(fo)\n\tif retVal, err = handleReuse(fo.Reuse(), expectedShape, fo.Safe()); err != nil {\n\t\terr = errors.Wrapf(err, opFail, \"Outer\")\n\t\treturn\n\t}\n\n\tif retVal == nil {\n\t\tretVal = recycledDense(t.t, expectedShape, WithEngine(t.e))\n\t\tif t.o.IsColMajor() {\n\t\t\tAsFortran(nil)(retVal)\n\t\t}\n\t}\n\n\te := t.e\n\n\t// DGER does not have any beta. So the values have to be zeroed first if the tensor is to be reused\n\tretVal.Zero()\n\tif op, ok := e.(OuterProder); ok {\n\t\tif err = op.Outer(t, other, retVal); err != nil {\n\t\t\treturn nil, errors.Wrapf(err, opFail, \"engine.uter\")\n\t\t}\n\t\treturn handleIncr(retVal, fo.Reuse(), fo.Incr(), expectedShape)\n\t}\n\treturn nil, errors.New(\"engine does not support Outer\")\n}", "func (*outerJoinEliminator) extractInnerJoinKeys(join *LogicalJoin, innerChildIdx int) *expression.Schema {\n\tjoinKeys := make([]*expression.Column, 0, len(join.EqualConditions))\n\tfor _, eqCond := range join.EqualConditions {\n\t\tjoinKeys = append(joinKeys, eqCond.GetArgs()[innerChildIdx].(*expression.Column))\n\t}\n\treturn expression.NewSchema(joinKeys...)\n}", "func (b *Builder) CrossJoin(joinTable string, joinCond interface{}) *Builder {\r\n\treturn b.Join(\"CROSS\", joinTable, joinCond)\r\n}", "func (left *DataTable) InnerJoin(right *DataTable, on []JoinOn) (*DataTable, error) {\n\treturn newJoinImpl(innerJoin, []*DataTable{left, right}, on).Compute()\n}", "func (s *BasePlSqlParserListener) ExitOuter_join_sign(ctx *Outer_join_signContext) {}", "func moveJoinConditionsToFilter(ctx *sql.Context, a *Analyzer, n sql.Node, scope *plan.Scope, sel RuleSelector) (sql.Node, transform.TreeIdentity, error) {\n\tif !n.Resolved() {\n\t\treturn n, transform.SameTree, nil\n\t}\n\n\tvar nonJoinFilters []sql.Expression\n\tvar topJoin sql.Node\n\tnode, same, err := transform.Node(n, func(n sql.Node) (sql.Node, transform.TreeIdentity, error) {\n\t\tjoin, ok := n.(*plan.JoinNode)\n\t\tif !ok {\n\t\t\t// no join\n\t\t\treturn n, transform.SameTree, nil\n\t\t}\n\n\t\t// update top join to be current join\n\t\ttopJoin = n\n\n\t\t// no filter or left join: nothing to do to the tree\n\t\tif join.JoinType().IsDegenerate() || !join.JoinType().IsInner() {\n\t\t\treturn n, transform.SameTree, nil\n\t\t}\n\n\t\tleftSources := nodeSources(join.Left())\n\t\trightSources := nodeSources(join.Right())\n\t\tfiltersMoved := 0\n\t\tvar condFilters []sql.Expression\n\t\tfor _, e := range expression.SplitConjunction(join.JoinCond()) {\n\t\t\tsources := expressionSources(e)\n\t\t\tif len(sources) == 1 {\n\t\t\t\tnonJoinFilters = append(nonJoinFilters, e)\n\t\t\t\tfiltersMoved++\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tbelongsToLeftTable := containsSources(leftSources, sources)\n\t\t\tbelongsToRightTable := containsSources(rightSources, sources)\n\n\t\t\tif belongsToLeftTable || belongsToRightTable {\n\t\t\t\tnonJoinFilters = append(nonJoinFilters, e)\n\t\t\t\tfiltersMoved++\n\t\t\t} else {\n\t\t\t\tcondFilters = append(condFilters, e)\n\t\t\t}\n\t\t}\n\n\t\tif filtersMoved == 0 {\n\t\t\treturn topJoin, transform.SameTree, nil\n\t\t}\n\n\t\tif len(condFilters) > 0 {\n\t\t\tvar err error\n\t\t\ttopJoin, err = join.WithExpressions(expression.JoinAnd(condFilters...))\n\t\t\tif err != nil {\n\t\t\t\treturn nil, 
transform.SameTree, err\n\t\t\t}\n\t\t\treturn topJoin, transform.NewTree, nil\n\t\t}\n\n\t\t// if there are no cond filters left we can just convert it to a cross join\n\t\ttopJoin = plan.NewCrossJoin(join.Left(), join.Right())\n\t\treturn topJoin, transform.NewTree, nil\n\t})\n\n\tif err != nil {\n\t\treturn nil, transform.SameTree, err\n\t}\n\n\tif len(nonJoinFilters) == 0 || same {\n\t\treturn node, transform.SameTree, nil\n\t}\n\n\tif node == topJoin {\n\t\treturn plan.NewFilter(expression.JoinAnd(nonJoinFilters...), node), transform.NewTree, nil\n\t}\n\n\tresultNode, resultIdentity, err := transform.Node(node, func(n sql.Node) (sql.Node, transform.TreeIdentity, error) {\n\t\tchildren := n.Children()\n\t\tif len(children) == 0 {\n\t\t\treturn n, transform.SameTree, nil\n\t\t}\n\n\t\tindexOfTopJoin := -1\n\t\tfor idx, child := range children {\n\t\t\tif child == topJoin {\n\t\t\t\tindexOfTopJoin = idx\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif indexOfTopJoin == -1 {\n\t\t\treturn n, transform.SameTree, nil\n\t\t}\n\n\t\tswitch n := n.(type) {\n\t\tcase *plan.Filter:\n\t\t\tnonJoinFilters = append(nonJoinFilters, n.Expression)\n\t\t\tnewExpression := expression.JoinAnd(nonJoinFilters...)\n\t\t\tnewFilter := plan.NewFilter(newExpression, topJoin)\n\t\t\tnonJoinFilters = nil // clear nonJoinFilters so we know they were used\n\t\t\treturn newFilter, transform.NewTree, nil\n\t\tdefault:\n\t\t\tnewExpression := expression.JoinAnd(nonJoinFilters...)\n\t\t\tnewFilter := plan.NewFilter(newExpression, topJoin)\n\t\t\tchildren[indexOfTopJoin] = newFilter\n\t\t\tupdatedNode, err := n.WithChildren(children...)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, transform.SameTree, err\n\t\t\t}\n\t\t\tnonJoinFilters = nil // clear nonJoinFilters so we know they were used\n\t\t\treturn updatedNode, transform.NewTree, nil\n\t\t}\n\t})\n\n\tif err != nil {\n\t\treturn nil, transform.SameTree, err\n\t}\n\n\t// if there are still nonJoinFilters left, it means we removed them but failed to re-insert them\n\tif len(nonJoinFilters) > 0 {\n\t\treturn nil, transform.SameTree, sql.ErrDroppedJoinFilters.New()\n\t}\n\n\treturn resultNode, resultIdentity, nil\n}", "func NewInnerJoinOn(table string, from string, to string) JoinQuery {\n\treturn NewJoinWith(\"INNER JOIN\", table, from, to)\n}", "func (s *BaseMySqlParserListener) EnterNaturalJoin(ctx *NaturalJoinContext) {}", "func (mp *JoinMultiplicity) JoinFiltersMatchAllLeftRows() bool {\n\treturn mp.LeftMultiplicity&MultiplicityPreservedVal != 0\n}", "func (sd *SelectDataset) CrossJoin(table exp.Expression) *SelectDataset {\n\treturn sd.joinTable(exp.NewUnConditionedJoinExpression(exp.CrossJoinType, table))\n}", "func InnerJoin(clause string, args ...interface{}) QueryMod {\n\treturn func(q *queries.Query) {\n\t\tqueries.AppendInnerJoin(q, clause, args...)\n\t}\n}", "func (b *Builder) CrossJoin(joinTable, joinCond interface{}) *Builder {\n\treturn b.Join(\"CROSS\", joinTable, joinCond)\n}", "func TestPlanner_Plan_Join(t *testing.T) {\n\tdb := NewDB(\"2000-01-01T12:00:00Z\")\n\tdb.WriteSeries(\"cpu.0\", map[string]string{}, \"2000-01-01T00:00:00Z\", map[string]interface{}{\"value\": float64(1)})\n\tdb.WriteSeries(\"cpu.0\", map[string]string{}, \"2000-01-01T00:00:10Z\", map[string]interface{}{\"value\": float64(2)})\n\tdb.WriteSeries(\"cpu.0\", map[string]string{}, \"2000-01-01T00:00:20Z\", map[string]interface{}{\"value\": float64(3)})\n\tdb.WriteSeries(\"cpu.0\", map[string]string{}, \"2000-01-01T00:00:30Z\", map[string]interface{}{\"value\": 
float64(4)})\n\n\tdb.WriteSeries(\"cpu.1\", map[string]string{}, \"2000-01-01T00:00:00Z\", map[string]interface{}{\"value\": float64(10)})\n\tdb.WriteSeries(\"cpu.1\", map[string]string{}, \"2000-01-01T00:00:10Z\", map[string]interface{}{\"value\": float64(20)})\n\tdb.WriteSeries(\"cpu.1\", map[string]string{}, \"2000-01-01T00:00:30Z\", map[string]interface{}{\"value\": float64(40)})\n\n\t// Query must join the series and sum the values.\n\trs := db.MustPlanAndExecute(`\n\t\tSELECT sum(cpu.0.value) + sum(cpu.1.value) AS sum\n\t\tFROM JOIN(cpu.0, cpu.1)\n\t\tWHERE time >= '2000-01-01 00:00:00' AND time < '2000-01-01 00:01:00'\n\t\tGROUP BY time(10s)`)\n\n\t// Expected resultset.\n\texp := minify(`[{\n\t\t\"columns\":[\"time\",\"sum\"],\n\t\t\"values\":[\n\t\t\t[946684800000000,11],\n\t\t\t[946684810000000,22],\n\t\t\t[946684820000000,3],\n\t\t\t[946684830000000,44],\n\t\t\t[946684840000000,0],\n\t\t\t[946684850000000,0]\n\t\t]\n\t}]`)\n\n\t// Compare resultsets.\n\tif act := jsonify(rs); exp != act {\n\t\tt.Fatalf(\"unexpected resultset: %s\", indent(act))\n\t}\n}", "func BreakExpressionInLHSandRHS(\n\tctx *plancontext.PlanningContext,\n\texpr sqlparser.Expr,\n\tlhs semantics.TableSet,\n) (col JoinColumn, err error) {\n\trewrittenExpr := sqlparser.CopyOnRewrite(expr, nil, func(cursor *sqlparser.CopyOnWriteCursor) {\n\t\tnode, ok := cursor.Node().(*sqlparser.ColName)\n\t\tif !ok {\n\t\t\treturn\n\t\t}\n\t\tdeps := ctx.SemTable.RecursiveDeps(node)\n\t\tif deps.IsEmpty() {\n\t\t\terr = vterrors.VT13001(\"unknown column. has the AST been copied?\")\n\t\t\tcursor.StopTreeWalk()\n\t\t\treturn\n\t\t}\n\t\tif !deps.IsSolvedBy(lhs) {\n\t\t\treturn\n\t\t}\n\n\t\tnode.Qualifier.Qualifier = sqlparser.NewIdentifierCS(\"\")\n\t\tcol.LHSExprs = append(col.LHSExprs, node)\n\t\tbvName := node.CompliantName()\n\t\tcol.BvNames = append(col.BvNames, bvName)\n\t\targ := sqlparser.NewArgument(bvName)\n\t\t// we are replacing one of the sides of the comparison with an argument,\n\t\t// but we don't want to lose the type information we have, so we copy it over\n\t\tctx.SemTable.CopyExprInfo(node, arg)\n\t\tcursor.Replace(arg)\n\t}, nil).(sqlparser.Expr)\n\n\tif err != nil {\n\t\treturn JoinColumn{}, err\n\t}\n\tctx.JoinPredicates[expr] = append(ctx.JoinPredicates[expr], rewrittenExpr)\n\tcol.RHSExpr = rewrittenExpr\n\treturn\n}", "func InnerJoin(tables []*DataTable, on []JoinOn) (*DataTable, error) {\n\treturn newJoinImpl(innerJoin, tables, on).Compute()\n}", "func (this *KeyspaceTerm) JoinKeys() expression.Expression {\n\treturn this.joinKeys\n}", "func (sd *SelectDataset) InnerJoin(table exp.Expression, condition exp.JoinCondition) *SelectDataset {\n\treturn sd.joinTable(exp.NewConditionedJoinExpression(exp.InnerJoinType, table, condition))\n}", "func (sd *SelectDataset) FullJoin(table exp.Expression, condition exp.JoinCondition) *SelectDataset {\n\treturn sd.joinTable(exp.NewConditionedJoinExpression(exp.FullJoinType, table, condition))\n}", "func (s *BasePlSqlParserListener) EnterJoin_clause(ctx *Join_clauseContext) {}", "func NewNaturalJoin(left, right sql.Node) *JoinNode {\n\treturn NewJoin(left, right, JoinTypeUsing, nil)\n}", "func main() {\n\t// bigger table\n\tfmt.Printf(\"%X \\n\", JoinExample(\"./t/r0.tbl\", \"./t/r0.tbl\", []int{0}, []int{1})) // 767636031\n\tfmt.Printf(\"%X \\n\", JoinExample(\"./t/r0.tbl\", \"./t/r1.tbl\", []int{0}, []int{0})) // 49082128576\n\tfmt.Printf(\"%X \\n\", JoinExample(\"./t/r0.tbl\", \"./t/r1.tbl\", []int{1}, []int{1})) // 85306117839070\n\tfmt.Printf(\"%X \\n\", 
JoinExample(\"./t/r0.tbl\", \"./t/r2.tbl\", []int{0}, []int{0})) // 48860100254\n\tfmt.Printf(\"%X \\n\", JoinExample(\"./t/r0.tbl\", \"./t/r1.tbl\", []int{0, 1}, []int{0, 1})) //5552101\n\tfmt.Printf(\"%X \\n\", JoinExample(\"./t/r1.tbl\", \"./t/r2.tbl\", []int{0}, []int{0})) // 6331038719880\n\tfmt.Printf(\"%X \\n\", JoinExample(\"./t/r2.tbl\", \"./t/r2.tbl\", []int{0, 1}, []int{0, 1})) // 42056985375886\n}", "func NewLeftJoin(table string) JoinQuery {\n\treturn NewLeftJoinOn(table, \"\", \"\")\n}", "func IsColsAllFromOuterTable(cols []*expression.Column, outerUniqueIDs set.Int64Set) bool {\n\t// There are two cases \"return false\" here:\n\t// 1. If cols represents aggCols, then \"len(cols) == 0\" means not all aggregate functions are duplicate agnostic before.\n\t// 2. If cols represents parentCols, then \"len(cols) == 0\" means no parent logical plan of this join plan.\n\tif len(cols) == 0 {\n\t\treturn false\n\t}\n\tfor _, col := range cols {\n\t\tif !outerUniqueIDs.Exist(col.UniqueID) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func (b *JoinBuilder) LeftJoin(other *Table) *JoinBuilder {\n\treturn makeJoinBuilder(\"LEFT JOIN\", b, other)\n}", "func NewLeftJoin(table string, filter ...FilterQuery) JoinQuery {\n\treturn NewLeftJoinOn(table, \"\", \"\", filter...)\n}", "func (b *JoinBuilder) Using(cols ...interface{}) *JoinBuilder {\n\tvar vals Columns\n\tfor _, c := range cols {\n\t\tvar name string\n\t\tswitch t := c.(type) {\n\t\tcase string:\n\t\t\tname = t\n\t\tcase ValExprBuilder:\n\t\t\tif n, ok := t.ValExpr.(*ColName); ok {\n\t\t\t\tname = n.Name\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tvar v ValExpr\n\t\tif len(name) == 0 {\n\t\t\tv = makeErrVal(\"unsupported type %T: %v\", c, c)\n\t\t} else if b.leftTable.column(name) == nil ||\n\t\t\tb.rightTable.column(name) == nil {\n\t\t\tv = makeErrVal(\"invalid join column: %s\", name)\n\t\t} else {\n\t\t\tv = &ColName{Name: name}\n\t\t}\n\t\tvals = append(vals, &NonStarExpr{Expr: v})\n\t}\n\tb.Cond = &UsingJoinCond{Cols: vals}\n\treturn b\n}", "func (b *Builder) FullJoin(joinTable string, joinCond interface{}) *Builder {\r\n\treturn b.Join(\"FULL\", joinTable, joinCond)\r\n}", "func (j *baseJoiner) filterAndCheckOuterRowStatus(\n\tinput, output *chunk.Chunk, innerColsLen int, outerRowStatus []outerRowStatusFlag,\n\tlUsed, rUsed []int) ([]outerRowStatusFlag, error) {\n\tvar err error\n\tj.selected, j.isNull, err = expression.VectorizedFilterConsiderNull(j.ctx, j.conditions, chunk.NewIterator4Chunk(input), j.selected, j.isNull)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfor i := 0; i < len(j.selected); i++ {\n\t\tif j.isNull[i] {\n\t\t\touterRowStatus[i] = outerRowHasNull\n\t\t} else if !j.selected[i] {\n\t\t\touterRowStatus[i] = outerRowUnmatched\n\t\t}\n\t}\n\n\tif lUsed != nil || rUsed != nil {\n\t\tlSize := innerColsLen\n\t\tif !j.outerIsRight {\n\t\t\tlSize = input.NumCols() - innerColsLen\n\t\t}\n\t\tused := make([]int, len(lUsed)+len(rUsed))\n\t\tcopy(used, lUsed)\n\t\tfor i := range rUsed {\n\t\t\tused[i+len(lUsed)] = rUsed[i] + lSize\n\t\t}\n\t\tinput = input.Prune(used)\n\t}\n\t// Batch copies selected rows to output chunk.\n\t_, err = chunk.CopySelectedJoinRowsDirect(input, j.selected, output)\n\treturn outerRowStatus, err\n}", "func ELTMap2SelectSQL(nodeLink *NodeLinkInfo, outputName string) (string, error) {\n\t// TODO: will return SELECT\n\tvar b bytes.Buffer\n\twhereConds := make([]string, 0, 0)\n\n\tb.WriteString(\"SELECT \")\n\n\tinputs, _ := getInputTables(&nodeLink.Node)\n\toutput, _ := 
getOutputTable(&nodeLink.Node, outputName)\n\n\tvar firstcol = true\n\tfor _, col := range output.Columns {\n\t\tif !firstcol {\n\t\t\tb.WriteString(\", \")\n\t\t}\n\t\tfirstcol = false\n\t\tb.WriteString(strings.Trim(col.Expression, \" \"))\n\t\tb.WriteString(\" AS \")\n\t\tb.WriteString(TakeRightObj(col.Name))\n\t}\n\n\tb.WriteString(\" FROM \")\n\n\tvar firsttable = true\n\tfor _, input := range inputs {\n\n\t\tvar linkInput *NodeLinkInfo\n\t\tfor _, prevConn := range nodeLink.PrevConns {\n\t\t\tif prevConn.Label == input.TableName {\n\t\t\t\tlinkInput = prevConn.Link\n\t\t\t}\n\t\t}\n\t\tif linkInput == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tcomponentType := GetComponentType(&linkInput.Node)\n\t\tvar fromItem string\n\t\tswitch componentType {\n\t\tcase ComponentELTInput:\n\t\t\tfromItem, _ = tELTInput2FromItemSQL(linkInput)\n\t\tcase ComponentELTMap:\n\t\t\tfromItem, _ = ELTMap2SelectSQL(linkInput, input.TableName)\n\t\t\tfromItem = \"(\" + fromItem + \")\"\n\t\t}\n\t\talias := input.Alias\n\n\t\tif input.JoinType == \"NO_JOIN\" {\n\t\t\tif !firsttable {\n\t\t\t\tb.WriteRune(',')\n\t\t\t}\n\t\t\tb.WriteString(fromItem + \" \" + TakeRightObj(alias) + \" \")\n\t\t} else {\n\t\t\t// append `join`` phrase\n\t\t\tb.WriteString(joinType2join(input.JoinType) + \" \" + fromItem + \" \" + TakeRightObj(alias))\n\n\t\t\t// make `on` phrase\n\t\t\tb.WriteString(\" ON (\")\n\t\t\tfirstcol := true\n\t\t\tfor _, col := range input.Columns {\n\t\t\t\tif !col.Join {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tif !firstcol {\n\t\t\t\t\tb.WriteString(\" AND \")\n\t\t\t\t}\n\t\t\t\tfirstcol = false\n\t\t\t\tb.WriteString(col2cond(alias, &col))\n\t\t\t}\n\t\t\tb.WriteString(\")\")\n\t\t}\n\t\t// collect `where` phrase\n\t\tfor _, col := range input.Columns {\n\t\t\tif col.Join {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif col.Operator == \"\" {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\twhereConds = append(whereConds, col2cond(alias, &col))\n\t\t}\n\n\t\tfirsttable = false\n\t}\n\n\twhereConds = append(whereConds, output.Filters...)\n\n\tif len(whereConds) > 0 {\n\t\tb.WriteString(\" WHERE \")\n\t\tb.WriteString(strings.Join(whereConds, \" AND \"))\n\t}\n\tif len(output.OtherFilters) > 0 {\n\t\tb.WriteRune(' ')\n\t\tb.WriteString(strings.Join(output.OtherFilters, \" \"))\n\t}\n\n\treturn b.String(), nil\n}", "func (fc *FromClause) String() string {\n\tvar str = `FROM ` + fc.table\n\n\tfor _, l := range fc.Leafs {\n\t\tstr += \" \" + l.JoinType + \" JOIN \" + l.Table + \" ON \" + l.Condition\n\t}\n\n\treturn str\n}", "func EqualsRefOfJoinTableExpr(a, b *JoinTableExpr) bool {\n\tif a == b {\n\t\treturn true\n\t}\n\tif a == nil || b == nil {\n\t\treturn false\n\t}\n\treturn EqualsTableExpr(a.LeftExpr, b.LeftExpr) &&\n\t\ta.Join == b.Join &&\n\t\tEqualsTableExpr(a.RightExpr, b.RightExpr) &&\n\t\tEqualsJoinCondition(a.Condition, b.Condition)\n}", "func (l *Loader) OuterSym(i Sym) Sym {\n\t// FIXME: add check for isExternal?\n\treturn l.outer[i]\n}", "func (w *Wrapper) JoinWhere(table interface{}, args ...interface{}) *Wrapper {\n\tw.saveJoinCondition(\"AND\", table, args...)\n\treturn w\n}", "func JoinWithQL(handler *model.JoinTableHandler, ne *engine.Engine, source interface{}) error {\n\tne.Scope.ContextValue(source)\n\ttableName := handler.TableName\n\tquotedTableName := Quote(ne, tableName)\n\tvar joinConditions []string\n\tm, err := GetModelStruct(ne, source)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif handler.Source.ModelType == m.ModelType {\n\t\tne.Search.TableNames = append(ne.Search.TableNames, handler.TableName)\n\t\td 
:= reflect.New(handler.Destination.ModelType).Interface()\n\t\tdestinationTableName := QuotedTableName(ne, d)\n\t\tfor _, foreignKey := range handler.Destination.ForeignKeys {\n\t\t\tjoinConditions = append(joinConditions, fmt.Sprintf(\"%v.%v = %v.%v\",\n\t\t\t\tquotedTableName,\n\t\t\t\tQuote(ne, foreignKey.DBName),\n\t\t\t\tdestinationTableName,\n\t\t\t\tQuote(ne, foreignKey.AssociationDBName)))\n\t\t}\n\n\t\tvar foreignDBNames []string\n\t\tvar foreignFieldNames []string\n\t\tfor _, foreignKey := range handler.Source.ForeignKeys {\n\t\t\tforeignDBNames = append(foreignDBNames, foreignKey.DBName)\n\t\t\tif field, ok := FieldByName(ne, source, foreignKey.AssociationDBName); ok == nil {\n\t\t\t\tforeignFieldNames = append(foreignFieldNames, field.Name)\n\t\t\t}\n\t\t}\n\n\t\tforeignFieldValues := util.ColumnAsArray(foreignFieldNames, ne.Scope.ValueOf())\n\n\t\tif len(foreignFieldValues) > 0 {\n\t\t\tvar quotedForeignDBNames []string\n\t\t\tfor _, dbName := range foreignDBNames {\n\t\t\t\tquotedForeignDBNames = append(quotedForeignDBNames, tableName+\".\"+dbName)\n\t\t\t}\n\t\t\tfor _, q := range quotedForeignDBNames {\n\t\t\t\tjoinConditions = append(joinConditions, fmt.Sprintf(\"%s=?\", q))\n\t\t\t}\n\t\t}\n\t\tsearch.Where(ne, strings.Join(joinConditions, \" AND \"), util.ToQueryValues(foreignFieldValues)...)\n\t\treturn nil\n\t}\n\treturn errors.New(\"wrong source type for join table handler\")\n}", "func (e *explainer) expr(nodeName, fieldName string, n int, expr tree.Expr) {\n\tif e.showExprs && expr != nil {\n\t\tif nodeName == \"join\" {\n\t\t\tqualifySave := e.fmtFlags\n\t\t\te.fmtFlags.SetFlags(tree.FmtShowTableAliases)\n\t\t\tdefer func(e *explainer, f tree.FmtFlags) { e.fmtFlags = f }(e, qualifySave)\n\t\t}\n\t\tif n >= 0 {\n\t\t\tfieldName = fmt.Sprintf(\"%s %d\", fieldName, n)\n\t\t}\n\n\t\tf := tree.NewFmtCtxWithBuf(e.fmtFlags)\n\t\tf.WithPlaceholderFormat(e.showPlaceholderValues)\n\t\tf.FormatNode(expr)\n\t\te.attr(nodeName, fieldName, f.CloseAndGetString())\n\t}\n}", "func (sd *SelectDataset) NaturalRightJoin(table exp.Expression) *SelectDataset {\n\treturn sd.joinTable(exp.NewUnConditionedJoinExpression(exp.NaturalRightJoinType, table))\n}", "func getJoinKey(sc *variable.StatementContext, cols []*expression.Column, row *Row, targetTypes []*types.FieldType,\n\tvals []types.Datum, bytes []byte) (bool, []byte, error) {\n\tvar err error\n\tfor i, col := range cols {\n\t\tvals[i], err = col.Eval(row.Data)\n\t\tif err != nil {\n\t\t\treturn false, nil, errors.Trace(err)\n\t\t}\n\t\tif vals[i].IsNull() {\n\t\t\treturn true, nil, nil\n\t\t}\n\t\tvals[i], err = vals[i].ConvertTo(sc, targetTypes[i])\n\t\tif err != nil {\n\t\t\treturn false, nil, errors.Trace(err)\n\t\t}\n\t}\n\tif len(vals) == 0 {\n\t\treturn false, nil, nil\n\t}\n\tbytes, err = codec.EncodeValue(bytes, vals...)\n\treturn false, bytes, errors.Trace(err)\n}", "func (mp *JoinMultiplicity) JoinDoesNotDuplicateLeftRows(op opt.Operator) bool {\n\tswitch op {\n\tcase opt.InnerJoinOp, opt.LeftJoinOp, opt.FullJoinOp:\n\t\tbreak\n\n\tcase opt.SemiJoinOp:\n\t\treturn true\n\n\tdefault:\n\t\tpanic(errors.AssertionFailedf(\"unsupported operator: %v\", op))\n\t}\n\treturn mp.JoinFiltersDoNotDuplicateLeftRows()\n}", "func (filter *CombinedFilter) joinFilters(separator string, structMap TableAndColumnLocater, dialect gorp.Dialect, startBindIdx int) (string, []interface{}, error) {\n\tbuffer := bytes.Buffer{}\n\targs := make([]interface{}, 0, len(filter.subFilters))\n\tif len(filter.subFilters) > 1 
{\n\t\tbuffer.WriteString(\"(\")\n\t}\n\tfor index, subFilter := range filter.subFilters {\n\t\tnextWhere, nextArgs, err := subFilter.Where(structMap, dialect, startBindIdx+len(args))\n\t\tif err != nil {\n\t\t\treturn \"\", nil, err\n\t\t}\n\t\targs = append(args, nextArgs...)\n\t\tif index != 0 {\n\t\t\tbuffer.WriteString(separator)\n\t\t}\n\t\tbuffer.WriteString(nextWhere)\n\t}\n\tif len(filter.subFilters) > 1 {\n\t\tbuffer.WriteString(\")\")\n\t}\n\treturn buffer.String(), args, nil\n}", "func resolveExpression(ctx *sql.Context, expression string, sch schema.Schema, tableName string) (sql.Expression, error) {\n\tquery := fmt.Sprintf(\"SELECT %s from %s.%s\", expression, \"mydb\", tableName)\n\tsqlSch, err := sqlutil.FromDoltSchema(tableName, sch)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tmockTable := memory.NewTable(tableName, sqlSch, nil)\n\tmockDatabase := memory.NewDatabase(\"mydb\")\n\tmockDatabase.AddTable(tableName, mockTable)\n\tmockProvider := memory.NewDBProvider(mockDatabase)\n\tcatalog := analyzer.NewCatalog(mockProvider)\n\n\tpseudoAnalyzedQuery, err := planbuilder.Parse(ctx, catalog, query)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar expr sql.Expression\n\ttransform.Inspect(pseudoAnalyzedQuery, func(n sql.Node) bool {\n\t\tif projector, ok := n.(sql.Projector); ok {\n\t\t\texpr = projector.ProjectedExprs()[0]\n\t\t\treturn false\n\t\t}\n\t\treturn true\n\t})\n\tif expr == nil {\n\t\treturn nil, fmt.Errorf(\"unable to find expression in analyzed query\")\n\t}\n\n\treturn expr, nil\n}", "func (b *Builder) FullJoin(joinTable, joinCond interface{}) *Builder {\n\treturn b.Join(\"FULL\", joinTable, joinCond)\n}" ]
[ "0.7141362", "0.6283584", "0.61286354", "0.6114805", "0.58095336", "0.5579841", "0.5528286", "0.54351157", "0.5422068", "0.53262335", "0.5302064", "0.5258188", "0.5202978", "0.51721895", "0.5171185", "0.51176786", "0.51153624", "0.50725424", "0.504998", "0.5015528", "0.5007657", "0.50041515", "0.49868286", "0.49820632", "0.496345", "0.4961176", "0.4950369", "0.4832806", "0.48083323", "0.47873613", "0.47839683", "0.47722697", "0.4771989", "0.47547776", "0.47509864", "0.4747191", "0.47182676", "0.47178525", "0.47126234", "0.4710577", "0.4701561", "0.46976194", "0.46716094", "0.46618825", "0.4636043", "0.46320042", "0.46235764", "0.46145755", "0.4614478", "0.4602384", "0.45936912", "0.45910755", "0.45905232", "0.45635346", "0.45427656", "0.45286018", "0.45271516", "0.45228374", "0.45211866", "0.45200107", "0.4507529", "0.4506117", "0.4502193", "0.44958872", "0.44788045", "0.44778195", "0.44527245", "0.44408065", "0.4418107", "0.4416991", "0.4414473", "0.43852147", "0.43759194", "0.4372899", "0.4371999", "0.4370432", "0.43690613", "0.43667048", "0.43645507", "0.43586013", "0.43383062", "0.43312064", "0.432967", "0.43290433", "0.43183511", "0.43055487", "0.4272212", "0.42627507", "0.42534083", "0.42496136", "0.42490032", "0.42411947", "0.42207584", "0.42189282", "0.42177215", "0.42136455", "0.4202816", "0.41990033", "0.4196319", "0.41864243" ]
0.6004231
4
OuterJoin outer-joins the tables. tables[0] is used as the reference DataTable.
func OuterJoin(tables []*DataTable, on []JoinOn) (*DataTable, error) { return newJoinImpl(outerJoin, tables, on).Compute() }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (left *DataTable) OuterJoin(right *DataTable, on []JoinOn) (*DataTable, error) {\n\treturn newJoinImpl(outerJoin, []*DataTable{left, right}, on).Compute()\n}", "func (mySelf SQLJoin) Outer() SQLJoin {\n\tmySelf.outer = true\n\treturn mySelf\n}", "func (sd *SelectDataset) FullOuterJoin(table exp.Expression, condition exp.JoinCondition) *SelectDataset {\n\treturn sd.joinTable(exp.NewConditionedJoinExpression(exp.FullOuterJoinType, table, condition))\n}", "func OuterJoin(lx, rx reflect.Value) reflect.Value {\n\tljoin := LeftJoin(lx, rx)\n\trjoin := RightJoin(lx, rx)\n\n\tresult := reflect.MakeSlice(reflect.SliceOf(lx.Type().Elem()), ljoin.Len()+rjoin.Len(), ljoin.Len()+rjoin.Len())\n\tfor i := 0; i < ljoin.Len(); i++ {\n\t\tresult.Index(i).Set(ljoin.Index(i))\n\t}\n\tfor i := 0; i < rjoin.Len(); i++ {\n\t\tresult.Index(ljoin.Len() + i).Set(rjoin.Index(i))\n\t}\n\n\treturn result\n}", "func (self Accessor) OuterJoin(expr interface{}) *SelectManager {\n\treturn self.From(self.Relation()).OuterJoin(expr)\n}", "func (sd *SelectDataset) LeftOuterJoin(table exp.Expression, condition exp.JoinCondition) *SelectDataset {\n\treturn sd.joinTable(exp.NewConditionedJoinExpression(exp.LeftOuterJoinType, table, condition))\n}", "func (stmt *statement) FullJoin(table, on string) Statement {\n\tstmt.join(\"FULL JOIN \", table, on)\n\treturn stmt\n}", "func InnerJoin(tables []*DataTable, on []JoinOn) (*DataTable, error) {\n\treturn newJoinImpl(innerJoin, tables, on).Compute()\n}", "func LeftJoin(tables []*DataTable, on []JoinOn) (*DataTable, error) {\n\treturn newJoinImpl(leftJoin, tables, on).Compute()\n}", "func (s *BaseMySqlParserListener) EnterOuterJoin(ctx *OuterJoinContext) {}", "func (s *BasePlSqlParserListener) EnterOuter_join_sign(ctx *Outer_join_signContext) {}", "func (left *DataTable) InnerJoin(right *DataTable, on []JoinOn) (*DataTable, error) {\n\treturn newJoinImpl(innerJoin, []*DataTable{left, right}, on).Compute()\n}", "func (stmt *statement) LeftJoin(table, on string) Statement {\n\tstmt.join(\"LEFT JOIN \", table, on)\n\treturn stmt\n}", "func (left *DataTable) LeftJoin(right *DataTable, on []JoinOn) (*DataTable, error) {\n\treturn newJoinImpl(leftJoin, []*DataTable{left, right}, on).Compute()\n}", "func (s *BasePlSqlParserListener) EnterOuter_join_type(ctx *Outer_join_typeContext) {}", "func (sd *SelectDataset) FullJoin(table exp.Expression, condition exp.JoinCondition) *SelectDataset {\n\treturn sd.joinTable(exp.NewConditionedJoinExpression(exp.FullJoinType, table, condition))\n}", "func (t *Table) LeftJoin(offset int32, count int, crit string, target interface{}) error {\n\tbody, err := t.LeftJoinRaw(offset, count, crit)\n\tif err == nil {\n\t\terr = json.Unmarshal(body, &target)\n\t}\n\treturn err\n}", "func (b *Builder) FullJoin(joinTable string, joinCond interface{}) *Builder {\r\n\treturn b.Join(\"FULL\", joinTable, joinCond)\r\n}", "func Outer(a, b Tensor, opts ...FuncOpt) (retVal Tensor, err error) {\n\tif a.Dtype() != b.Dtype() {\n\t\terr = errors.Errorf(dtypeMismatch, a.Dtype(), b.Dtype())\n\t\treturn\n\t}\n\n\tswitch at := a.(type) {\n\tcase *Dense:\n\t\tbt := b.(*Dense)\n\t\treturn at.Outer(bt, opts...)\n\t}\n\tpanic(\"Unreachable\")\n}", "func (t *Table) LeftJoinRaw(offset int32, count int, crit string) ([]byte, error) {\n\tp := \"https://%s/api/getLeftJoin.sjs?json&object=%s&limit=%d,%d\"\n\tx := fmt.Sprintf(p, t.Host, t.Name, offset, count)\n\tif len(crit) != 0 {\n\t\tx = x + \"&condition=\" + FixCrit(crit)\n\t}\n\t_, body, err := t.Get(x)\n\treturn body, err\n}", 
"func (b *Builder) FullJoin(joinTable, joinCond interface{}) *Builder {\n\treturn b.Join(\"FULL\", joinTable, joinCond)\n}", "func (w *Wrapper) InnerJoin(table interface{}, condition string) *Wrapper {\n\tw.saveJoin(table, \"INNER JOIN\", condition)\n\treturn w\n}", "func NewFullJoinOn(table string, from string, to string, filter ...FilterQuery) JoinQuery {\n\treturn NewJoinWith(\"FULL JOIN\", table, from, to, filter...)\n}", "func (b *Builder) CrossJoin(joinTable string, joinCond interface{}) *Builder {\r\n\treturn b.Join(\"CROSS\", joinTable, joinCond)\r\n}", "func (w *Wrapper) LeftJoin(table interface{}, condition string) *Wrapper {\n\tw.saveJoin(table, \"LEFT JOIN\", condition)\n\treturn w\n}", "func (b *Builder) CrossJoin(joinTable, joinCond interface{}) *Builder {\n\treturn b.Join(\"CROSS\", joinTable, joinCond)\n}", "func NewFullJoinOn(table string, from string, to string) JoinQuery {\n\treturn NewJoinWith(\"FULL JOIN\", table, from, to)\n}", "func (r1 *csvTable) Join(r2 rel.Relation, zero interface{}) rel.Relation {\n\treturn rel.NewJoin(r1, r2, zero)\n}", "func JoinTableColumnsWithAs(table string, cols ...string) string {\n\t//cols = strings_.SliceTrimEmpty(cols...)\n\treturn strings.Join(ExpandAsColumns(TableColumns(table, cols...)...), \",\")\n}", "func (sd *SelectDataset) Join(table exp.Expression, condition exp.JoinCondition) *SelectDataset {\n\treturn sd.InnerJoin(table, condition)\n}", "func (stmt *statement) Join(table, on string) Statement {\n\tstmt.join(\"JOIN \", table, on)\n\treturn stmt\n}", "func (t *Dense) Outer(other Tensor, opts ...FuncOpt) (retVal *Dense, err error) {\n\t// check both are vectors\n\tif !t.Shape().IsVector() || !other.Shape().IsVector() {\n\t\terr = errors.Errorf(\"Outer only works when there are two vectors. t's shape: %v. other's shape: %v\", t.Shape(), other.Shape())\n\t\treturn\n\t}\n\n\tm := t.Size()\n\tn := other.Size()\n\n\t// check whether retVal has the same size as the resulting matrix would be: mxn\n\texpectedShape := Shape{m, n}\n\n\tfo := ParseFuncOpts(opts...)\n\tdefer returnOpOpt(fo)\n\tif retVal, err = handleReuse(fo.Reuse(), expectedShape, fo.Safe()); err != nil {\n\t\terr = errors.Wrapf(err, opFail, \"Outer\")\n\t\treturn\n\t}\n\n\tif retVal == nil {\n\t\tretVal = recycledDense(t.t, expectedShape, WithEngine(t.e))\n\t\tif t.o.IsColMajor() {\n\t\t\tAsFortran(nil)(retVal)\n\t\t}\n\t}\n\n\te := t.e\n\n\t// DGER does not have any beta. 
So the values have to be zeroed first if the tensor is to be reused\n\tretVal.Zero()\n\tif op, ok := e.(OuterProder); ok {\n\t\tif err = op.Outer(t, other, retVal); err != nil {\n\t\t\treturn nil, errors.Wrapf(err, opFail, \"engine.uter\")\n\t\t}\n\t\treturn handleIncr(retVal, fo.Reuse(), fo.Incr(), expectedShape)\n\t}\n\treturn nil, errors.New(\"engine does not support Outer\")\n}", "func FullJoin(table, on string) QueryOption {\n\treturn newFuncQueryOption(func(wrapper *QueryWrapper) {\n\t\twrapper.joins = append(wrapper.joins, \"FULL\", \"JOIN\", table, \"ON\", on)\n\t\twrapper.queryLen += 5\n\t})\n}", "func FillAliases(a *Aliases, tables []drivers.Table) {\n\tif a.Tables == nil {\n\t\ta.Tables = make(map[string]TableAlias)\n\t}\n\n\tfor _, t := range tables {\n\t\tif t.IsJoinTable {\n\t\t\tjt, ok := a.Tables[t.Name]\n\t\t\tif !ok {\n\t\t\t\ta.Tables[t.Name] = TableAlias{Relationships: make(map[string]RelationshipAlias)}\n\t\t\t} else if jt.Relationships == nil {\n\t\t\t\tjt.Relationships = make(map[string]RelationshipAlias)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\ttable := a.Tables[t.Name]\n\n\t\tif len(table.UpPlural) == 0 {\n\t\t\ttable.UpPlural = strmangle.TitleCase(strmangle.Plural(t.Name))\n\t\t}\n\t\tif len(table.UpSingular) == 0 {\n\t\t\ttable.UpSingular = strmangle.TitleCase(strmangle.Singular(t.Name))\n\t\t}\n\t\tif len(table.DownPlural) == 0 {\n\t\t\ttable.DownPlural = strmangle.CamelCase(strmangle.Plural(t.Name))\n\t\t}\n\t\tif len(table.DownSingular) == 0 {\n\t\t\ttable.DownSingular = strmangle.CamelCase(strmangle.Singular(t.Name))\n\t\t}\n\n\t\tif table.Columns == nil {\n\t\t\ttable.Columns = make(map[string]string)\n\t\t}\n\t\tif table.Relationships == nil {\n\t\t\ttable.Relationships = make(map[string]RelationshipAlias)\n\t\t}\n\n\t\tfor _, c := range t.Columns {\n\t\t\tif _, ok := table.Columns[c.Name]; !ok {\n\t\t\t\ttable.Columns[c.Name] = strmangle.TitleCase(c.Name)\n\t\t\t}\n\n\t\t\tr, _ := utf8.DecodeRuneInString(table.Columns[c.Name])\n\t\t\tif unicode.IsNumber(r) {\n\t\t\t\ttable.Columns[c.Name] = \"C\" + table.Columns[c.Name]\n\t\t\t}\n\t\t}\n\n\t\ta.Tables[t.Name] = table\n\n\t\tfor _, k := range t.FKeys {\n\t\t\tr := table.Relationships[k.Name]\n\t\t\tif len(r.Local) != 0 && len(r.Foreign) != 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tlocal, foreign := txtNameToOne(k)\n\t\t\tif len(r.Local) == 0 {\n\t\t\t\tr.Local = local\n\t\t\t}\n\t\t\tif len(r.Foreign) == 0 {\n\t\t\t\tr.Foreign = foreign\n\t\t\t}\n\n\t\t\ttable.Relationships[k.Name] = r\n\t\t}\n\n\t}\n\n\tfor _, t := range tables {\n\t\tif !t.IsJoinTable {\n\t\t\tcontinue\n\t\t}\n\n\t\ttable := a.Tables[t.Name]\n\n\t\tlhs := t.FKeys[0]\n\t\trhs := t.FKeys[1]\n\n\t\tlhsAlias, lhsOK := table.Relationships[lhs.Name]\n\t\trhsAlias, rhsOK := table.Relationships[rhs.Name]\n\n\t\tif lhsOK && len(lhsAlias.Local) != 0 && len(lhsAlias.Foreign) != 0 &&\n\t\t\trhsOK && len(rhsAlias.Local) != 0 && len(rhsAlias.Foreign) != 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\t// Here we actually reverse the meaning of local/foreign to be\n\t\t// consistent with the way normal one-to-many relationships are done.\n\t\t// That's to say local = the side with the foreign key. Now in a many-to-many\n\t\t// if we were able to not have a join table our foreign key say \"videos_id\"\n\t\t// would be on the tags table. Hence the relationships should look like:\n\t\t// videos_tags.relationships.fk_video_id.local = \"Tags\"\n\t\t// videos_tags.relationships.fk_video_id.foreign = \"Videos\"\n\t\t// Consistent, yes. Confusing? 
Also yes.\n\n\t\tlhsName, rhsName := txtNameToMany(lhs, rhs)\n\n\t\tif len(lhsAlias.Local) != 0 {\n\t\t\trhsName = lhsAlias.Local\n\t\t} else if len(rhsAlias.Local) != 0 {\n\t\t\tlhsName = rhsAlias.Local\n\t\t}\n\n\t\tif len(lhsAlias.Foreign) != 0 {\n\t\t\tlhsName = lhsAlias.Foreign\n\t\t} else if len(rhsAlias.Foreign) != 0 {\n\t\t\trhsName = rhsAlias.Foreign\n\t\t}\n\n\t\tif len(lhsAlias.Local) == 0 {\n\t\t\tlhsAlias.Local = rhsName\n\t\t}\n\t\tif len(lhsAlias.Foreign) == 0 {\n\t\t\tlhsAlias.Foreign = lhsName\n\t\t}\n\t\tif len(rhsAlias.Local) == 0 {\n\t\t\trhsAlias.Local = lhsName\n\t\t}\n\t\tif len(rhsAlias.Foreign) == 0 {\n\t\t\trhsAlias.Foreign = rhsName\n\t\t}\n\n\t\ttable.Relationships[lhs.Name] = lhsAlias\n\t\ttable.Relationships[rhs.Name] = rhsAlias\n\t}\n}", "func (sd *SelectDataset) CrossJoin(table exp.Expression) *SelectDataset {\n\treturn sd.joinTable(exp.NewUnConditionedJoinExpression(exp.CrossJoinType, table))\n}", "func NewInnerJoinOn(table string, from string, to string, filter ...FilterQuery) JoinQuery {\n\treturn NewJoinWith(\"INNER JOIN\", table, from, to, filter...)\n}", "func (s *BaseMySqlParserListener) ExitOuterJoin(ctx *OuterJoinContext) {}", "func (w *Wrapper) NaturalJoin(table interface{}, condition string) *Wrapper {\n\tw.saveJoin(table, \"NATURAL JOIN\", condition)\n\treturn w\n}", "func (sd *SelectDataset) joinTable(join exp.JoinExpression) *SelectDataset {\n\treturn sd.copy(sd.clauses.JoinsAppend(join))\n}", "func NewInnerJoinOn(table string, from string, to string) JoinQuery {\n\treturn NewJoinWith(\"INNER JOIN\", table, from, to)\n}", "func main() {\n\t// bigger table\n\tfmt.Printf(\"%X \\n\", JoinExample(\"./t/r0.tbl\", \"./t/r0.tbl\", []int{0}, []int{1})) // 767636031\n\tfmt.Printf(\"%X \\n\", JoinExample(\"./t/r0.tbl\", \"./t/r1.tbl\", []int{0}, []int{0})) // 49082128576\n\tfmt.Printf(\"%X \\n\", JoinExample(\"./t/r0.tbl\", \"./t/r1.tbl\", []int{1}, []int{1})) // 85306117839070\n\tfmt.Printf(\"%X \\n\", JoinExample(\"./t/r0.tbl\", \"./t/r2.tbl\", []int{0}, []int{0})) // 48860100254\n\tfmt.Printf(\"%X \\n\", JoinExample(\"./t/r0.tbl\", \"./t/r1.tbl\", []int{0, 1}, []int{0, 1})) //5552101\n\tfmt.Printf(\"%X \\n\", JoinExample(\"./t/r1.tbl\", \"./t/r2.tbl\", []int{0}, []int{0})) // 6331038719880\n\tfmt.Printf(\"%X \\n\", JoinExample(\"./t/r2.tbl\", \"./t/r2.tbl\", []int{0, 1}, []int{0, 1})) // 42056985375886\n}", "func Using(fields ...string) []JoinOn {\n\tvar jon []JoinOn\n\tfor _, f := range fields {\n\t\tjon = append(jon, JoinOn{Table: \"*\", Field: f})\n\t}\n\treturn jon\n}", "func (session *Session) Join(joinOperator string, tablename interface{}, condition string, args ...interface{}) *Session {\n\tsession.Session = session.Session.Join(joinOperator, tablename, condition, args...)\n\treturn session\n}", "func (sd *SelectDataset) NaturalFullJoin(table exp.Expression) *SelectDataset {\n\treturn sd.joinTable(exp.NewUnConditionedJoinExpression(exp.NaturalFullJoinType, table))\n}", "func (session *Session) Join(joinOperator string, tablename interface{}, condition string, args ...interface{}) *Session {\n\tsession.Statement.Join(joinOperator, tablename, condition, args...)\n\treturn session\n}", "func (sd *SelectDataset) NaturalJoin(table exp.Expression) *SelectDataset {\n\treturn sd.joinTable(exp.NewUnConditionedJoinExpression(exp.NaturalJoinType, table))\n}", "func (s *BasePlSqlParserListener) ExitOuter_join_type(ctx *Outer_join_typeContext) {}", "func LeftJoin(table, on string) QueryOption {\n\treturn newFuncQueryOption(func(wrapper *QueryWrapper) 
{\n\t\twrapper.joins = append(wrapper.joins, \"LEFT\", \"JOIN\", table, \"ON\", on)\n\t\twrapper.queryLen += 5\n\t})\n}", "func (r *ImageRef) ArrayJoin(images []*ImageRef, across int) error {\n\tallImages := append([]*ImageRef{r}, images...)\n\tinputs := make([]*C.VipsImage, len(allImages))\n\tfor i := range inputs {\n\t\tinputs[i] = allImages[i].image\n\t}\n\tout, err := vipsArrayJoin(inputs, across)\n\tif err != nil {\n\t\treturn err\n\t}\n\tr.setImage(out)\n\treturn nil\n}", "func (sd *SelectDataset) RightOuterJoin(table exp.Expression, condition exp.JoinCondition) *SelectDataset {\n\treturn sd.joinTable(exp.NewConditionedJoinExpression(exp.RightOuterJoinType, table, condition))\n}", "func (vn *VecN) OuterProd(dst *MatMxN, v2 *VecN) *MatMxN {\n\tif vn == nil || v2 == nil {\n\t\treturn nil\n\t}\n\n\tdst = dst.Reshape(len(vn.vec), len(v2.vec))\n\n\tfor c, el1 := range v2.vec {\n\t\tfor r, el2 := range vn.vec {\n\t\t\tdst.Set(r, c, el1*el2)\n\t\t}\n\t}\n\n\treturn dst\n}", "func buildJoin(joins []Join, baseTable string) string {\n\tif len(joins) == 0 {\n\t\treturn \"\"\n\t}\n\n\tjoin := \"\"\n\tfor _, j := range joins {\n\t\tjoin += fmt.Sprintf(\n\t\t\t\" JOIN %s ON (%s.%s %s %s.%s)\",\n\t\t\tquote(j.table),\n\t\t\tquote(baseTable),\n\t\t\tquote(j.on.field),\n\t\t\tstring(j.on.comparison),\n\t\t\tquote(j.table),\n\t\t\tquote(j.on.value.(string)),\n\t\t)\n\t}\n\treturn join\n}", "func ExampleTable() {\n\tuser := q.T(\"user\", \"usr\")\n\tpost := q.T(\"post\", \"pst\")\n\t// user.id -> post.user_id\n\tuser.InnerJoin(post, q.Eq(user.C(\"id\"), post.C(\"user_id\")))\n\tfmt.Println(\"Short:\", user)\n\n\tpostTag := q.T(\"posttag\", \"rel\")\n\ttag := q.T(\"tag\", \"tg\")\n\t// post.id -> posttag.post_id\n\tpost.InnerJoin(postTag, q.Eq(post.C(\"id\"), postTag.C(\"post_id\")))\n\t// posttag.tag_id -> tag.id\n\tpostTag.InnerJoin(tag, q.Eq(postTag.C(\"tag_id\"), tag.C(\"id\")))\n\tfmt.Println(\"Long: \", user)\n\t// Output:\n\t// Short: \"user\" AS \"usr\" INNER JOIN \"post\" AS \"pst\" ON \"usr\".\"id\" = \"pst\".\"user_id\" []\n\t// Long: \"user\" AS \"usr\" INNER JOIN (\"post\" AS \"pst\" INNER JOIN (\"posttag\" AS \"rel\" INNER JOIN \"tag\" AS \"tg\" ON \"rel\".\"tag_id\" = \"tg\".\"id\") ON \"pst\".\"id\" = \"rel\".\"post_id\") ON \"usr\".\"id\" = \"pst\".\"user_id\" []\n}", "func (s *BasePlSqlParserListener) ExitOuter_join_sign(ctx *Outer_join_signContext) {}", "func (sd *SelectDataset) LeftJoin(table exp.Expression, condition exp.JoinCondition) *SelectDataset {\n\treturn sd.joinTable(exp.NewConditionedJoinExpression(exp.LeftJoinType, table, condition))\n}", "func JoinColumns(cols ...string) string {\n\treturn JoinTableColumns(\"\", cols...)\n}", "func JoinTableValues(cols ...string) string {\n\tcols = strings_.SliceTrimEmpty(cols...)\n\tif len(cols) == 0 {\n\t\t// https://dev.mysql.com/doc/refman/5.7/en/data-type-defaults.html\n\t\t// DEFAULT\n\t\treturn \"\"\n\t}\n\treturn strings.Join(TableValues(cols...), \",\")\n}", "func NewFullJoin(table string, filter ...FilterQuery) JoinQuery {\n\treturn NewFullJoinOn(table, \"\", \"\", filter...)\n}", "func (encryptor *QueryDataEncryptor) getTablesFromUpdate(tables sqlparser.TableExprs) []*tableData {\n\tvar outputTables []*tableData\n\tfor _, tableExpr := range tables {\n\t\tswitch statement := tableExpr.(type) {\n\t\tcase *sqlparser.AliasedTableExpr:\n\t\t\taliasedStatement := statement.Expr.(sqlparser.SimpleTableExpr)\n\t\t\tswitch simpleTableStatement := aliasedStatement.(type) {\n\t\t\tcase sqlparser.TableName:\n\t\t\t\toutputTables = 
append(outputTables, &tableData{TableName: simpleTableStatement, As: statement.As})\n\t\t\tcase *sqlparser.Subquery:\n\t\t\t\t// unsupported\n\t\t\tdefault:\n\t\t\t\tlogrus.Debugf(\"Unsupported SimpleTableExpr type %s\", reflect.TypeOf(simpleTableStatement))\n\t\t\t}\n\t\tcase *sqlparser.ParenTableExpr:\n\t\t\toutputTables = append(outputTables, encryptor.getTablesFromUpdate(statement.Exprs)...)\n\t\tcase *sqlparser.JoinTableExpr:\n\t\t\toutputTables = append(outputTables, encryptor.getTablesFromUpdate(sqlparser.TableExprs{statement.LeftExpr, statement.RightExpr})...)\n\t\tdefault:\n\t\t\tlogrus.Debugf(\"Unsupported TableExpr type %s\", reflect.TypeOf(tableExpr))\n\t\t}\n\t}\n\treturn outputTables\n}", "func NewFullJoin(table string) JoinQuery {\n\treturn NewFullJoinOn(table, \"\", \"\")\n}", "func (gdt *Vector3) Outer(b Vector3) Basis {\n\targ0 := gdt.getBase()\n\targ1 := b.getBase()\n\n\tret := C.go_godot_vector3_outer(GDNative.api, arg0, arg1)\n\n\treturn Basis{base: &ret}\n\n}", "func (sd *SelectDataset) InnerJoin(table exp.Expression, condition exp.JoinCondition) *SelectDataset {\n\treturn sd.joinTable(exp.NewConditionedJoinExpression(exp.InnerJoinType, table, condition))\n}", "func (b *Builder) LeftJoin(joinTable string, joinCond interface{}) *Builder {\r\n\treturn b.Join(\"LEFT\", joinTable, joinCond)\r\n}", "func (w *Wrapper) JoinWhere(table interface{}, args ...interface{}) *Wrapper {\n\tw.saveJoinCondition(\"AND\", table, args...)\n\treturn w\n}", "func (s *BaseMySqlParserListener) EnterInnerJoin(ctx *InnerJoinContext) {}", "func NewJoinOn(table string, from string, to string, filter ...FilterQuery) JoinQuery {\n\treturn NewJoinWith(\"JOIN\", table, from, to, filter...)\n}", "func makeJoinRow(a *Row, b *Row) *Row {\n\tret := &Row{\n\t\tRowKeys: make([]*RowKeyEntry, 0, len(a.RowKeys)+len(b.RowKeys)),\n\t\tData: make([]types.Datum, 0, len(a.Data)+len(b.Data)),\n\t}\n\tret.RowKeys = append(ret.RowKeys, a.RowKeys...)\n\tret.RowKeys = append(ret.RowKeys, b.RowKeys...)\n\tret.Data = append(ret.Data, a.Data...)\n\tret.Data = append(ret.Data, b.Data...)\n\treturn ret\n}", "func JoinTableColumns(table string, cols ...string) string {\n\t//cols = strings_.SliceTrimEmpty(cols...)\n\treturn strings.Join(TableColumns(table, cols...), \",\")\n}", "func (b *Builder) LeftJoin(joinTable, joinCond interface{}) *Builder {\n\treturn b.Join(\"LEFT\", joinTable, joinCond)\n}", "func JoinColumnsWithAs(cols ...string) string {\n\treturn JoinTableColumnsWithAs(\"\", cols...)\n}", "func join(s []jen.Code) *jen.Statement {\n\tr := jen.Empty()\n\tfor i, stmt := range s {\n\t\tif i > 0 {\n\t\t\tr.Line()\n\t\t}\n\t\tr.Add(stmt)\n\t}\n\treturn r\n}", "func NewLeftJoinOn(table string, from string, to string, filter ...FilterQuery) JoinQuery {\n\treturn NewJoinWith(\"LEFT JOIN\", table, from, to, filter...)\n}", "func (sd *SelectDataset) NaturalLeftJoin(table exp.Expression) *SelectDataset {\n\treturn sd.joinTable(exp.NewUnConditionedJoinExpression(exp.NaturalLeftJoinType, table))\n}", "func (b *Builder) InnerJoin(joinTable string, joinCond interface{}) *Builder {\r\n\treturn b.Join(\"INNER\", joinTable, joinCond)\r\n}", "func (b *JoinBuilder) InnerJoin(other *Table) *JoinBuilder {\n\treturn makeJoinBuilder(\"INNER JOIN\", b, other)\n}", "func (*Functions) Join(slices ...interface{}) []interface{} {\n\tout := []interface{}{}\n\tfor _, slice := range slices {\n\t\tv := reflect.ValueOf(slice)\n\t\tfor i, c := 0, v.Len(); i < c; i++ {\n\t\t\tout = append(out, v.Index(i).Interface())\n\t\t}\n\t}\n\treturn out\n}", 
"func NewLeftJoinOn(table string, from string, to string) JoinQuery {\n\treturn NewJoinWith(\"LEFT JOIN\", table, from, to)\n}", "func (t *Table) LeftJoinMap(offset int32, count int, crit string) ([]map[string]string, error) {\n\tvar a []map[string]string\n\tbody, err := t.LeftJoinRaw(offset, count, crit)\n\ta = unpackGJsonArray(body)\n\treturn a, err\n}", "func NewJoinOn(table string, from string, to string) JoinQuery {\n\treturn NewJoinWith(\"JOIN\", table, from, to)\n}", "func (this *Dao) Join (joinType string, joinTable string, joinOn string) *Dao {\n\tjoin := fmt.Sprintf(\"%s JOIN %s ON %s\", strings.ToUpper(joinType), _table(joinTable), joinOn)\n\n\tthis.queryJoins = append(this.queryJoins, join)\n\treturn this\n}", "func (t *ProcessTable) Underlying() sql.Table {\n\treturn t.Table\n}", "func (b *JoinBuilder) LeftJoin(other *Table) *JoinBuilder {\n\treturn makeJoinBuilder(\"LEFT JOIN\", b, other)\n}", "func (b *JoinBuilder) Using(cols ...interface{}) *JoinBuilder {\n\tvar vals Columns\n\tfor _, c := range cols {\n\t\tvar name string\n\t\tswitch t := c.(type) {\n\t\tcase string:\n\t\t\tname = t\n\t\tcase ValExprBuilder:\n\t\t\tif n, ok := t.ValExpr.(*ColName); ok {\n\t\t\t\tname = n.Name\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tvar v ValExpr\n\t\tif len(name) == 0 {\n\t\t\tv = makeErrVal(\"unsupported type %T: %v\", c, c)\n\t\t} else if b.leftTable.column(name) == nil ||\n\t\t\tb.rightTable.column(name) == nil {\n\t\t\tv = makeErrVal(\"invalid join column: %s\", name)\n\t\t} else {\n\t\t\tv = &ColName{Name: name}\n\t\t}\n\t\tvals = append(vals, &NonStarExpr{Expr: v})\n\t}\n\tb.Cond = &UsingJoinCond{Cols: vals}\n\treturn b\n}", "func InnerJoin(table, on string) QueryOption {\n\treturn newFuncQueryOption(func(wrapper *QueryWrapper) {\n\t\twrapper.joins = append(wrapper.joins, \"INNER\", \"JOIN\", table, \"ON\", on)\n\t\twrapper.queryLen += 5\n\t})\n}", "func (b *Builder) InnerJoin(joinTable, joinCond interface{}) *Builder {\n\treturn b.Join(\"INNER\", joinTable, joinCond)\n}", "func JoinTableColumnsValues(cmp string, table string, cols ...string) string {\n\t//cols = strings_.SliceTrimEmpty(cols...)\n\treturn strings.Join(TableColumnsValues(cmp, table, cols...), \",\")\n}", "func (q Query) Join(inner Query,\n\touterKeySelector func(interface{}) interface{},\n\tinnerKeySelector func(interface{}) interface{},\n\tresultSelector func(outer interface{}, inner interface{}) interface{}) Query {\n\n\treturn Query{\n\t\tIterate: func() Iterator {\n\t\t\touternext := q.Iterate()\n\t\t\tinnernext := inner.Iterate()\n\n\t\t\tinnerLookup := make(map[interface{}][]interface{})\n\t\t\tfor innerItem, ok := innernext(); ok; innerItem, ok = innernext() {\n\t\t\t\tinnerKey := innerKeySelector(innerItem)\n\t\t\t\tinnerLookup[innerKey] = append(innerLookup[innerKey], innerItem)\n\t\t\t}\n\n\t\t\tvar outerItem interface{}\n\t\t\tvar innerGroup []interface{}\n\t\t\tinnerLen, innerIndex := 0, 0\n\n\t\t\treturn func() (item interface{}, ok bool) {\n\t\t\t\tif innerIndex >= innerLen {\n\t\t\t\t\thas := false\n\t\t\t\t\tfor !has {\n\t\t\t\t\t\touterItem, ok = outernext()\n\t\t\t\t\t\tif !ok {\n\t\t\t\t\t\t\treturn\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tinnerGroup, has = innerLookup[outerKeySelector(outerItem)]\n\t\t\t\t\t\tinnerLen = len(innerGroup)\n\t\t\t\t\t\tinnerIndex = 0\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\titem = resultSelector(outerItem, innerGroup[innerIndex])\n\t\t\t\tinnerIndex++\n\t\t\t\treturn item, true\n\t\t\t}\n\t\t},\n\t}\n}", "func (t *TableExpr) JoinSQL() string {\n\tif t.JoinConditions != \"\" && t.JoinType 
!= \"\" {\n\t\treturn \" \" + t.JoinType + \" \" + t.Table.SQL() + \" on \" +\n\t\t\tt.JoinConditions\n\t}\n\treturn \", \" + t.Table.SQL()\n}", "func (q Query) JoinT(inner Query,\n\touterKeySelectorFn interface{},\n\tinnerKeySelectorFn interface{},\n\tresultSelectorFn interface{}) Query {\n\touterKeySelectorGenericFunc, err := newGenericFunc(\n\t\t\"JoinT\", \"outerKeySelectorFn\", outerKeySelectorFn,\n\t\tsimpleParamValidator(newElemTypeSlice(new(genericType)), newElemTypeSlice(new(genericType))),\n\t)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\touterKeySelectorFunc := func(item interface{}) interface{} {\n\t\treturn outerKeySelectorGenericFunc.Call(item)\n\t}\n\n\tinnerKeySelectorFuncGenericFunc, err := newGenericFunc(\n\t\t\"JoinT\", \"innerKeySelectorFn\",\n\t\tinnerKeySelectorFn,\n\t\tsimpleParamValidator(newElemTypeSlice(new(genericType)), newElemTypeSlice(new(genericType))),\n\t)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tinnerKeySelectorFunc := func(item interface{}) interface{} {\n\t\treturn innerKeySelectorFuncGenericFunc.Call(item)\n\t}\n\n\tresultSelectorGenericFunc, err := newGenericFunc(\n\t\t\"JoinT\", \"resultSelectorFn\", resultSelectorFn,\n\t\tsimpleParamValidator(newElemTypeSlice(new(genericType), new(genericType)), newElemTypeSlice(new(genericType))),\n\t)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tresultSelectorFunc := func(outer interface{}, inner interface{}) interface{} {\n\t\treturn resultSelectorGenericFunc.Call(outer, inner)\n\t}\n\n\treturn q.Join(inner, outerKeySelectorFunc, innerKeySelectorFunc, resultSelectorFunc)\n}", "func (lc LowerCaseConvention) Join(names []string) string {\n\treturn strings.Join(names, \"\")\n}", "func (f *predicateSqlizerFactory) addJoinsToSelectBuilder(q sq.SelectBuilder) sq.SelectBuilder {\n\tfor i, alias := range f.joinedTables {\n\t\taliasName := f.aliasName(alias.secondaryTable, i)\n\t\tjoinClause := fmt.Sprintf(\"%s AS %s ON %s = %s\",\n\t\t\tf.db.tableName(alias.secondaryTable), pq.QuoteIdentifier(aliasName),\n\t\t\tfullQuoteIdentifier(f.primaryTable, alias.primaryColumn),\n\t\t\tfullQuoteIdentifier(aliasName, alias.secondaryColumn))\n\t\tq = q.LeftJoin(joinClause)\n\t}\n\n\tif len(f.joinedTables) > 0 {\n\t\tq = q.Distinct()\n\t}\n\treturn q\n}", "func mergeTable(table1 *Table, table2 Table) {\n\tfor _, column := range table2.columns {\n\t\tif containsString(convertColumnsToColumnNames(table1.columns), column.name) {\n\t\t\ttable1.columns = append(table1.columns, column)\n\t\t}\n\t}\n\n\tfor _, index := range table2.indexes {\n\t\tif containsString(convertIndexesToIndexNames(table1.indexes), index.name) {\n\t\t\ttable1.indexes = append(table1.indexes, index)\n\t\t}\n\t}\n}", "func (arr *filterTableArr) UnionEqual(other filterTableArr) *filterTableArr {\n\tfor i, el := range other {\n\t\tif el {\n\t\t\tarr[i] = true\n\t\t}\n\t}\n\n\treturn arr\n}", "func fnJoin(ctx Context, doc *JDoc, params []string) interface{} {\n\tstats := ctx.Value(EelTotalStats).(*ServiceStats)\n\tif params == nil || len(params) != 2 {\n\t\tctx.Log().Error(\"error_type\", \"func_join\", \"op\", \"join\", \"cause\", \"wrong_number_of_parameters\", \"params\", params)\n\t\tstats.IncErrors()\n\t\tAddError(ctx, SyntaxError{fmt.Sprintf(\"wrong number of parameters in call to join function\"), \"join\", params})\n\t\treturn nil\n\t}\n\tdocA, err := NewJDocFromString(extractStringParam(params[0]))\n\tif err != nil {\n\t\tctx.Log().Error(\"error_type\", \"func_join\", \"op\", \"join\", \"cause\", \"non_json_parameter\", \"params\", params, \"error\", 
err.Error())\n\t\tstats.IncErrors()\n\t\tAddError(ctx, SyntaxError{fmt.Sprintf(\"non json parameters in call to join function\"), \"join\", params})\n\t\treturn nil\n\t}\n\tdocB, err := NewJDocFromString(extractStringParam(params[1]))\n\tif err != nil {\n\t\tctx.Log().Error(\"error_type\", \"func_join\", \"op\", \"join\", \"cause\", \"non_json_parameter\", \"params\", params, \"error\", err.Error())\n\t\tstats.IncErrors()\n\t\tAddError(ctx, SyntaxError{fmt.Sprintf(\"non json parameters in call to join function\"), \"join\", params})\n\t\treturn nil\n\t}\n\tvar section interface{}\n\tsection = docA.GetOriginalObject()\n\tswitch section.(type) {\n\t// not sure any more if we really want an iterative join\n\t// apply sub-transformation iteratively to all array elements\n\t//case []interface{}:\n\t//\tfor i, a := range section.([]interface{}) {\n\t//\t\tlittleDoc, err := NewJDocFromInterface(a)\n\t//\t\tif err != nil {\n\t//\t\t\tctx.Log().Error(\"error_type\", \"func_join\", \"op\", \"join\", \"error\", err.Error(), \"params\", params)\n\t//\t\t\tstats.IncErrors()\n\t//\t\t\treturn \"\"\n\t//\t\t}\n\t//\t\tm := docA.merge(littleDoc.GetOriginalObject(), docB.GetOriginalObject())\n\t//\t\tif m == nil {\n\t//\t\t\tctx.Log().Error(\"error_type\", \"func_join\", \"op\", \"join\", \"cause\", \"merge_failed\", \"params\", params)\n\t//\t\t\tstats.IncErrors()\n\t//\t\t\treturn \"\"\n\t//\t\t}\n\t//\t\tsection.([]interface{})[i] = m\n\t//\t}\n\t//\treturn section\n\t// apply sub-transformation to single sub-section of document\n\tdefault:\n\t\tm := docA.merge(docA.GetOriginalObject(), docB.GetOriginalObject())\n\t\tif m == nil {\n\t\t\tctx.Log().Error(\"error_type\", \"func_join\", \"op\", \"join\", \"cause\", \"merge_failed\", \"params\", params)\n\t\t\tstats.IncErrors()\n\t\t\tAddError(ctx, RuntimeError{fmt.Sprintf(\"merge failed in call to join function\"), \"join\", params})\n\t\t\treturn nil\n\t\t}\n\t\tdocC, err := NewJDocFromInterface(m)\n\t\tif err != nil {\n\t\t\tctx.Log().Error(\"error_type\", \"func_join\", \"op\", \"join\", \"cause\", \"invalid_merge_json\", \"params\", params, \"error\", err.Error())\n\t\t\tstats.IncErrors()\n\t\t\tAddError(ctx, RuntimeError{fmt.Sprintf(\"non json merge result in call to join function\"), \"join\", params})\n\t\t\treturn nil\n\t\t}\n\t\treturn docC.GetOriginalObject()\n\t}\n}", "func JoinWith(handler *model.JoinTableHandler, ne *engine.Engine, source interface{}) error {\n\tne.Scope.ContextValue(source)\n\ttableName := handler.TableName\n\tquotedTableName := Quote(ne, tableName)\n\tvar joinConditions []string\n\tm, err := GetModelStruct(ne, source)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif handler.Source.ModelType == m.ModelType {\n\t\td := reflect.New(handler.Destination.ModelType).Interface()\n\t\tdestinationTableName := QuotedTableName(ne, d)\n\t\tfor _, foreignKey := range handler.Destination.ForeignKeys {\n\t\t\tjoinConditions = append(joinConditions, fmt.Sprintf(\"%v.%v = %v.%v\",\n\t\t\t\tquotedTableName,\n\t\t\t\tQuote(ne, foreignKey.DBName),\n\t\t\t\tdestinationTableName,\n\t\t\t\tQuote(ne, foreignKey.AssociationDBName)))\n\t\t}\n\n\t\tvar foreignDBNames []string\n\t\tvar foreignFieldNames []string\n\t\tfor _, foreignKey := range handler.Source.ForeignKeys {\n\t\t\tforeignDBNames = append(foreignDBNames, foreignKey.DBName)\n\t\t\tif field, ok := FieldByName(ne, source, foreignKey.AssociationDBName); ok == nil {\n\t\t\t\tforeignFieldNames = append(foreignFieldNames, field.Name)\n\t\t\t}\n\t\t}\n\n\t\tforeignFieldValues := 
util.ColumnAsArray(foreignFieldNames, ne.Scope.ValueOf())\n\n\t\tvar condString string\n\t\tif len(foreignFieldValues) > 0 {\n\t\t\tvar quotedForeignDBNames []string\n\t\t\tfor _, dbName := range foreignDBNames {\n\t\t\t\tquotedForeignDBNames = append(quotedForeignDBNames, tableName+\".\"+dbName)\n\t\t\t}\n\n\t\t\tcondString = fmt.Sprintf(\"%v IN (%v)\",\n\t\t\t\tToQueryCondition(ne, quotedForeignDBNames),\n\t\t\t\tutil.ToQueryMarks(foreignFieldValues))\n\t\t} else {\n\t\t\tcondString = fmt.Sprintf(\"1 <> 1\")\n\t\t}\n\n\t\tsearch.Join(ne,\n\t\t\tfmt.Sprintf(\"INNER JOIN %v ON %v\",\n\t\t\t\tquotedTableName,\n\t\t\t\tstrings.Join(joinConditions, \" AND \")))\n\t\tsearch.Where(ne, condString, util.ToQueryValues(foreignFieldValues)...)\n\t\treturn nil\n\t}\n\treturn errors.New(\"wrong source type for join table handler\")\n}", "func Join(larr, rarr interface{}, fnc JoinFnc) interface{} {\n\tif !IsCollection(larr) {\n\t\tpanic(\"First parameter must be a collection\")\n\t}\n\tif !IsCollection(rarr) {\n\t\tpanic(\"Second parameter must be a collection\")\n\t}\n\n\tlvalue := reflect.ValueOf(larr)\n\trvalue := reflect.ValueOf(rarr)\n\tif NotEqual(lvalue.Type(), rvalue.Type()) {\n\t\tpanic(\"Parameters must have the same type\")\n\t}\n\n\treturn fnc(lvalue, rvalue).Interface()\n}", "func (array *Array) Join(separator string) string {\n\tstr := \"\"\n\tfor _, object := range array.data {\n\t\tstr = fmt.Sprintf(\"%s%s%v\", str, separator, object)\n\t}\n\treturn str\n}", "func (o *outerJoinEliminator) tryToEliminateOuterJoin(p *LogicalJoin, aggCols []*expression.Column, parentCols []*expression.Column, opt *logicalOptimizeOp) (LogicalPlan, bool, error) {\n\tvar innerChildIdx int\n\tswitch p.JoinType {\n\tcase LeftOuterJoin:\n\t\tinnerChildIdx = 1\n\tcase RightOuterJoin:\n\t\tinnerChildIdx = 0\n\tdefault:\n\t\treturn p, false, nil\n\t}\n\n\touterPlan := p.children[1^innerChildIdx]\n\tinnerPlan := p.children[innerChildIdx]\n\touterUniqueIDs := set.NewInt64Set()\n\tfor _, outerCol := range outerPlan.Schema().Columns {\n\t\touterUniqueIDs.Insert(outerCol.UniqueID)\n\t}\n\tmatched := IsColsAllFromOuterTable(parentCols, outerUniqueIDs)\n\tif !matched {\n\t\treturn p, false, nil\n\t}\n\t// outer join elimination with duplicate agnostic aggregate functions\n\tmatched = IsColsAllFromOuterTable(aggCols, outerUniqueIDs)\n\tif matched {\n\t\tappendOuterJoinEliminateAggregationTraceStep(p, outerPlan, aggCols, opt)\n\t\treturn outerPlan, true, nil\n\t}\n\t// outer join elimination without duplicate agnostic aggregate functions\n\tinnerJoinKeys := o.extractInnerJoinKeys(p, innerChildIdx)\n\tcontain, err := o.isInnerJoinKeysContainUniqueKey(innerPlan, innerJoinKeys)\n\tif err != nil {\n\t\treturn p, false, err\n\t}\n\tif contain {\n\t\tappendOuterJoinEliminateTraceStep(p, outerPlan, parentCols, innerJoinKeys, opt)\n\t\treturn outerPlan, true, nil\n\t}\n\tcontain, err = o.isInnerJoinKeysContainIndex(innerPlan, innerJoinKeys)\n\tif err != nil {\n\t\treturn p, false, err\n\t}\n\tif contain {\n\t\tappendOuterJoinEliminateTraceStep(p, outerPlan, parentCols, innerJoinKeys, opt)\n\t\treturn outerPlan, true, nil\n\t}\n\n\treturn p, false, nil\n}", "func (t *ProcessIndexableTable) Underlying() sql.Table {\n\treturn t.DriverIndexableTable\n}", "func (tbl AssociationTable) Using(tx sqlapi.SqlTx) AssociationTabler {\n\ttbl.db = tx\n\treturn tbl\n}" ]
[ "0.72591054", "0.6385042", "0.6179653", "0.60917306", "0.5798085", "0.56046575", "0.5532318", "0.5531475", "0.5526659", "0.5441862", "0.52743226", "0.5258402", "0.5223004", "0.5183927", "0.51464397", "0.5036838", "0.49458668", "0.49448958", "0.4925887", "0.4893099", "0.48820052", "0.48512575", "0.48428088", "0.4839758", "0.48189923", "0.48154837", "0.48015633", "0.47819206", "0.4764588", "0.47513074", "0.4733342", "0.4706668", "0.4693289", "0.46917868", "0.4664223", "0.46392182", "0.46237406", "0.46237215", "0.4622747", "0.46193898", "0.4612084", "0.45863158", "0.4578073", "0.45747644", "0.45729303", "0.4559155", "0.4529584", "0.45276", "0.45094365", "0.4499277", "0.4496122", "0.44883716", "0.4482077", "0.44797555", "0.4441661", "0.4440709", "0.44376156", "0.44312614", "0.4421755", "0.44134098", "0.4398589", "0.43926582", "0.43536553", "0.4343661", "0.4340927", "0.4330712", "0.43218967", "0.43214118", "0.43212292", "0.4309175", "0.4300276", "0.4292944", "0.42899194", "0.42764527", "0.42742187", "0.42716026", "0.42660204", "0.42632857", "0.42424795", "0.42325503", "0.42155784", "0.4214783", "0.42121142", "0.42113888", "0.4199273", "0.4198009", "0.41976398", "0.41773626", "0.4168511", "0.41637564", "0.4143306", "0.41409448", "0.4114006", "0.41129592", "0.41093835", "0.4096133", "0.40914938", "0.40913856", "0.40751323", "0.40704283" ]
0.7889248
0
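A minimal usage sketch for the OuterJoin entry above (illustration only, not part of the corpus): the NewDataTable constructor and the "users"/"orders"/"id" names are assumptions; the OuterJoin signature and the JoinOn{Table, Field} shape come from the document above and from the On helper in the next record.

package main

import "log"

func main() {
	// NewDataTable is a hypothetical constructor assumed for this sketch.
	left := NewDataTable("users")
	right := NewDataTable("orders")

	// left is tables[0], so it acts as the reference DataTable.
	joined, err := OuterJoin(
		[]*DataTable{left, right},
		[]JoinOn{{Table: "*", Field: "id"}}, // join every table on its "id" column
	)
	if err != nil {
		log.Fatal(err)
	}
	_ = joined
}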
On creates a "join on" expression, i.e., in SQL: SELECT * FROM A INNER JOIN B ON B.id = A.user_id. Accepted syntax for each field: "[table].[field]" or "field".
func On(fields ...string) []JoinOn { var jon []JoinOn for _, f := range fields { matches := rgOn.FindStringSubmatch(f) switch len(matches) { case 0: jon = append(jon, JoinOn{Table: "*", Field: f}) case 3: t := matches[1] if len(t) == 0 { t = "*" } jon = append(jon, JoinOn{Table: t, Field: matches[2]}) default: return nil } } return jon }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func NewInnerJoinOn(table string, from string, to string) JoinQuery {\n\treturn NewJoinWith(\"INNER JOIN\", table, from, to)\n}", "func NewJoinOn(table string, from string, to string, filter ...FilterQuery) JoinQuery {\n\treturn NewJoinWith(\"JOIN\", table, from, to, filter...)\n}", "func NewJoinOn(table string, from string, to string) JoinQuery {\n\treturn NewJoinWith(\"JOIN\", table, from, to)\n}", "func NewInnerJoinOn(table string, from string, to string, filter ...FilterQuery) JoinQuery {\n\treturn NewJoinWith(\"INNER JOIN\", table, from, to, filter...)\n}", "func (b *JoinBuilder) On(expr BoolExpr) *JoinBuilder {\n\tb.Cond = &OnJoinCond{Expr: unwrapBoolExpr(expr)}\n\treturn b\n}", "func (stmt *statement) Join(table, on string) Statement {\n\tstmt.join(\"JOIN \", table, on)\n\treturn stmt\n}", "func Using(fields ...string) []JoinOn {\n\tvar jon []JoinOn\n\tfor _, f := range fields {\n\t\tjon = append(jon, JoinOn{Table: \"*\", Field: f})\n\t}\n\treturn jon\n}", "func InnerJoin(table, on string) QueryOption {\n\treturn newFuncQueryOption(func(wrapper *QueryWrapper) {\n\t\twrapper.joins = append(wrapper.joins, \"INNER\", \"JOIN\", table, \"ON\", on)\n\t\twrapper.queryLen += 5\n\t})\n}", "func NewLeftJoinOn(table string, from string, to string) JoinQuery {\n\treturn NewJoinWith(\"LEFT JOIN\", table, from, to)\n}", "func NewFullJoinOn(table string, from string, to string) JoinQuery {\n\treturn NewJoinWith(\"FULL JOIN\", table, from, to)\n}", "func NewFullJoinOn(table string, from string, to string, filter ...FilterQuery) JoinQuery {\n\treturn NewJoinWith(\"FULL JOIN\", table, from, to, filter...)\n}", "func JoinWith(handler *model.JoinTableHandler, ne *engine.Engine, source interface{}) error {\n\tne.Scope.ContextValue(source)\n\ttableName := handler.TableName\n\tquotedTableName := Quote(ne, tableName)\n\tvar joinConditions []string\n\tm, err := GetModelStruct(ne, source)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif handler.Source.ModelType == m.ModelType {\n\t\td := reflect.New(handler.Destination.ModelType).Interface()\n\t\tdestinationTableName := QuotedTableName(ne, d)\n\t\tfor _, foreignKey := range handler.Destination.ForeignKeys {\n\t\t\tjoinConditions = append(joinConditions, fmt.Sprintf(\"%v.%v = %v.%v\",\n\t\t\t\tquotedTableName,\n\t\t\t\tQuote(ne, foreignKey.DBName),\n\t\t\t\tdestinationTableName,\n\t\t\t\tQuote(ne, foreignKey.AssociationDBName)))\n\t\t}\n\n\t\tvar foreignDBNames []string\n\t\tvar foreignFieldNames []string\n\t\tfor _, foreignKey := range handler.Source.ForeignKeys {\n\t\t\tforeignDBNames = append(foreignDBNames, foreignKey.DBName)\n\t\t\tif field, ok := FieldByName(ne, source, foreignKey.AssociationDBName); ok == nil {\n\t\t\t\tforeignFieldNames = append(foreignFieldNames, field.Name)\n\t\t\t}\n\t\t}\n\n\t\tforeignFieldValues := util.ColumnAsArray(foreignFieldNames, ne.Scope.ValueOf())\n\n\t\tvar condString string\n\t\tif len(foreignFieldValues) > 0 {\n\t\t\tvar quotedForeignDBNames []string\n\t\t\tfor _, dbName := range foreignDBNames {\n\t\t\t\tquotedForeignDBNames = append(quotedForeignDBNames, tableName+\".\"+dbName)\n\t\t\t}\n\n\t\t\tcondString = fmt.Sprintf(\"%v IN (%v)\",\n\t\t\t\tToQueryCondition(ne, quotedForeignDBNames),\n\t\t\t\tutil.ToQueryMarks(foreignFieldValues))\n\t\t} else {\n\t\t\tcondString = fmt.Sprintf(\"1 <> 1\")\n\t\t}\n\n\t\tsearch.Join(ne,\n\t\t\tfmt.Sprintf(\"INNER JOIN %v ON %v\",\n\t\t\t\tquotedTableName,\n\t\t\t\tstrings.Join(joinConditions, \" AND \")))\n\t\tsearch.Where(ne, condString, 
util.ToQueryValues(foreignFieldValues)...)\n\t\treturn nil\n\t}\n\treturn errors.New(\"wrong source type for join table handler\")\n}", "func JoinWithQL(handler *model.JoinTableHandler, ne *engine.Engine, source interface{}) error {\n\tne.Scope.ContextValue(source)\n\ttableName := handler.TableName\n\tquotedTableName := Quote(ne, tableName)\n\tvar joinConditions []string\n\tm, err := GetModelStruct(ne, source)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif handler.Source.ModelType == m.ModelType {\n\t\tne.Search.TableNames = append(ne.Search.TableNames, handler.TableName)\n\t\td := reflect.New(handler.Destination.ModelType).Interface()\n\t\tdestinationTableName := QuotedTableName(ne, d)\n\t\tfor _, foreignKey := range handler.Destination.ForeignKeys {\n\t\t\tjoinConditions = append(joinConditions, fmt.Sprintf(\"%v.%v = %v.%v\",\n\t\t\t\tquotedTableName,\n\t\t\t\tQuote(ne, foreignKey.DBName),\n\t\t\t\tdestinationTableName,\n\t\t\t\tQuote(ne, foreignKey.AssociationDBName)))\n\t\t}\n\n\t\tvar foreignDBNames []string\n\t\tvar foreignFieldNames []string\n\t\tfor _, foreignKey := range handler.Source.ForeignKeys {\n\t\t\tforeignDBNames = append(foreignDBNames, foreignKey.DBName)\n\t\t\tif field, ok := FieldByName(ne, source, foreignKey.AssociationDBName); ok == nil {\n\t\t\t\tforeignFieldNames = append(foreignFieldNames, field.Name)\n\t\t\t}\n\t\t}\n\n\t\tforeignFieldValues := util.ColumnAsArray(foreignFieldNames, ne.Scope.ValueOf())\n\n\t\tif len(foreignFieldValues) > 0 {\n\t\t\tvar quotedForeignDBNames []string\n\t\t\tfor _, dbName := range foreignDBNames {\n\t\t\t\tquotedForeignDBNames = append(quotedForeignDBNames, tableName+\".\"+dbName)\n\t\t\t}\n\t\t\tfor _, q := range quotedForeignDBNames {\n\t\t\t\tjoinConditions = append(joinConditions, fmt.Sprintf(\"%s=?\", q))\n\t\t\t}\n\t\t}\n\t\tsearch.Where(ne, strings.Join(joinConditions, \" AND \"), util.ToQueryValues(foreignFieldValues)...)\n\t\treturn nil\n\t}\n\treturn errors.New(\"wrong source type for join table handler\")\n}", "func NewLeftJoinOn(table string, from string, to string, filter ...FilterQuery) JoinQuery {\n\treturn NewJoinWith(\"LEFT JOIN\", table, from, to, filter...)\n}", "func (sd *SelectDataset) Join(table exp.Expression, condition exp.JoinCondition) *SelectDataset {\n\treturn sd.InnerJoin(table, condition)\n}", "func (w *Wrapper) InnerJoin(table interface{}, condition string) *Wrapper {\n\tw.saveJoin(table, \"INNER JOIN\", condition)\n\treturn w\n}", "func (w *Wrapper) JoinWhere(table interface{}, args ...interface{}) *Wrapper {\n\tw.saveJoinCondition(\"AND\", table, args...)\n\treturn w\n}", "func (session *Session) Join(joinOperator string, tablename interface{}, condition string, args ...interface{}) *Session {\n\tsession.Statement.Join(joinOperator, tablename, condition, args...)\n\treturn session\n}", "func InnerJoin(clause string, args ...interface{}) QueryMod {\n\treturn func(q *queries.Query) {\n\t\tqueries.AppendInnerJoin(q, clause, args...)\n\t}\n}", "func NewRightJoinOn(table string, from string, to string) JoinQuery {\n\treturn NewJoinWith(\"RIGHT JOIN\", table, from, to)\n}", "func (ds *MySQL) Join(source, key, targetKey, joinType string, fields []string) {\n\tds.joinedRepositories[source] = builders.Join{\n\t\tSource: source,\n\t\tKey: key,\n\t\tTargetKey: targetKey,\n\t\tType: joinType,\n\t\tFields: fields,\n\t}\n}", "func (this *Dao) Join (joinType string, joinTable string, joinOn string) *Dao {\n\tjoin := fmt.Sprintf(\"%s JOIN %s ON %s\", strings.ToUpper(joinType), _table(joinTable), 
joinOn)\n\n\tthis.queryJoins = append(this.queryJoins, join)\n\treturn this\n}", "func NewRightJoinOn(table string, from string, to string, filter ...FilterQuery) JoinQuery {\n\treturn NewJoinWith(\"RIGHT JOIN\", table, from, to, filter...)\n}", "func (b *Builder) Join(joinType, joinTable string, joinCond interface{}) *Builder {\r\n\tswitch joinCond.(type) {\r\n\tcase Cond:\r\n\t\tb.joins = append(b.joins, join{joinType, joinTable, joinCond.(Cond)})\r\n\tcase string:\r\n\t\tb.joins = append(b.joins, join{joinType, joinTable, Expr(joinCond.(string))})\r\n\t}\r\n\r\n\treturn b\r\n}", "func (session *Session) Join(joinOperator string, tablename interface{}, condition string, args ...interface{}) *Session {\n\tsession.Session = session.Session.Join(joinOperator, tablename, condition, args...)\n\treturn session\n}", "func (stmt *statement) LeftJoin(table, on string) Statement {\n\tstmt.join(\"LEFT JOIN \", table, on)\n\treturn stmt\n}", "func (b *Builder) InnerJoin(joinTable string, joinCond interface{}) *Builder {\r\n\treturn b.Join(\"INNER\", joinTable, joinCond)\r\n}", "func InnerJoin(tables []*DataTable, on []JoinOn) (*DataTable, error) {\n\treturn newJoinImpl(innerJoin, tables, on).Compute()\n}", "func (b *Builder) Join(joinType string, joinTable, joinCond interface{}) *Builder {\n\tswitch joinCond.(type) {\n\tcase Cond:\n\t\tb.joins = append(b.joins, join{joinType, joinTable, joinCond.(Cond)})\n\tcase string:\n\t\tb.joins = append(b.joins, join{joinType, joinTable, Expr(joinCond.(string))})\n\t}\n\n\treturn b\n}", "func (b CreateIndexBuilder) On(table string) CreateIndexBuilder {\n\treturn builder.Set(b, \"Table\", table).(CreateIndexBuilder)\n}", "func (b *Builder) InnerJoin(joinTable, joinCond interface{}) *Builder {\n\treturn b.Join(\"INNER\", joinTable, joinCond)\n}", "func (self Accessor) InnerJoin(expr interface{}) *SelectManager {\n\treturn self.From(self.Relation()).InnerJoin(expr)\n}", "func LeftJoin(table, on string) QueryOption {\n\treturn newFuncQueryOption(func(wrapper *QueryWrapper) {\n\t\twrapper.joins = append(wrapper.joins, \"LEFT\", \"JOIN\", table, \"ON\", on)\n\t\twrapper.queryLen += 5\n\t})\n}", "func (o *EnsemblingJobSource) SetJoinOn(v []string) {\n\to.JoinOn = v\n}", "func (b *JoinBuilder) Using(cols ...interface{}) *JoinBuilder {\n\tvar vals Columns\n\tfor _, c := range cols {\n\t\tvar name string\n\t\tswitch t := c.(type) {\n\t\tcase string:\n\t\t\tname = t\n\t\tcase ValExprBuilder:\n\t\t\tif n, ok := t.ValExpr.(*ColName); ok {\n\t\t\t\tname = n.Name\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tvar v ValExpr\n\t\tif len(name) == 0 {\n\t\t\tv = makeErrVal(\"unsupported type %T: %v\", c, c)\n\t\t} else if b.leftTable.column(name) == nil ||\n\t\t\tb.rightTable.column(name) == nil {\n\t\t\tv = makeErrVal(\"invalid join column: %s\", name)\n\t\t} else {\n\t\t\tv = &ColName{Name: name}\n\t\t}\n\t\tvals = append(vals, &NonStarExpr{Expr: v})\n\t}\n\tb.Cond = &UsingJoinCond{Cols: vals}\n\treturn b\n}", "func (stmt *statement) FullJoin(table, on string) Statement {\n\tstmt.join(\"FULL JOIN \", table, on)\n\treturn stmt\n}", "func (c *Condition) On(stmt string, args ...interface{}) *Condition {\n\tc.Reset(ON)\n\tc.conditionHelper(ON, stmt, args)\n\treturn c\n}", "func (t *TableExpr) JoinSQL() string {\n\tif t.JoinConditions != \"\" && t.JoinType != \"\" {\n\t\treturn \" \" + t.JoinType + \" \" + t.Table.SQL() + \" on \" +\n\t\t\tt.JoinConditions\n\t}\n\treturn \", \" + t.Table.SQL()\n}", "func (s *BasePlSqlParserListener) EnterJoin_using_part(ctx *Join_using_partContext) {}", "func 
ExampleTable() {\n\tuser := q.T(\"user\", \"usr\")\n\tpost := q.T(\"post\", \"pst\")\n\t// user.id -> post.user_id\n\tuser.InnerJoin(post, q.Eq(user.C(\"id\"), post.C(\"user_id\")))\n\tfmt.Println(\"Short:\", user)\n\n\tpostTag := q.T(\"posttag\", \"rel\")\n\ttag := q.T(\"tag\", \"tg\")\n\t// post.id -> posttag.post_id\n\tpost.InnerJoin(postTag, q.Eq(post.C(\"id\"), postTag.C(\"post_id\")))\n\t// posttag.tag_id -> tag.id\n\tpostTag.InnerJoin(tag, q.Eq(postTag.C(\"tag_id\"), tag.C(\"id\")))\n\tfmt.Println(\"Long: \", user)\n\t// Output:\n\t// Short: \"user\" AS \"usr\" INNER JOIN \"post\" AS \"pst\" ON \"usr\".\"id\" = \"pst\".\"user_id\" []\n\t// Long: \"user\" AS \"usr\" INNER JOIN (\"post\" AS \"pst\" INNER JOIN (\"posttag\" AS \"rel\" INNER JOIN \"tag\" AS \"tg\" ON \"rel\".\"tag_id\" = \"tg\".\"id\") ON \"pst\".\"id\" = \"rel\".\"post_id\") ON \"usr\".\"id\" = \"pst\".\"user_id\" []\n}", "func (left *DataTable) InnerJoin(right *DataTable, on []JoinOn) (*DataTable, error) {\n\treturn newJoinImpl(innerJoin, []*DataTable{left, right}, on).Compute()\n}", "func NewUsingJoin(left, right sql.Node, op JoinType, cols []string) *JoinNode {\n\treturn &JoinNode{\n\t\tOp: op,\n\t\tBinaryNode: BinaryNode{left: left, right: right},\n\t\tUsingCols: cols,\n\t}\n}", "func (w *Wrapper) LeftJoin(table interface{}, condition string) *Wrapper {\n\tw.saveJoin(table, \"LEFT JOIN\", condition)\n\treturn w\n}", "func (sd *SelectDataset) InnerJoin(table exp.Expression, condition exp.JoinCondition) *SelectDataset {\n\treturn sd.joinTable(exp.NewConditionedJoinExpression(exp.InnerJoinType, table, condition))\n}", "func (b *JoinBuilder) InnerJoin(other *Table) *JoinBuilder {\n\treturn makeJoinBuilder(\"INNER JOIN\", b, other)\n}", "func (h *joinPlanningHelper) remapOnExpr(\n\tplanCtx *PlanningCtx, onCond tree.TypedExpr,\n) (execinfrapb.Expression, error) {\n\tif onCond == nil {\n\t\treturn execinfrapb.Expression{}, nil\n\t}\n\n\tjoinColMap := make([]int, h.numLeftOutCols+h.numRightOutCols)\n\tidx := 0\n\tleftCols := 0\n\tfor i := 0; i < h.numLeftOutCols; i++ {\n\t\tjoinColMap[idx] = h.leftPlanToStreamColMap[i]\n\t\tif h.leftPlanToStreamColMap[i] != -1 {\n\t\t\tleftCols++\n\t\t}\n\t\tidx++\n\t}\n\tfor i := 0; i < h.numRightOutCols; i++ {\n\t\tjoinColMap[idx] = leftCols + h.rightPlanToStreamColMap[i]\n\t\tidx++\n\t}\n\n\treturn physicalplan.MakeExpression(onCond, planCtx, joinColMap)\n}", "func (m Method) On(on string) *jen.Statement {\n\treturn jen.Id(on).Dot(m.function.name)\n}", "func (t *Table) LeftJoin(offset int32, count int, crit string, target interface{}) error {\n\tbody, err := t.LeftJoinRaw(offset, count, crit)\n\tif err == nil {\n\t\terr = json.Unmarshal(body, &target)\n\t}\n\treturn err\n}", "func (s *BasePlSqlParserListener) EnterJoin_on_part(ctx *Join_on_partContext) {}", "func buildJoin(joins []Join, baseTable string) string {\n\tif len(joins) == 0 {\n\t\treturn \"\"\n\t}\n\n\tjoin := \"\"\n\tfor _, j := range joins {\n\t\tjoin += fmt.Sprintf(\n\t\t\t\" JOIN %s ON (%s.%s %s %s.%s)\",\n\t\t\tquote(j.table),\n\t\t\tquote(baseTable),\n\t\t\tquote(j.on.field),\n\t\t\tstring(j.on.comparison),\n\t\t\tquote(j.table),\n\t\t\tquote(j.on.value.(string)),\n\t\t)\n\t}\n\treturn join\n}", "func (b *QueryBuilder) Join(n NodeI, condition NodeI) {\n\t// Possible TBD: If we ever want to support joining the same tables multiple\n\t// times with different conditions, we could use an alias to name each join. 
We would\n\t// then need to create an Alias node to specify which join is meant in different clauses.\n\n\tif b.Joins != nil {\n\t\tif !NodeIsReferenceI(n) {\n\t\t\tpanic(\"you can only join Reference, ReverseReference and ManyManyReference nodes\")\n\t\t}\n\n\t\tif NodeTableName(RootNode(n)) != NodeTableName(b.Joins[0]) {\n\t\t\tpanic(\"you can only join nodes starting from the same table as the root node. This node must start from \" + NodeTableName(b.Joins[0]))\n\t\t}\n\t}\n\n\tNodeSetCondition(n, condition)\n\tb.Joins = append(b.Joins, n)\n}", "func (w *Wrapper) NaturalJoin(table interface{}, condition string) *Wrapper {\n\tw.saveJoin(table, \"NATURAL JOIN\", condition)\n\treturn w\n}", "func FullJoin(table, on string) QueryOption {\n\treturn newFuncQueryOption(func(wrapper *QueryWrapper) {\n\t\twrapper.joins = append(wrapper.joins, \"FULL\", \"JOIN\", table, \"ON\", on)\n\t\twrapper.queryLen += 5\n\t})\n}", "func (t *ToyBrick) Join(fv FieldSelection) *ToyBrick {\n\treturn t.Scope(func(t *ToyBrick) *ToyBrick {\n\t\tif t.alias == \"\" {\n\t\t\tt = t.Alias(\"m\")\n\t\t}\n\t\tfield := t.Model.fieldSelect(fv)\n\n\t\tif join := t.JoinMap[field.Name()]; join != nil {\n\t\t\tnewt := *t\n\t\t\tcurrentJoinSwap := joinSwap(t.SwapMap[field.Name()], &newt)\n\t\t\tnewt.Model = join.SubModel\n\t\t\tnewt.preSwap = &PreJoinSwap{currentJoinSwap, t.preSwap, t.Model, field}\n\t\t\treturn &newt\n\t\t} else if join := t.Toy.Join(t.Model, field); join != nil {\n\t\t\tnewt := *t\n\t\t\tnewt.Model = join.SubModel\n\t\t\tswap := NewJoinSwap(fmt.Sprintf(\"%s_%d\", t.alias, len(t.JoinMap)))\n\t\t\tcurrentJoinSwap := joinSwap(swap, &newt)\n\n\t\t\t// add field to pre swap\n\t\t\tcurrentJoinSwap.JoinMap = t.CopyJoin()\n\t\t\tcurrentJoinSwap.JoinMap[field.Name()] = join\n\t\t\tcurrentJoinSwap.SwapMap = t.CopyJoinSwap()\n\t\t\tcurrentJoinSwap.SwapMap[field.Name()] = swap\n\n\t\t\tnewt.preSwap = &PreJoinSwap{currentJoinSwap, t.preSwap, t.Model, field}\n\t\t\treturn &newt\n\t\t} else {\n\t\t\tpanic(ErrInvalidPreloadField{t.Model.ReflectType.Name(), field.Name()})\n\t\t}\n\n\t})\n}", "func (sd *SelectDataset) joinTable(join exp.JoinExpression) *SelectDataset {\n\treturn sd.copy(sd.clauses.JoinsAppend(join))\n}", "func (c *Command) OnJoin(user string) {\n}", "func OuterJoin(tables []*DataTable, on []JoinOn) (*DataTable, error) {\n\treturn newJoinImpl(outerJoin, tables, on).Compute()\n}", "func (self Accessor) OuterJoin(expr interface{}) *SelectManager {\n\treturn self.From(self.Relation()).OuterJoin(expr)\n}", "func JoinQuery(c *Context, sep string, values []interface{}) string {\n\ts := make([]interface{}, len(values)*2-1)\n\tfor k, v := range values {\n\t\tif k > 0 {\n\t\t\ts[k*2-1] = sep\n\t\t}\n\t\ts[k*2] = MakeField(v)\n\t}\n\n\treturn ConcatQuery(c, s...)\n}", "func (o *EnsemblingJobSource) GetJoinOn() []string {\n\tif o == nil {\n\t\tvar ret []string\n\t\treturn ret\n\t}\n\n\treturn o.JoinOn\n}", "func (b *Builder) CrossJoin(joinTable string, joinCond interface{}) *Builder {\r\n\treturn b.Join(\"CROSS\", joinTable, joinCond)\r\n}", "func (mySelf SQLJoin) Using(column SQLColumn) SQLJoin {\n\tmySelf.using = column\n\treturn mySelf\n}", "func NewJoinWith(mode string, table string, from string, to string) JoinQuery {\n\treturn JoinQuery{\n\t\tMode: mode,\n\t\tTable: table,\n\t\tFrom: from,\n\t\tTo: to,\n\t}\n}", "func (r *MysqlDatasource) JoinWithAuthors(transaction *gorm.DB, id int) ([]*BookAuthorJoinModel, error) {\n\tdb := r.db\n\tif transaction != nil {\n\t\tdb = transaction\n\t}\n\n\tresult := 
[]*BookAuthorJoinModel{}\n\terr := db.Raw(`\n\tSELECT \n\t\tbook.*,\n\t\tauthor.id as author_id, \n\t\tauthor.name as author_name, \n\t\tauthor.birthdate as author_birthdate\n\tFROM books book\n\tINNER JOIN book_authors ba on book.id = ba.book_id \n\tINNER JOIN authors author on author.id = ba.author_id \n\twhere book.id = ?\n\t`, id).Find(&result).Error\n\tif err != nil {\n\t\treturn nil, err\n\t} else if len(result) == 0 {\n\t\treturn nil, gorm.ErrRecordNotFound\n\t}\n\n\treturn result, nil\n}", "func NewJoinWith(mode string, table string, from string, to string, filter ...FilterQuery) JoinQuery {\n\treturn JoinQuery{\n\t\tMode: mode,\n\t\tTable: table,\n\t\tFrom: from,\n\t\tTo: to,\n\t\tFilter: And(filter...),\n\t}\n}", "func LeftJoin(tables []*DataTable, on []JoinOn) (*DataTable, error) {\n\treturn newJoinImpl(leftJoin, tables, on).Compute()\n}", "func NewInnerJoin(table string, filter ...FilterQuery) JoinQuery {\n\treturn NewInnerJoinOn(table, \"\", \"\", filter...)\n}", "func SessionJoinSQL(prefix string) string {\n\tfields := []string{\"id\", \"created_at\", \"deleted_at\", \"user_id\"}\n\toutput := \"\"\n\n\tfor i, field := range fields {\n\t\tif i != 0 {\n\t\t\toutput += \", \"\n\t\t}\n\n\t\tfullName := fmt.Sprintf(\"%s.%s\", prefix, field)\n\t\toutput += fmt.Sprintf(\"%s \\\"%s\\\"\", fullName, fullName)\n\t}\n\n\treturn output\n}", "func NewInnerJoin(table string) JoinQuery {\n\treturn NewInnerJoinOn(table, \"\", \"\")\n}", "func (b *Builder) CrossJoin(joinTable, joinCond interface{}) *Builder {\n\treturn b.Join(\"CROSS\", joinTable, joinCond)\n}", "func EqualsJoinCondition(a, b JoinCondition) bool {\n\treturn EqualsExpr(a.On, b.On) &&\n\t\tEqualsColumns(a.Using, b.Using)\n}", "func (b *Builder) LeftJoin(joinTable string, joinCond interface{}) *Builder {\r\n\treturn b.Join(\"LEFT\", joinTable, joinCond)\r\n}", "func (b *Builder) LeftJoin(joinTable, joinCond interface{}) *Builder {\n\treturn b.Join(\"LEFT\", joinTable, joinCond)\n}", "func (b *JoinBuilder) LeftJoin(other *Table) *JoinBuilder {\n\treturn makeJoinBuilder(\"LEFT JOIN\", b, other)\n}", "func NewJoin(table string, filter ...FilterQuery) JoinQuery {\n\treturn NewJoinWith(\"JOIN\", table, \"\", \"\", filter...)\n}", "func (filter *JoinFilter) JoinClause(structMap TableAndColumnLocater, dialect gorp.Dialect, startBindIdx int) (string, []interface{}, error) {\n\tjoin := \" inner join \" + filter.QuotedJoinTable\n\ton, args, err := filter.AndFilter.Where(structMap, dialect, startBindIdx)\n\tif err != nil {\n\t\treturn \"\", nil, err\n\t}\n\tif on != \"\" {\n\t\tjoin += \" on \" + on\n\t}\n\treturn join, args, nil\n}", "func (left *DataTable) OuterJoin(right *DataTable, on []JoinOn) (*DataTable, error) {\n\treturn newJoinImpl(outerJoin, []*DataTable{left, right}, on).Compute()\n}", "func (b *Builder) FullJoin(joinTable string, joinCond interface{}) *Builder {\r\n\treturn b.Join(\"FULL\", joinTable, joinCond)\r\n}", "func (s *BaseMySqlParserListener) EnterInnerJoin(ctx *InnerJoinContext) {}", "func NewJoin(table string) JoinQuery {\n\treturn NewJoinWith(\"JOIN\", table, \"\", \"\")\n}", "func TestJoinTableSqlBuilder(t *testing.T) {\n\tmock := NewMockOptimizer(false)\n\n\t// should pass\n\tsqls := []string{\n\t\t\"SELECT N_NAME,N_REGIONKEY FROM NATION join REGION on NATION.N_REGIONKEY = REGION.R_REGIONKEY\",\n\t\t\"SELECT N_NAME, N_REGIONKEY FROM NATION join REGION on NATION.N_REGIONKEY = REGION.R_REGIONKEY WHERE NATION.N_REGIONKEY > 0\",\n\t\t\"SELECT N_NAME, NATION2.R_REGIONKEY FROM NATION2 join REGION using(R_REGIONKEY) 
WHERE NATION2.R_REGIONKEY > 0\",\n\t\t\"SELECT N_NAME, NATION2.R_REGIONKEY FROM NATION2 NATURAL JOIN REGION WHERE NATION2.R_REGIONKEY > 0\",\n\t\t\"SELECT N_NAME FROM NATION NATURAL JOIN REGION\", //have no same column name but it's ok\n\t\t\"SELECT N_NAME,N_REGIONKEY FROM NATION a join REGION b on a.N_REGIONKEY = b.R_REGIONKEY WHERE a.N_REGIONKEY > 0\", //test alias\n\t\t\"SELECT l.L_ORDERKEY a FROM CUSTOMER c, ORDERS o, LINEITEM l WHERE c.C_CUSTKEY = o.O_CUSTKEY and l.L_ORDERKEY = o.O_ORDERKEY and o.O_ORDERKEY < 10\", //join three tables\n\t\t\"SELECT c.* FROM CUSTOMER c, ORDERS o, LINEITEM l WHERE c.C_CUSTKEY = o.O_CUSTKEY and l.L_ORDERKEY = o.O_ORDERKEY\", //test star\n\t\t\"SELECT * FROM CUSTOMER c, ORDERS o, LINEITEM l WHERE c.C_CUSTKEY = o.O_CUSTKEY and l.L_ORDERKEY = o.O_ORDERKEY\", //test star\n\t\t\"SELECT a.* FROM NATION a join REGION b on a.N_REGIONKEY = b.R_REGIONKEY WHERE a.N_REGIONKEY > 0\", //test star\n\t\t\"SELECT * FROM NATION a join REGION b on a.N_REGIONKEY = b.R_REGIONKEY WHERE a.N_REGIONKEY > 0\",\n\t\t\"SELECT N_NAME, R_REGIONKEY FROM NATION2 join REGION using(R_REGIONKEY)\",\n\t\t\"select nation.n_name from nation join nation2 on nation.n_name !='a' join region on nation.n_regionkey = region.r_regionkey\",\n\t\t\"select * from nation, nation2, region\",\n\t}\n\trunTestShouldPass(mock, t, sqls, false, false)\n\n\t// should error\n\tsqls = []string{\n\t\t\"SELECT N_NAME,N_REGIONKEY FROM NATION join REGION on NATION.N_REGIONKEY = REGION.NotExistColumn\", //column not exist\n\t\t\"SELECT N_NAME, R_REGIONKEY FROM NATION join REGION using(R_REGIONKEY)\", //column not exist\n\t\t\"SELECT N_NAME,N_REGIONKEY FROM NATION a join REGION b on a.N_REGIONKEY = b.R_REGIONKEY WHERE aaaaa.N_REGIONKEY > 0\", //table alias not exist\n\t\t\"select *\", //No table used\n\t\t\"SELECT * FROM NATION a join REGION b on a.N_REGIONKEY = b.R_REGIONKEY WHERE a.N_REGIONKEY > 0 for update\", //Not support\n\t\t\"select * from nation, nation2, region for update\", // Not support\n\t}\n\trunTestShouldError(mock, t, sqls)\n}", "func Joined(v time.Time) predicate.User {\n\treturn predicate.User(func(s *sql.Selector) {\n\t\ts.Where(sql.EQ(s.C(FieldJoined), v))\n\t})\n}", "func (q Query) JoinT(inner Query,\n\touterKeySelectorFn interface{},\n\tinnerKeySelectorFn interface{},\n\tresultSelectorFn interface{}) Query {\n\touterKeySelectorGenericFunc, err := newGenericFunc(\n\t\t\"JoinT\", \"outerKeySelectorFn\", outerKeySelectorFn,\n\t\tsimpleParamValidator(newElemTypeSlice(new(genericType)), newElemTypeSlice(new(genericType))),\n\t)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\touterKeySelectorFunc := func(item interface{}) interface{} {\n\t\treturn outerKeySelectorGenericFunc.Call(item)\n\t}\n\n\tinnerKeySelectorFuncGenericFunc, err := newGenericFunc(\n\t\t\"JoinT\", \"innerKeySelectorFn\",\n\t\tinnerKeySelectorFn,\n\t\tsimpleParamValidator(newElemTypeSlice(new(genericType)), newElemTypeSlice(new(genericType))),\n\t)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tinnerKeySelectorFunc := func(item interface{}) interface{} {\n\t\treturn innerKeySelectorFuncGenericFunc.Call(item)\n\t}\n\n\tresultSelectorGenericFunc, err := newGenericFunc(\n\t\t\"JoinT\", \"resultSelectorFn\", resultSelectorFn,\n\t\tsimpleParamValidator(newElemTypeSlice(new(genericType), new(genericType)), newElemTypeSlice(new(genericType))),\n\t)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tresultSelectorFunc := func(outer interface{}, inner interface{}) interface{} {\n\t\treturn resultSelectorGenericFunc.Call(outer, inner)\n\t}\n\n\treturn 
q.Join(inner, outerKeySelectorFunc, innerKeySelectorFunc, resultSelectorFunc)\n}", "func (left *DataTable) LeftJoin(right *DataTable, on []JoinOn) (*DataTable, error) {\n\treturn newJoinImpl(leftJoin, []*DataTable{left, right}, on).Compute()\n}", "func (q Query) Join(inner Query,\n\touterKeySelector func(interface{}) interface{},\n\tinnerKeySelector func(interface{}) interface{},\n\tresultSelector func(outer interface{}, inner interface{}) interface{}) Query {\n\n\treturn Query{\n\t\tIterate: func() Iterator {\n\t\t\touternext := q.Iterate()\n\t\t\tinnernext := inner.Iterate()\n\n\t\t\tinnerLookup := make(map[interface{}][]interface{})\n\t\t\tfor innerItem, ok := innernext(); ok; innerItem, ok = innernext() {\n\t\t\t\tinnerKey := innerKeySelector(innerItem)\n\t\t\t\tinnerLookup[innerKey] = append(innerLookup[innerKey], innerItem)\n\t\t\t}\n\n\t\t\tvar outerItem interface{}\n\t\t\tvar innerGroup []interface{}\n\t\t\tinnerLen, innerIndex := 0, 0\n\n\t\t\treturn func() (item interface{}, ok bool) {\n\t\t\t\tif innerIndex >= innerLen {\n\t\t\t\t\thas := false\n\t\t\t\t\tfor !has {\n\t\t\t\t\t\touterItem, ok = outernext()\n\t\t\t\t\t\tif !ok {\n\t\t\t\t\t\t\treturn\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tinnerGroup, has = innerLookup[outerKeySelector(outerItem)]\n\t\t\t\t\t\tinnerLen = len(innerGroup)\n\t\t\t\t\t\tinnerIndex = 0\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\titem = resultSelector(outerItem, innerGroup[innerIndex])\n\t\t\t\tinnerIndex++\n\t\t\t\treturn item, true\n\t\t\t}\n\t\t},\n\t}\n}", "func (sd *SelectDataset) LeftJoin(table exp.Expression, condition exp.JoinCondition) *SelectDataset {\n\treturn sd.joinTable(exp.NewConditionedJoinExpression(exp.LeftJoinType, table, condition))\n}", "func (s *Subscription) OnJoin(handler JoinHandler) {\n\tproxy := &subEventProxy{sub: s, onJoin: handler}\n\ts.sub.OnJoin(proxy)\n}", "func (w *Wrapper) buildJoin() (query string) {\n\tif len(w.joins) == 0 {\n\t\treturn\n\t}\n\n\tfor _, v := range w.joins {\n\t\t// The join type (ex: LEFT JOIN, RIGHT JOIN, INNER JOIN).\n\t\tquery += fmt.Sprintf(\"%s \", v.typ)\n\t\tswitch d := v.table.(type) {\n\t\t// Sub query.\n\t\tcase *Wrapper:\n\t\t\tquery += fmt.Sprintf(\"%s AS %s ON \", w.bindParam(d), d.alias)\n\t\t// Table name.\n\t\tcase string:\n\t\t\tquery += fmt.Sprintf(\"%s ON \", d)\n\t\t}\n\n\t\tif len(v.conditions) == 0 {\n\t\t\tquery += fmt.Sprintf(\"(%s) \", v.condition)\n\t\t} else {\n\t\t\tconditionsQuery := strings.TrimSpace(w.buildConditions(v.conditions))\n\t\t\tquery += fmt.Sprintf(\"(%s %s %s) \", v.condition, v.conditions[0].connector, conditionsQuery)\n\t\t}\n\t}\n\treturn\n}", "func (b *Builder) FullJoin(joinTable, joinCond interface{}) *Builder {\n\treturn b.Join(\"FULL\", joinTable, joinCond)\n}", "func EqualsRefOfJoinCondition(a, b *JoinCondition) bool {\n\tif a == b {\n\t\treturn true\n\t}\n\tif a == nil || b == nil {\n\t\treturn false\n\t}\n\treturn EqualsExpr(a.On, b.On) &&\n\t\tEqualsColumns(a.Using, b.Using)\n}", "func JoinedIn(vs ...time.Time) predicate.User {\n\tv := make([]interface{}, len(vs))\n\tfor i := range v {\n\t\tv[i] = vs[i]\n\t}\n\treturn predicate.User(func(s *sql.Selector) {\n\t\t// if not arguments were provided, append the FALSE constants,\n\t\t// since we can't apply \"IN ()\". 
This will make this predicate falsy.\n\t\tif len(v) == 0 {\n\t\t\ts.Where(sql.False())\n\t\t\treturn\n\t\t}\n\t\ts.Where(sql.In(s.C(FieldJoined), v...))\n\t})\n}", "func (w *Wrapper) JoinOrWhere(table interface{}, args ...interface{}) *Wrapper {\n\tw.saveJoinCondition(\"OR\", table, args...)\n\treturn w\n}", "func (sd *SelectDataset) NaturalJoin(table exp.Expression) *SelectDataset {\n\treturn sd.joinTable(exp.NewUnConditionedJoinExpression(exp.NaturalJoinType, table))\n}", "func NewJoinAssocWith(mode string, assoc string, filter ...FilterQuery) JoinQuery {\n\treturn JoinQuery{\n\t\tMode: mode,\n\t\tAssoc: assoc,\n\t\tFilter: And(filter...),\n\t}\n}", "func (s *BasePlSqlParserListener) EnterOuter_join_sign(ctx *Outer_join_signContext) {}", "func (f *predicateSqlizerFactory) addJoinsToSelectBuilder(q sq.SelectBuilder) sq.SelectBuilder {\n\tfor i, alias := range f.joinedTables {\n\t\taliasName := f.aliasName(alias.secondaryTable, i)\n\t\tjoinClause := fmt.Sprintf(\"%s AS %s ON %s = %s\",\n\t\t\tf.db.tableName(alias.secondaryTable), pq.QuoteIdentifier(aliasName),\n\t\t\tfullQuoteIdentifier(f.primaryTable, alias.primaryColumn),\n\t\t\tfullQuoteIdentifier(aliasName, alias.secondaryColumn))\n\t\tq = q.LeftJoin(joinClause)\n\t}\n\n\tif len(f.joinedTables) > 0 {\n\t\tq = q.Distinct()\n\t}\n\treturn q\n}", "func (w *Wrapper) saveJoin(table interface{}, typ string, condition string) {\n\tswitch v := table.(type) {\n\t// Sub query joining.\n\tcase *Wrapper:\n\t\tw.joins[v.query] = &join{\n\t\t\ttyp: typ,\n\t\t\ttable: table,\n\t\t\tcondition: condition,\n\t\t}\n\t// Common table joining.\n\tcase string:\n\t\tw.joins[v] = &join{\n\t\t\ttyp: typ,\n\t\t\ttable: table,\n\t\t\tcondition: condition,\n\t\t}\n\t}\n}", "func NewJoinAssoc(assoc string, filter ...FilterQuery) JoinQuery {\n\treturn NewJoinAssocWith(\"JOIN\", assoc, filter...)\n}", "func (sd *SelectDataset) FullJoin(table exp.Expression, condition exp.JoinCondition) *SelectDataset {\n\treturn sd.joinTable(exp.NewConditionedJoinExpression(exp.FullJoinType, table, condition))\n}", "func (s *Subscription) OnJoin(handler JoinHandler) {\n\ts.events.onJoin = handler\n}" ]
[ "0.68034065", "0.6787461", "0.6781993", "0.67451125", "0.6543356", "0.6212595", "0.61956203", "0.6015106", "0.59313536", "0.5923219", "0.5879308", "0.5878465", "0.5866631", "0.58613676", "0.5822869", "0.5781752", "0.5721144", "0.57195073", "0.5690846", "0.56610715", "0.56603783", "0.56377757", "0.56016254", "0.5522105", "0.550981", "0.54945564", "0.5467586", "0.54309946", "0.5409985", "0.53989327", "0.5332885", "0.53150666", "0.5291054", "0.52778196", "0.5264887", "0.522957", "0.5227238", "0.5207244", "0.51685166", "0.5166373", "0.51586676", "0.51536673", "0.5153615", "0.5150791", "0.515037", "0.5130692", "0.5120524", "0.5105237", "0.5097027", "0.50934106", "0.5052655", "0.50203264", "0.5011418", "0.5011353", "0.50031894", "0.49922368", "0.49712145", "0.4940476", "0.4935221", "0.49318436", "0.49117962", "0.48943338", "0.4882257", "0.48785216", "0.48709413", "0.48537037", "0.48456895", "0.48423332", "0.48406568", "0.48361892", "0.48323908", "0.48235595", "0.4802343", "0.47974244", "0.47953218", "0.47924224", "0.4782777", "0.47480702", "0.47449243", "0.47407427", "0.47394794", "0.47382188", "0.47374466", "0.47080284", "0.46813637", "0.467103", "0.46435505", "0.4631474", "0.46244892", "0.4606115", "0.4596709", "0.4593826", "0.45904493", "0.45871228", "0.45681673", "0.4566611", "0.4547262", "0.45463037", "0.45368978", "0.45050666" ]
0.61027235
7
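The negatives above are join-builder fragments from assorted Go SQL packages. A minimal, self-contained sketch of the chained-join pattern they share — the `Builder` type below is hypothetical, modeled on the `Join(joinType, joinTable, joinCond)` signatures in the list rather than taken from any single library:

```go
package main

import (
	"fmt"
	"strings"
)

// join mirrors the struct used by the Builder snippets above:
// a join type, a table, and a raw ON condition.
type join struct {
	joinType  string
	joinTable string
	joinCond  string
}

// Builder is a hypothetical, minimal stand-in for the builders in the
// negatives list; only the join-related parts are sketched.
type Builder struct {
	from  string
	joins []join
}

// Join appends a join clause, matching the common
// Join(joinType, joinTable, joinCond) signature seen above.
func (b *Builder) Join(joinType, joinTable, joinCond string) *Builder {
	b.joins = append(b.joins, join{joinType, joinTable, joinCond})
	return b
}

// InnerJoin and LeftJoin are thin wrappers, as in the snippets.
func (b *Builder) InnerJoin(table, cond string) *Builder { return b.Join("INNER", table, cond) }
func (b *Builder) LeftJoin(table, cond string) *Builder  { return b.Join("LEFT", table, cond) }

// SQL renders just the FROM/JOIN fragment built so far.
func (b *Builder) SQL() string {
	var sb strings.Builder
	sb.WriteString("FROM " + b.from)
	for _, j := range b.joins {
		fmt.Fprintf(&sb, " %s JOIN %s ON %s", j.joinType, j.joinTable, j.joinCond)
	}
	return sb.String()
}

func main() {
	b := &Builder{from: "nation"}
	b.InnerJoin("region", "nation.n_regionkey = region.r_regionkey").
		LeftJoin("nation2", "nation.n_name != nation2.n_name")
	fmt.Println(b.SQL())
	// FROM nation INNER JOIN region ON nation.n_regionkey = region.r_regionkey
	// LEFT JOIN nation2 ON nation.n_name != nation2.n_name
}
```

Each call appends a `join` record and returns the receiver, which is what makes the fluent chaining in the negatives possible.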
Using creates a "join using" expression ie, as SQL, SELECT FROM A INNER JOIN B USING 'field'
func Using(fields ...string) []JoinOn { var jon []JoinOn for _, f := range fields { jon = append(jon, JoinOn{Table: "*", Field: f}) } return jon }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (mySelf SQLJoin) Using(column SQLColumn) SQLJoin {\n\tmySelf.using = column\n\treturn mySelf\n}", "func (b *JoinBuilder) Using(cols ...interface{}) *JoinBuilder {\n\tvar vals Columns\n\tfor _, c := range cols {\n\t\tvar name string\n\t\tswitch t := c.(type) {\n\t\tcase string:\n\t\t\tname = t\n\t\tcase ValExprBuilder:\n\t\t\tif n, ok := t.ValExpr.(*ColName); ok {\n\t\t\t\tname = n.Name\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tvar v ValExpr\n\t\tif len(name) == 0 {\n\t\t\tv = makeErrVal(\"unsupported type %T: %v\", c, c)\n\t\t} else if b.leftTable.column(name) == nil ||\n\t\t\tb.rightTable.column(name) == nil {\n\t\t\tv = makeErrVal(\"invalid join column: %s\", name)\n\t\t} else {\n\t\t\tv = &ColName{Name: name}\n\t\t}\n\t\tvals = append(vals, &NonStarExpr{Expr: v})\n\t}\n\tb.Cond = &UsingJoinCond{Cols: vals}\n\treturn b\n}", "func NewUsingJoin(left, right sql.Node, op JoinType, cols []string) *JoinNode {\n\treturn &JoinNode{\n\t\tOp: op,\n\t\tBinaryNode: BinaryNode{left: left, right: right},\n\t\tUsingCols: cols,\n\t}\n}", "func (tbl AssociationTable) Using(tx sqlapi.SqlTx) AssociationTabler {\n\ttbl.db = tx\n\treturn tbl\n}", "func (s *BasePlSqlParserListener) EnterJoin_using_part(ctx *Join_using_partContext) {}", "func (tbl DbCompoundTable) Using(tx *sql.Tx) DbCompoundTable {\n\ttbl.db = tx\n\treturn tbl\n}", "func (tbl RecordTable) Using(tx sqlapi.SqlTx) RecordTable {\n\ttbl.db = tx\n\treturn tbl\n}", "func JoinWithQL(handler *model.JoinTableHandler, ne *engine.Engine, source interface{}) error {\n\tne.Scope.ContextValue(source)\n\ttableName := handler.TableName\n\tquotedTableName := Quote(ne, tableName)\n\tvar joinConditions []string\n\tm, err := GetModelStruct(ne, source)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif handler.Source.ModelType == m.ModelType {\n\t\tne.Search.TableNames = append(ne.Search.TableNames, handler.TableName)\n\t\td := reflect.New(handler.Destination.ModelType).Interface()\n\t\tdestinationTableName := QuotedTableName(ne, d)\n\t\tfor _, foreignKey := range handler.Destination.ForeignKeys {\n\t\t\tjoinConditions = append(joinConditions, fmt.Sprintf(\"%v.%v = %v.%v\",\n\t\t\t\tquotedTableName,\n\t\t\t\tQuote(ne, foreignKey.DBName),\n\t\t\t\tdestinationTableName,\n\t\t\t\tQuote(ne, foreignKey.AssociationDBName)))\n\t\t}\n\n\t\tvar foreignDBNames []string\n\t\tvar foreignFieldNames []string\n\t\tfor _, foreignKey := range handler.Source.ForeignKeys {\n\t\t\tforeignDBNames = append(foreignDBNames, foreignKey.DBName)\n\t\t\tif field, ok := FieldByName(ne, source, foreignKey.AssociationDBName); ok == nil {\n\t\t\t\tforeignFieldNames = append(foreignFieldNames, field.Name)\n\t\t\t}\n\t\t}\n\n\t\tforeignFieldValues := util.ColumnAsArray(foreignFieldNames, ne.Scope.ValueOf())\n\n\t\tif len(foreignFieldValues) > 0 {\n\t\t\tvar quotedForeignDBNames []string\n\t\t\tfor _, dbName := range foreignDBNames {\n\t\t\t\tquotedForeignDBNames = append(quotedForeignDBNames, tableName+\".\"+dbName)\n\t\t\t}\n\t\t\tfor _, q := range quotedForeignDBNames {\n\t\t\t\tjoinConditions = append(joinConditions, fmt.Sprintf(\"%s=?\", q))\n\t\t\t}\n\t\t}\n\t\tsearch.Where(ne, strings.Join(joinConditions, \" AND \"), util.ToQueryValues(foreignFieldValues)...)\n\t\treturn nil\n\t}\n\treturn errors.New(\"wrong source type for join table handler\")\n}", "func JoinWith(handler *model.JoinTableHandler, ne *engine.Engine, source interface{}) error {\n\tne.Scope.ContextValue(source)\n\ttableName := handler.TableName\n\tquotedTableName := Quote(ne, tableName)\n\tvar joinConditions []string\n\tm, err := 
GetModelStruct(ne, source)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif handler.Source.ModelType == m.ModelType {\n\t\td := reflect.New(handler.Destination.ModelType).Interface()\n\t\tdestinationTableName := QuotedTableName(ne, d)\n\t\tfor _, foreignKey := range handler.Destination.ForeignKeys {\n\t\t\tjoinConditions = append(joinConditions, fmt.Sprintf(\"%v.%v = %v.%v\",\n\t\t\t\tquotedTableName,\n\t\t\t\tQuote(ne, foreignKey.DBName),\n\t\t\t\tdestinationTableName,\n\t\t\t\tQuote(ne, foreignKey.AssociationDBName)))\n\t\t}\n\n\t\tvar foreignDBNames []string\n\t\tvar foreignFieldNames []string\n\t\tfor _, foreignKey := range handler.Source.ForeignKeys {\n\t\t\tforeignDBNames = append(foreignDBNames, foreignKey.DBName)\n\t\t\tif field, ok := FieldByName(ne, source, foreignKey.AssociationDBName); ok == nil {\n\t\t\t\tforeignFieldNames = append(foreignFieldNames, field.Name)\n\t\t\t}\n\t\t}\n\n\t\tforeignFieldValues := util.ColumnAsArray(foreignFieldNames, ne.Scope.ValueOf())\n\n\t\tvar condString string\n\t\tif len(foreignFieldValues) > 0 {\n\t\t\tvar quotedForeignDBNames []string\n\t\t\tfor _, dbName := range foreignDBNames {\n\t\t\t\tquotedForeignDBNames = append(quotedForeignDBNames, tableName+\".\"+dbName)\n\t\t\t}\n\n\t\t\tcondString = fmt.Sprintf(\"%v IN (%v)\",\n\t\t\t\tToQueryCondition(ne, quotedForeignDBNames),\n\t\t\t\tutil.ToQueryMarks(foreignFieldValues))\n\t\t} else {\n\t\t\tcondString = fmt.Sprintf(\"1 <> 1\")\n\t\t}\n\n\t\tsearch.Join(ne,\n\t\t\tfmt.Sprintf(\"INNER JOIN %v ON %v\",\n\t\t\t\tquotedTableName,\n\t\t\t\tstrings.Join(joinConditions, \" AND \")))\n\t\tsearch.Where(ne, condString, util.ToQueryValues(foreignFieldValues)...)\n\t\treturn nil\n\t}\n\treturn errors.New(\"wrong source type for join table handler\")\n}", "func (session *Session) Join(joinOperator string, tablename interface{}, condition string, args ...interface{}) *Session {\n\tsession.Statement.Join(joinOperator, tablename, condition, args...)\n\treturn session\n}", "func (stmt *statement) Join(table, on string) Statement {\n\tstmt.join(\"JOIN \", table, on)\n\treturn stmt\n}", "func (t *ToyBrick) Join(fv FieldSelection) *ToyBrick {\n\treturn t.Scope(func(t *ToyBrick) *ToyBrick {\n\t\tif t.alias == \"\" {\n\t\t\tt = t.Alias(\"m\")\n\t\t}\n\t\tfield := t.Model.fieldSelect(fv)\n\n\t\tif join := t.JoinMap[field.Name()]; join != nil {\n\t\t\tnewt := *t\n\t\t\tcurrentJoinSwap := joinSwap(t.SwapMap[field.Name()], &newt)\n\t\t\tnewt.Model = join.SubModel\n\t\t\tnewt.preSwap = &PreJoinSwap{currentJoinSwap, t.preSwap, t.Model, field}\n\t\t\treturn &newt\n\t\t} else if join := t.Toy.Join(t.Model, field); join != nil {\n\t\t\tnewt := *t\n\t\t\tnewt.Model = join.SubModel\n\t\t\tswap := NewJoinSwap(fmt.Sprintf(\"%s_%d\", t.alias, len(t.JoinMap)))\n\t\t\tcurrentJoinSwap := joinSwap(swap, &newt)\n\n\t\t\t// add field to pre swap\n\t\t\tcurrentJoinSwap.JoinMap = t.CopyJoin()\n\t\t\tcurrentJoinSwap.JoinMap[field.Name()] = join\n\t\t\tcurrentJoinSwap.SwapMap = t.CopyJoinSwap()\n\t\t\tcurrentJoinSwap.SwapMap[field.Name()] = swap\n\n\t\t\tnewt.preSwap = &PreJoinSwap{currentJoinSwap, t.preSwap, t.Model, field}\n\t\t\treturn &newt\n\t\t} else {\n\t\t\tpanic(ErrInvalidPreloadField{t.Model.ReflectType.Name(), field.Name()})\n\t\t}\n\n\t})\n}", "func (session *Session) Join(joinOperator string, tablename interface{}, condition string, args ...interface{}) *Session {\n\tsession.Session = session.Session.Join(joinOperator, tablename, condition, args...)\n\treturn session\n}", "func TestJoinTableSqlBuilder(t *testing.T) {\n\tmock 
:= NewMockOptimizer(false)\n\n\t// should pass\n\tsqls := []string{\n\t\t\"SELECT N_NAME,N_REGIONKEY FROM NATION join REGION on NATION.N_REGIONKEY = REGION.R_REGIONKEY\",\n\t\t\"SELECT N_NAME, N_REGIONKEY FROM NATION join REGION on NATION.N_REGIONKEY = REGION.R_REGIONKEY WHERE NATION.N_REGIONKEY > 0\",\n\t\t\"SELECT N_NAME, NATION2.R_REGIONKEY FROM NATION2 join REGION using(R_REGIONKEY) WHERE NATION2.R_REGIONKEY > 0\",\n\t\t\"SELECT N_NAME, NATION2.R_REGIONKEY FROM NATION2 NATURAL JOIN REGION WHERE NATION2.R_REGIONKEY > 0\",\n\t\t\"SELECT N_NAME FROM NATION NATURAL JOIN REGION\", //have no same column name but it's ok\n\t\t\"SELECT N_NAME,N_REGIONKEY FROM NATION a join REGION b on a.N_REGIONKEY = b.R_REGIONKEY WHERE a.N_REGIONKEY > 0\", //test alias\n\t\t\"SELECT l.L_ORDERKEY a FROM CUSTOMER c, ORDERS o, LINEITEM l WHERE c.C_CUSTKEY = o.O_CUSTKEY and l.L_ORDERKEY = o.O_ORDERKEY and o.O_ORDERKEY < 10\", //join three tables\n\t\t\"SELECT c.* FROM CUSTOMER c, ORDERS o, LINEITEM l WHERE c.C_CUSTKEY = o.O_CUSTKEY and l.L_ORDERKEY = o.O_ORDERKEY\", //test star\n\t\t\"SELECT * FROM CUSTOMER c, ORDERS o, LINEITEM l WHERE c.C_CUSTKEY = o.O_CUSTKEY and l.L_ORDERKEY = o.O_ORDERKEY\", //test star\n\t\t\"SELECT a.* FROM NATION a join REGION b on a.N_REGIONKEY = b.R_REGIONKEY WHERE a.N_REGIONKEY > 0\", //test star\n\t\t\"SELECT * FROM NATION a join REGION b on a.N_REGIONKEY = b.R_REGIONKEY WHERE a.N_REGIONKEY > 0\",\n\t\t\"SELECT N_NAME, R_REGIONKEY FROM NATION2 join REGION using(R_REGIONKEY)\",\n\t\t\"select nation.n_name from nation join nation2 on nation.n_name !='a' join region on nation.n_regionkey = region.r_regionkey\",\n\t\t\"select * from nation, nation2, region\",\n\t}\n\trunTestShouldPass(mock, t, sqls, false, false)\n\n\t// should error\n\tsqls = []string{\n\t\t\"SELECT N_NAME,N_REGIONKEY FROM NATION join REGION on NATION.N_REGIONKEY = REGION.NotExistColumn\", //column not exist\n\t\t\"SELECT N_NAME, R_REGIONKEY FROM NATION join REGION using(R_REGIONKEY)\", //column not exist\n\t\t\"SELECT N_NAME,N_REGIONKEY FROM NATION a join REGION b on a.N_REGIONKEY = b.R_REGIONKEY WHERE aaaaa.N_REGIONKEY > 0\", //table alias not exist\n\t\t\"select *\", //No table used\n\t\t\"SELECT * FROM NATION a join REGION b on a.N_REGIONKEY = b.R_REGIONKEY WHERE a.N_REGIONKEY > 0 for update\", //Not support\n\t\t\"select * from nation, nation2, region for update\", // Not support\n\t}\n\trunTestShouldError(mock, t, sqls)\n}", "func InnerJoin(table, on string) QueryOption {\n\treturn newFuncQueryOption(func(wrapper *QueryWrapper) {\n\t\twrapper.joins = append(wrapper.joins, \"INNER\", \"JOIN\", table, \"ON\", on)\n\t\twrapper.queryLen += 5\n\t})\n}", "func NewJoinWith(mode string, table string, from string, to string) JoinQuery {\n\treturn JoinQuery{\n\t\tMode: mode,\n\t\tTable: table,\n\t\tFrom: from,\n\t\tTo: to,\n\t}\n}", "func (m Manager) Using(conn sol.Conn) Manager {\n\tm.conn = conn\n\treturn m\n}", "func (sd *SelectDataset) Join(table exp.Expression, condition exp.JoinCondition) *SelectDataset {\n\treturn sd.InnerJoin(table, condition)\n}", "func (sd *SelectDataset) joinTable(join exp.JoinExpression) *SelectDataset {\n\treturn sd.copy(sd.clauses.JoinsAppend(join))\n}", "func (s *BasePlSqlParserListener) EnterUsing_clause(ctx *Using_clauseContext) {}", "func InnerJoin(clause string, args ...interface{}) QueryMod {\n\treturn func(q *queries.Query) {\n\t\tqueries.AppendInnerJoin(q, clause, args...)\n\t}\n}", "func (q Query) JoinT(inner Query,\n\touterKeySelectorFn interface{},\n\tinnerKeySelectorFn 
interface{},\n\tresultSelectorFn interface{}) Query {\n\touterKeySelectorGenericFunc, err := newGenericFunc(\n\t\t\"JoinT\", \"outerKeySelectorFn\", outerKeySelectorFn,\n\t\tsimpleParamValidator(newElemTypeSlice(new(genericType)), newElemTypeSlice(new(genericType))),\n\t)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\touterKeySelectorFunc := func(item interface{}) interface{} {\n\t\treturn outerKeySelectorGenericFunc.Call(item)\n\t}\n\n\tinnerKeySelectorFuncGenericFunc, err := newGenericFunc(\n\t\t\"JoinT\", \"innerKeySelectorFn\",\n\t\tinnerKeySelectorFn,\n\t\tsimpleParamValidator(newElemTypeSlice(new(genericType)), newElemTypeSlice(new(genericType))),\n\t)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tinnerKeySelectorFunc := func(item interface{}) interface{} {\n\t\treturn innerKeySelectorFuncGenericFunc.Call(item)\n\t}\n\n\tresultSelectorGenericFunc, err := newGenericFunc(\n\t\t\"JoinT\", \"resultSelectorFn\", resultSelectorFn,\n\t\tsimpleParamValidator(newElemTypeSlice(new(genericType), new(genericType)), newElemTypeSlice(new(genericType))),\n\t)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tresultSelectorFunc := func(outer interface{}, inner interface{}) interface{} {\n\t\treturn resultSelectorGenericFunc.Call(outer, inner)\n\t}\n\n\treturn q.Join(inner, outerKeySelectorFunc, innerKeySelectorFunc, resultSelectorFunc)\n}", "func (self Accessor) InnerJoin(expr interface{}) *SelectManager {\n\treturn self.From(self.Relation()).InnerJoin(expr)\n}", "func NewJoinWith(mode string, table string, from string, to string, filter ...FilterQuery) JoinQuery {\n\treturn JoinQuery{\n\t\tMode: mode,\n\t\tTable: table,\n\t\tFrom: from,\n\t\tTo: to,\n\t\tFilter: And(filter...),\n\t}\n}", "func ExampleTable() {\n\tuser := q.T(\"user\", \"usr\")\n\tpost := q.T(\"post\", \"pst\")\n\t// user.id -> post.user_id\n\tuser.InnerJoin(post, q.Eq(user.C(\"id\"), post.C(\"user_id\")))\n\tfmt.Println(\"Short:\", user)\n\n\tpostTag := q.T(\"posttag\", \"rel\")\n\ttag := q.T(\"tag\", \"tg\")\n\t// post.id -> posttag.post_id\n\tpost.InnerJoin(postTag, q.Eq(post.C(\"id\"), postTag.C(\"post_id\")))\n\t// posttag.tag_id -> tag.id\n\tpostTag.InnerJoin(tag, q.Eq(postTag.C(\"tag_id\"), tag.C(\"id\")))\n\tfmt.Println(\"Long: \", user)\n\t// Output:\n\t// Short: \"user\" AS \"usr\" INNER JOIN \"post\" AS \"pst\" ON \"usr\".\"id\" = \"pst\".\"user_id\" []\n\t// Long: \"user\" AS \"usr\" INNER JOIN (\"post\" AS \"pst\" INNER JOIN (\"posttag\" AS \"rel\" INNER JOIN \"tag\" AS \"tg\" ON \"rel\".\"tag_id\" = \"tg\".\"id\") ON \"pst\".\"id\" = \"rel\".\"post_id\") ON \"usr\".\"id\" = \"pst\".\"user_id\" []\n}", "func (stmt *statement) FullJoin(table, on string) Statement {\n\tstmt.join(\"FULL JOIN \", table, on)\n\treturn stmt\n}", "func (s *BasePlSqlParserListener) ExitJoin_using_part(ctx *Join_using_partContext) {}", "func (ds *MySQL) Join(source, key, targetKey, joinType string, fields []string) {\n\tds.joinedRepositories[source] = builders.Join{\n\t\tSource: source,\n\t\tKey: key,\n\t\tTargetKey: targetKey,\n\t\tType: joinType,\n\t\tFields: fields,\n\t}\n}", "func NewInnerJoinOn(table string, from string, to string) JoinQuery {\n\treturn NewJoinWith(\"INNER JOIN\", table, from, to)\n}", "func (filter *JoinFilter) JoinClause(structMap TableAndColumnLocater, dialect gorp.Dialect, startBindIdx int) (string, []interface{}, error) {\n\tjoin := \" inner join \" + filter.QuotedJoinTable\n\ton, args, err := filter.AndFilter.Where(structMap, dialect, startBindIdx)\n\tif err != nil {\n\t\treturn \"\", nil, err\n\t}\n\tif on != \"\" 
{\n\t\tjoin += \" on \" + on\n\t}\n\treturn join, args, nil\n}", "func (c *UsingConstraint) String() string {\n\tvar buf bytes.Buffer\n\tbuf.WriteString(\"USING (\")\n\tfor i, col := range c.Columns {\n\t\tif i != 0 {\n\t\t\tbuf.WriteString(\", \")\n\t\t}\n\t\tbuf.WriteString(col.String())\n\t}\n\tbuf.WriteString(\")\")\n\treturn buf.String()\n}", "func (t *TableExpr) JoinSQL() string {\n\tif t.JoinConditions != \"\" && t.JoinType != \"\" {\n\t\treturn \" \" + t.JoinType + \" \" + t.Table.SQL() + \" on \" +\n\t\t\tt.JoinConditions\n\t}\n\treturn \", \" + t.Table.SQL()\n}", "func NewJoinOn(table string, from string, to string) JoinQuery {\n\treturn NewJoinWith(\"JOIN\", table, from, to)\n}", "func (sb *SchemaBuilder) Use() string {\n\treturn fmt.Sprintf(`USE SCHEMA %v`, sb.QualifiedName())\n}", "func NewJoinOn(table string, from string, to string, filter ...FilterQuery) JoinQuery {\n\treturn NewJoinWith(\"JOIN\", table, from, to, filter...)\n}", "func NewInnerJoin(table string) JoinQuery {\n\treturn NewInnerJoinOn(table, \"\", \"\")\n}", "func (this *Dao) Join (joinType string, joinTable string, joinOn string) *Dao {\n\tjoin := fmt.Sprintf(\"%s JOIN %s ON %s\", strings.ToUpper(joinType), _table(joinTable), joinOn)\n\n\tthis.queryJoins = append(this.queryJoins, join)\n\treturn this\n}", "func NewInnerJoinOn(table string, from string, to string, filter ...FilterQuery) JoinQuery {\n\treturn NewJoinWith(\"INNER JOIN\", table, from, to, filter...)\n}", "func (b *Builder) Join(joinType, joinTable string, joinCond interface{}) *Builder {\r\n\tswitch joinCond.(type) {\r\n\tcase Cond:\r\n\t\tb.joins = append(b.joins, join{joinType, joinTable, joinCond.(Cond)})\r\n\tcase string:\r\n\t\tb.joins = append(b.joins, join{joinType, joinTable, Expr(joinCond.(string))})\r\n\t}\r\n\r\n\treturn b\r\n}", "func (w *Wrapper) InnerJoin(table interface{}, condition string) *Wrapper {\n\tw.saveJoin(table, \"INNER JOIN\", condition)\n\treturn w\n}", "func (b *Builder) InnerJoin(joinTable string, joinCond interface{}) *Builder {\r\n\treturn b.Join(\"INNER\", joinTable, joinCond)\r\n}", "func (self Accessor) OuterJoin(expr interface{}) *SelectManager {\n\treturn self.From(self.Relation()).OuterJoin(expr)\n}", "func (b *JoinBuilder) InnerJoin(other *Table) *JoinBuilder {\n\treturn makeJoinBuilder(\"INNER JOIN\", b, other)\n}", "func (q Query) Join(inner Query,\n\touterKeySelector func(interface{}) interface{},\n\tinnerKeySelector func(interface{}) interface{},\n\tresultSelector func(outer interface{}, inner interface{}) interface{}) Query {\n\n\treturn Query{\n\t\tIterate: func() Iterator {\n\t\t\touternext := q.Iterate()\n\t\t\tinnernext := inner.Iterate()\n\n\t\t\tinnerLookup := make(map[interface{}][]interface{})\n\t\t\tfor innerItem, ok := innernext(); ok; innerItem, ok = innernext() {\n\t\t\t\tinnerKey := innerKeySelector(innerItem)\n\t\t\t\tinnerLookup[innerKey] = append(innerLookup[innerKey], innerItem)\n\t\t\t}\n\n\t\t\tvar outerItem interface{}\n\t\t\tvar innerGroup []interface{}\n\t\t\tinnerLen, innerIndex := 0, 0\n\n\t\t\treturn func() (item interface{}, ok bool) {\n\t\t\t\tif innerIndex >= innerLen {\n\t\t\t\t\thas := false\n\t\t\t\t\tfor !has {\n\t\t\t\t\t\touterItem, ok = outernext()\n\t\t\t\t\t\tif !ok {\n\t\t\t\t\t\t\treturn\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tinnerGroup, has = innerLookup[outerKeySelector(outerItem)]\n\t\t\t\t\t\tinnerLen = len(innerGroup)\n\t\t\t\t\t\tinnerIndex = 0\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\titem = resultSelector(outerItem, 
innerGroup[innerIndex])\n\t\t\t\tinnerIndex++\n\t\t\t\treturn item, true\n\t\t\t}\n\t\t},\n\t}\n}", "func (sd *SelectDataset) InnerJoin(table exp.Expression, condition exp.JoinCondition) *SelectDataset {\n\treturn sd.joinTable(exp.NewConditionedJoinExpression(exp.InnerJoinType, table, condition))\n}", "func SessionJoinSQL(prefix string) string {\n\tfields := []string{\"id\", \"created_at\", \"deleted_at\", \"user_id\"}\n\toutput := \"\"\n\n\tfor i, field := range fields {\n\t\tif i != 0 {\n\t\t\toutput += \", \"\n\t\t}\n\n\t\tfullName := fmt.Sprintf(\"%s.%s\", prefix, field)\n\t\toutput += fmt.Sprintf(\"%s \\\"%s\\\"\", fullName, fullName)\n\t}\n\n\treturn output\n}", "func NewJoin(table string) JoinQuery {\n\treturn NewJoinWith(\"JOIN\", table, \"\", \"\")\n}", "func buildJoin(joins []Join, baseTable string) string {\n\tif len(joins) == 0 {\n\t\treturn \"\"\n\t}\n\n\tjoin := \"\"\n\tfor _, j := range joins {\n\t\tjoin += fmt.Sprintf(\n\t\t\t\" JOIN %s ON (%s.%s %s %s.%s)\",\n\t\t\tquote(j.table),\n\t\t\tquote(baseTable),\n\t\t\tquote(j.on.field),\n\t\t\tstring(j.on.comparison),\n\t\t\tquote(j.table),\n\t\t\tquote(j.on.value.(string)),\n\t\t)\n\t}\n\treturn join\n}", "func (f *predicateSqlizerFactory) addJoinsToSelectBuilder(q sq.SelectBuilder) sq.SelectBuilder {\n\tfor i, alias := range f.joinedTables {\n\t\taliasName := f.aliasName(alias.secondaryTable, i)\n\t\tjoinClause := fmt.Sprintf(\"%s AS %s ON %s = %s\",\n\t\t\tf.db.tableName(alias.secondaryTable), pq.QuoteIdentifier(aliasName),\n\t\t\tfullQuoteIdentifier(f.primaryTable, alias.primaryColumn),\n\t\t\tfullQuoteIdentifier(aliasName, alias.secondaryColumn))\n\t\tq = q.LeftJoin(joinClause)\n\t}\n\n\tif len(f.joinedTables) > 0 {\n\t\tq = q.Distinct()\n\t}\n\treturn q\n}", "func (b *Builder) Join(joinType string, joinTable, joinCond interface{}) *Builder {\n\tswitch joinCond.(type) {\n\tcase Cond:\n\t\tb.joins = append(b.joins, join{joinType, joinTable, joinCond.(Cond)})\n\tcase string:\n\t\tb.joins = append(b.joins, join{joinType, joinTable, Expr(joinCond.(string))})\n\t}\n\n\treturn b\n}", "func InnerJoin(tables []*DataTable, on []JoinOn) (*DataTable, error) {\n\treturn newJoinImpl(innerJoin, tables, on).Compute()\n}", "func (b *Builder) InnerJoin(joinTable, joinCond interface{}) *Builder {\n\treturn b.Join(\"INNER\", joinTable, joinCond)\n}", "func NewJoin(table string, filter ...FilterQuery) JoinQuery {\n\treturn NewJoinWith(\"JOIN\", table, \"\", \"\", filter...)\n}", "func NewJoinAssocWith(mode string, assoc string, filter ...FilterQuery) JoinQuery {\n\treturn JoinQuery{\n\t\tMode: mode,\n\t\tAssoc: assoc,\n\t\tFilter: And(filter...),\n\t}\n}", "func NewInnerJoin(table string, filter ...FilterQuery) JoinQuery {\n\treturn NewInnerJoinOn(table, \"\", \"\", filter...)\n}", "func Using(lexer Lexer) Emitter {\n\treturn EmitterFunc(func(groups []string, _ Lexer) Iterator {\n\t\tit, err := lexer.Tokenise(&TokeniseOptions{State: \"root\", Nested: true}, groups[0])\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\treturn it\n\t})\n}", "func NewLookupJoin(left, right sql.Node, cond sql.Expression) *JoinNode {\n\treturn NewJoin(left, right, JoinTypeLookup, cond)\n}", "func JoinTypeToUseCounter(op Operator) telemetry.Counter {\n\tswitch op {\n\tcase InnerJoinOp:\n\t\treturn sqltelemetry.JoinTypeInnerUseCounter\n\tcase LeftJoinOp, RightJoinOp:\n\t\treturn sqltelemetry.JoinTypeLeftUseCounter\n\tcase FullJoinOp:\n\t\treturn sqltelemetry.JoinTypeFullUseCounter\n\tcase SemiJoinOp:\n\t\treturn sqltelemetry.JoinTypeSemiUseCounter\n\tcase 
AntiJoinOp:\n\t\treturn sqltelemetry.JoinTypeAntiUseCounter\n\tdefault:\n\t\tpanic(errors.AssertionFailedf(\"unhandled join op %s\", op))\n\t}\n}", "func ELTMap2SelectSQL(nodeLink *NodeLinkInfo, outputName string) (string, error) {\n\t// TODO: will return SELECT\n\tvar b bytes.Buffer\n\twhereConds := make([]string, 0, 0)\n\n\tb.WriteString(\"SELECT \")\n\n\tinputs, _ := getInputTables(&nodeLink.Node)\n\toutput, _ := getOutputTable(&nodeLink.Node, outputName)\n\n\tvar firstcol = true\n\tfor _, col := range output.Columns {\n\t\tif !firstcol {\n\t\t\tb.WriteString(\", \")\n\t\t}\n\t\tfirstcol = false\n\t\tb.WriteString(strings.Trim(col.Expression, \" \"))\n\t\tb.WriteString(\" AS \")\n\t\tb.WriteString(TakeRightObj(col.Name))\n\t}\n\n\tb.WriteString(\" FROM \")\n\n\tvar firsttable = true\n\tfor _, input := range inputs {\n\n\t\tvar linkInput *NodeLinkInfo\n\t\tfor _, prevConn := range nodeLink.PrevConns {\n\t\t\tif prevConn.Label == input.TableName {\n\t\t\t\tlinkInput = prevConn.Link\n\t\t\t}\n\t\t}\n\t\tif linkInput == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tcomponentType := GetComponentType(&linkInput.Node)\n\t\tvar fromItem string\n\t\tswitch componentType {\n\t\tcase ComponentELTInput:\n\t\t\tfromItem, _ = tELTInput2FromItemSQL(linkInput)\n\t\tcase ComponentELTMap:\n\t\t\tfromItem, _ = ELTMap2SelectSQL(linkInput, input.TableName)\n\t\t\tfromItem = \"(\" + fromItem + \")\"\n\t\t}\n\t\talias := input.Alias\n\n\t\tif input.JoinType == \"NO_JOIN\" {\n\t\t\tif !firsttable {\n\t\t\t\tb.WriteRune(',')\n\t\t\t}\n\t\t\tb.WriteString(fromItem + \" \" + TakeRightObj(alias) + \" \")\n\t\t} else {\n\t\t\t// append `join`` phrase\n\t\t\tb.WriteString(joinType2join(input.JoinType) + \" \" + fromItem + \" \" + TakeRightObj(alias))\n\n\t\t\t// make `on` phrase\n\t\t\tb.WriteString(\" ON (\")\n\t\t\tfirstcol := true\n\t\t\tfor _, col := range input.Columns {\n\t\t\t\tif !col.Join {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tif !firstcol {\n\t\t\t\t\tb.WriteString(\" AND \")\n\t\t\t\t}\n\t\t\t\tfirstcol = false\n\t\t\t\tb.WriteString(col2cond(alias, &col))\n\t\t\t}\n\t\t\tb.WriteString(\")\")\n\t\t}\n\t\t// collect `where` phrase\n\t\tfor _, col := range input.Columns {\n\t\t\tif col.Join {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif col.Operator == \"\" {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\twhereConds = append(whereConds, col2cond(alias, &col))\n\t\t}\n\n\t\tfirsttable = false\n\t}\n\n\twhereConds = append(whereConds, output.Filters...)\n\n\tif len(whereConds) > 0 {\n\t\tb.WriteString(\" WHERE \")\n\t\tb.WriteString(strings.Join(whereConds, \" AND \"))\n\t}\n\tif len(output.OtherFilters) > 0 {\n\t\tb.WriteRune(' ')\n\t\tb.WriteString(strings.Join(output.OtherFilters, \" \"))\n\t}\n\n\treturn b.String(), nil\n}", "func (ae aliasExpression) Table(table string) IdentifierExpression {\n\treturn ae.alias.Table(table)\n}", "func (w *Wrapper) JoinWhere(table interface{}, args ...interface{}) *Wrapper {\n\tw.saveJoinCondition(\"AND\", table, args...)\n\treturn w\n}", "func (mySelf SQLJoin) Outer() SQLJoin {\n\tmySelf.outer = true\n\treturn mySelf\n}", "func Alias(subquery *Query, name string) SQLProvider {\n\treturn &alias{subquery, name}\n}", "func NewJoinClause(table string) Clauser {\n\treturn &joinClause{table}\n}", "func FullJoin(table, on string) QueryOption {\n\treturn newFuncQueryOption(func(wrapper *QueryWrapper) {\n\t\twrapper.joins = append(wrapper.joins, \"FULL\", \"JOIN\", table, \"ON\", on)\n\t\twrapper.queryLen += 5\n\t})\n}", "func (sb SQLBuilder) UseTable(tableFunc func(table metadata.Table) TableSQLBuilder) 
SQLBuilder {\n\tsb.Table = tableFunc\n\treturn sb\n}", "func JoinQuery(c *Context, sep string, values []interface{}) string {\n\ts := make([]interface{}, len(values)*2-1)\n\tfor k, v := range values {\n\t\tif k > 0 {\n\t\t\ts[k*2-1] = sep\n\t\t}\n\t\ts[k*2] = MakeField(v)\n\t}\n\n\treturn ConcatQuery(c, s...)\n}", "func TestPlanner_Plan_Join(t *testing.T) {\n\tdb := NewDB(\"2000-01-01T12:00:00Z\")\n\tdb.WriteSeries(\"cpu.0\", map[string]string{}, \"2000-01-01T00:00:00Z\", map[string]interface{}{\"value\": float64(1)})\n\tdb.WriteSeries(\"cpu.0\", map[string]string{}, \"2000-01-01T00:00:10Z\", map[string]interface{}{\"value\": float64(2)})\n\tdb.WriteSeries(\"cpu.0\", map[string]string{}, \"2000-01-01T00:00:20Z\", map[string]interface{}{\"value\": float64(3)})\n\tdb.WriteSeries(\"cpu.0\", map[string]string{}, \"2000-01-01T00:00:30Z\", map[string]interface{}{\"value\": float64(4)})\n\n\tdb.WriteSeries(\"cpu.1\", map[string]string{}, \"2000-01-01T00:00:00Z\", map[string]interface{}{\"value\": float64(10)})\n\tdb.WriteSeries(\"cpu.1\", map[string]string{}, \"2000-01-01T00:00:10Z\", map[string]interface{}{\"value\": float64(20)})\n\tdb.WriteSeries(\"cpu.1\", map[string]string{}, \"2000-01-01T00:00:30Z\", map[string]interface{}{\"value\": float64(40)})\n\n\t// Query must join the series and sum the values.\n\trs := db.MustPlanAndExecute(`\n\t\tSELECT sum(cpu.0.value) + sum(cpu.1.value) AS sum\n\t\tFROM JOIN(cpu.0, cpu.1)\n\t\tWHERE time >= '2000-01-01 00:00:00' AND time < '2000-01-01 00:01:00'\n\t\tGROUP BY time(10s)`)\n\n\t// Expected resultset.\n\texp := minify(`[{\n\t\t\"columns\":[\"time\",\"sum\"],\n\t\t\"values\":[\n\t\t\t[946684800000000,11],\n\t\t\t[946684810000000,22],\n\t\t\t[946684820000000,3],\n\t\t\t[946684830000000,44],\n\t\t\t[946684840000000,0],\n\t\t\t[946684850000000,0]\n\t\t]\n\t}]`)\n\n\t// Compare resultsets.\n\tif act := jsonify(rs); exp != act {\n\t\tt.Fatalf(\"unexpected resultset: %s\", indent(act))\n\t}\n}", "func (w *Wrapper) buildJoin() (query string) {\n\tif len(w.joins) == 0 {\n\t\treturn\n\t}\n\n\tfor _, v := range w.joins {\n\t\t// The join type (ex: LEFT JOIN, RIGHT JOIN, INNER JOIN).\n\t\tquery += fmt.Sprintf(\"%s \", v.typ)\n\t\tswitch d := v.table.(type) {\n\t\t// Sub query.\n\t\tcase *Wrapper:\n\t\t\tquery += fmt.Sprintf(\"%s AS %s ON \", w.bindParam(d), d.alias)\n\t\t// Table name.\n\t\tcase string:\n\t\t\tquery += fmt.Sprintf(\"%s ON \", d)\n\t\t}\n\n\t\tif len(v.conditions) == 0 {\n\t\t\tquery += fmt.Sprintf(\"(%s) \", v.condition)\n\t\t} else {\n\t\t\tconditionsQuery := strings.TrimSpace(w.buildConditions(v.conditions))\n\t\t\tquery += fmt.Sprintf(\"(%s %s %s) \", v.condition, v.conditions[0].connector, conditionsQuery)\n\t\t}\n\t}\n\treturn\n}", "func (bu *BookingUpdate) SetUsing(c *ClientEntity) *BookingUpdate {\n\treturn bu.SetUsingID(c.ID)\n}", "func (sd *SelectDataset) As(alias string) *SelectDataset {\n\treturn sd.copy(sd.clauses.SetAlias(T(alias)))\n}", "func (fc *FromClause) String() string {\n\tvar str = `FROM ` + fc.table\n\n\tfor _, l := range fc.Leafs {\n\t\tstr += \" \" + l.JoinType + \" JOIN \" + l.Table + \" ON \" + l.Condition\n\t}\n\n\treturn str\n}", "func NewSourceJoin(builder expr.SubVisitor, leftFrom, rightFrom *expr.SqlSource, conf *datasource.RuntimeSchema) (*SourceJoin, error) {\n\n\tm := &SourceJoin{\n\t\tTaskBase: NewTaskBase(\"SourceJoin\"),\n\t\tcolIndex: make(map[string]int),\n\t}\n\tm.TaskBase.TaskType = m.Type()\n\n\tm.leftStmt = leftFrom\n\tm.rightStmt = rightFrom\n\n\t//u.Debugf(\"leftFrom.Name:'%v' : %v\", leftFrom.Name, 
leftFrom.Source.StringAST())\n\tsource := conf.Conn(leftFrom.Name)\n\t//u.Debugf(\"left source: %T\", source)\n\t// Must provider either Scanner, SourcePlanner, Seeker interfaces\n\tif sourcePlan, ok := source.(datasource.SourcePlanner); ok {\n\t\t// This is flawed, visitor pattern would have you pass in a object which implements interface\n\t\t// but is one of many different objects that implement that interface so that the\n\t\t// Accept() method calls the apppropriate method\n\t\top, err := sourcePlan.Accept(NewSourcePlan(leftFrom))\n\t\t// plan := NewSourcePlan(leftFrom)\n\t\t// op, err := plan.Accept(sourcePlan)\n\t\tif err != nil {\n\t\t\tu.Errorf(\"Could not source plan for %v %T %#v\", leftFrom.Name, source, source)\n\t\t}\n\t\t//u.Debugf(\"got op: %T %#v\", op, op)\n\t\tif scanner, ok := op.(datasource.Scanner); !ok {\n\t\t\tu.Errorf(\"Could not create scanner for %v %T %#v\", leftFrom.Name, op, op)\n\t\t\treturn nil, fmt.Errorf(\"Must Implement Scanner\")\n\t\t} else {\n\t\t\tm.leftSource = scanner\n\t\t}\n\t} else {\n\t\tif scanner, ok := source.(datasource.Scanner); !ok {\n\t\t\tu.Errorf(\"Could not create scanner for %v %T %#v\", leftFrom.Name, source, source)\n\t\t\treturn nil, fmt.Errorf(\"Must Implement Scanner\")\n\t\t} else {\n\t\t\tm.leftSource = scanner\n\t\t\t//u.Debugf(\"got scanner: %T %#v\", scanner, scanner)\n\t\t}\n\t}\n\n\t//u.Debugf(\"right: Name:'%v' : %v\", rightFrom.Name, rightFrom.Source.String())\n\tsource2 := conf.Conn(rightFrom.Name)\n\t//u.Debugf(\"source right: %T\", source2)\n\t// Must provider either Scanner, and or Seeker interfaces\n\n\t// Must provider either Scanner, SourcePlanner, Seeker interfaces\n\tif sourcePlan, ok := source2.(datasource.SourcePlanner); ok {\n\t\t// This is flawed, visitor pattern would have you pass in a object which implements interface\n\t\t// but is one of many different objects that implement that interface so that the\n\t\t// Accept() method calls the apppropriate method\n\t\top, err := sourcePlan.Accept(NewSourcePlan(rightFrom))\n\t\t// plan := NewSourcePlan(rightFrom)\n\t\t// op, err := plan.Accept(sourcePlan)\n\t\tif err != nil {\n\t\t\tu.Errorf(\"Could not source plan for %v %T %#v\", rightFrom.Name, source2, source2)\n\t\t}\n\t\t//u.Debugf(\"got op: %T %#v\", op, op)\n\t\tif scanner, ok := op.(datasource.Scanner); !ok {\n\t\t\tu.Errorf(\"Could not create scanner for %v %T %#v\", rightFrom.Name, op, op)\n\t\t\treturn nil, fmt.Errorf(\"Must Implement Scanner\")\n\t\t} else {\n\t\t\tm.rightSource = scanner\n\t\t}\n\t} else {\n\t\tif scanner, ok := source2.(datasource.Scanner); !ok {\n\t\t\tu.Errorf(\"Could not create scanner for %v %T %#v\", rightFrom.Name, source2, source2)\n\t\t\treturn nil, fmt.Errorf(\"Must Implement Scanner\")\n\t\t} else {\n\t\t\tm.rightSource = scanner\n\t\t\t//u.Debugf(\"got scanner: %T %#v\", scanner, scanner)\n\t\t}\n\t}\n\n\treturn m, nil\n}", "func NewFullJoinOn(table string, from string, to string) JoinQuery {\n\treturn NewJoinWith(\"FULL JOIN\", table, from, to)\n}", "func (s *BasePlSqlParserListener) EnterUsing_element(ctx *Using_elementContext) {}", "func (ta TableAliases) add(alias sql.Nameable, target sql.Nameable) error {\n\tlowerName := strings.ToLower(alias.Name())\n\tif _, ok := ta[lowerName]; ok && lowerName != plan.DualTableName {\n\t\treturn sql.ErrDuplicateAliasOrTable.New(alias.Name())\n\t}\n\n\tta[lowerName] = target\n\treturn nil\n}", "func Join(a, b Pair) Pair {\n\treturn nest{a, b}\n}", "func (r1 *csvTable) Join(r2 rel.Relation, zero interface{}) rel.Relation {\n\treturn 
rel.NewJoin(r1, r2, zero)\n}", "func importalias(pos src.XPos, s *types.Sym, t *types.Type) *ir.Name {\n\treturn importobj(pos, s, ir.OTYPE, ir.PEXTERN, t)\n}", "func (p *simpleParser) parseJoinOperator(r reporter) (stmt *ast.JoinOperator) {\n\tstmt = &ast.JoinOperator{}\n\tnext, ok := p.lookahead(r)\n\tif !ok {\n\t\treturn\n\t}\n\tif next.Value() == \",\" {\n\t\tstmt.Comma = next\n\t\tp.consumeToken()\n\t\treturn\n\t}\n\tif next.Type() == token.KeywordJoin {\n\t\tstmt.Join = next\n\t\tp.consumeToken()\n\t\treturn\n\t}\n\tif next.Type() == token.KeywordNatural {\n\t\tstmt.Natural = next\n\t\tp.consumeToken()\n\t}\n\n\tnext, ok = p.lookahead(r)\n\tif !ok {\n\t\treturn\n\t}\n\tswitch next.Type() {\n\tcase token.KeywordLeft:\n\t\tstmt.Left = next\n\t\tp.consumeToken()\n\t\tnext, ok = p.lookahead(r)\n\t\tif !ok {\n\t\t\treturn\n\t\t}\n\t\tif next.Type() == token.KeywordOuter {\n\t\t\tstmt.Outer = next\n\t\t\tp.consumeToken()\n\t\t}\n\tcase token.KeywordInner:\n\t\tstmt.Inner = next\n\t\tp.consumeToken()\n\tcase token.KeywordCross:\n\t\tstmt.Cross = next\n\t\tp.consumeToken()\n\t}\n\n\tnext, ok = p.lookahead(r)\n\tif !ok {\n\t\treturn\n\t}\n\tif next.Type() == token.KeywordJoin {\n\t\tstmt.Join = next\n\t\tp.consumeToken()\n\t}\n\treturn\n}", "func (t *Table) Alias(from, to string) {\n\tt.Aliases[from] = to\n}", "func (sd *SelectDataset) FullOuterJoin(table exp.Expression, condition exp.JoinCondition) *SelectDataset {\n\treturn sd.joinTable(exp.NewConditionedJoinExpression(exp.FullOuterJoinType, table, condition))\n}", "func JoinModelExternalRef(model interface{}) func(*gorm.DB) *gorm.DB {\n\treturn func(db *gorm.DB) *gorm.DB {\n\t\tmScope := db.NewScope(model)\n\t\tquotedTableName := mScope.QuotedTableName()\n\n\t\tjoinField, ok := mScope.FieldByName(\"ExternalReferences\")\n\t\tif !ok {\n\t\t\tpanic(\"ExternalReferences field not found\")\n\t\t}\n\n\t\tif joinField.Relationship == nil || joinField.Relationship.Kind != \"many_to_many\" {\n\t\t\tpanic(\"ExternalReferences not a many2many relation\")\n\t\t}\n\n\t\tjoinTableHandler := joinField.Relationship.JoinTableHandler\n\t\tjoinTableName := mScope.Quote(joinTableHandler.Table(db))\n\n\t\tsourceKeys := joinTableHandler.SourceForeignKeys()\n\t\tif len(sourceKeys) != 1 {\n\t\t\tpanic(\"invalid number of source keys\")\n\t\t}\n\n\t\tdestKeys := joinTableHandler.DestinationForeignKeys()\n\t\tif len(destKeys) != 1 {\n\t\t\tpanic(\"invalid number of destination keys\")\n\t\t}\n\n\t\tjoinDestKey := mScope.Quote(destKeys[0].DBName)\n\t\tjoinSourceKey := mScope.Quote(sourceKeys[0].DBName)\n\n\t\tquery := fmt.Sprintf(externalRefSelect, joinTableName, joinSourceKey, quotedTableName, joinDestKey)\n\t\treturn db.Joins(query)\n\t}\n}", "func (s *BasePlSqlParserListener) EnterOuter_join_type(ctx *Outer_join_typeContext) {}", "func JoinWithLeftAssociativeOp(op OpCode, a Expr, b Expr) Expr {\n\t// \"(a, b) op c\" => \"a, b op c\"\n\tif comma, ok := a.Data.(*EBinary); ok && comma.Op == BinOpComma {\n\t\tcomma.Right = JoinWithLeftAssociativeOp(op, comma.Right, b)\n\t\treturn a\n\t}\n\n\t// \"a op (b op c)\" => \"(a op b) op c\"\n\t// \"a op (b op (c op d))\" => \"((a op b) op c) op d\"\n\tif binary, ok := b.Data.(*EBinary); ok && binary.Op == op {\n\t\treturn JoinWithLeftAssociativeOp(\n\t\t\top,\n\t\t\tJoinWithLeftAssociativeOp(op, a, binary.Left),\n\t\t\tbinary.Right,\n\t\t)\n\t}\n\n\t// \"a op b\" => \"a op b\"\n\t// \"(a op b) op c\" => \"(a op b) op c\"\n\treturn Expr{Loc: a.Loc, Data: &EBinary{Op: op, Left: a, Right: b}}\n}", "func alias(a, b 
*decimal.Big) *decimal.Big {\n\tif a != b {\n\t\treturn a\n\t}\n\tz := new(decimal.Big)\n\tz.Context = a.Context\n\treturn z\n}", "func NewFullJoinOn(table string, from string, to string, filter ...FilterQuery) JoinQuery {\n\treturn NewJoinWith(\"FULL JOIN\", table, from, to, filter...)\n}", "func (buo *BookingUpdateOne) SetUsing(c *ClientEntity) *BookingUpdateOne {\n\treturn buo.SetUsingID(c.ID)\n}", "func NewJoinFragment(expr string, args ...any) JoinQuery {\n\tif args == nil {\n\t\t// prevent buildJoin to populate From and To variable.\n\t\targs = []any{}\n\t}\n\n\treturn JoinQuery{\n\t\tMode: expr,\n\t\tArguments: args,\n\t}\n}", "func (ouq *OrgUnitQuery) ForShare(opts ...sql.LockOption) *OrgUnitQuery {\n\tif ouq.driver.Dialect() == dialect.Postgres {\n\t\touq.Unique(false)\n\t}\n\touq.modifiers = append(ouq.modifiers, func(s *sql.Selector) {\n\t\ts.ForShare(opts...)\n\t})\n\treturn ouq\n}", "func (s *BasePlSqlParserListener) EnterJoin_on_part(ctx *Join_on_partContext) {}", "func As(fn Aggregate, end string) Aggregate {\n\treturn Aggregate{\n\t\tSQL: func(s *sql.Selector) string {\n\t\t\treturn sql.As(fn.SQL(s), end)\n\t\t},\n\t}\n}", "func (sb *SQLBuilder) JoinRaw(join string, values ...interface{}) *SQLBuilder {\n\tvar buf strings.Builder\n\n\tbuf.WriteString(sb._join)\n\tif buf.Len() != 0 {\n\t\tbuf.WriteString(\" \")\n\t}\n\tbuf.WriteString(join)\n\n\tsb._join = buf.String()\n\n\tfor _, value := range values {\n\t\tsb._joinParams = append(sb._joinParams, value)\n\t}\n\n\treturn sb\n}", "func (b *Builder) FullJoin(joinTable string, joinCond interface{}) *Builder {\r\n\treturn b.Join(\"FULL\", joinTable, joinCond)\r\n}", "func NewFullJoin(table string) JoinQuery {\n\treturn NewFullJoinOn(table, \"\", \"\")\n}", "func NewJoinFragment(expr string, args ...interface{}) JoinQuery {\n\tif args == nil {\n\t\t// prevent buildJoin to populate From and To variable.\n\t\targs = []interface{}{}\n\t}\n\n\treturn JoinQuery{\n\t\tMode: expr,\n\t\tArguments: args,\n\t}\n}", "func join(s []jen.Code) *jen.Statement {\n\tr := jen.Empty()\n\tfor i, stmt := range s {\n\t\tif i > 0 {\n\t\t\tr.Line()\n\t\t}\n\t\tr.Add(stmt)\n\t}\n\treturn r\n}", "func (r *MysqlDatasource) JoinWithAuthors(transaction *gorm.DB, id int) ([]*BookAuthorJoinModel, error) {\n\tdb := r.db\n\tif transaction != nil {\n\t\tdb = transaction\n\t}\n\n\tresult := []*BookAuthorJoinModel{}\n\terr := db.Raw(`\n\tSELECT \n\t\tbook.*,\n\t\tauthor.id as author_id, \n\t\tauthor.name as author_name, \n\t\tauthor.birthdate as author_birthdate\n\tFROM books book\n\tINNER JOIN book_authors ba on book.id = ba.book_id \n\tINNER JOIN authors author on author.id = ba.author_id \n\twhere book.id = ?\n\t`, id).Find(&result).Error\n\tif err != nil {\n\t\treturn nil, err\n\t} else if len(result) == 0 {\n\t\treturn nil, gorm.ErrRecordNotFound\n\t}\n\n\treturn result, nil\n}", "func (c *UserClient) QueryRoomuse(u *User) *RoomuseQuery {\n\tquery := &RoomuseQuery{config: c.config}\n\tquery.path = func(ctx context.Context) (fromV *sql.Selector, _ error) {\n\t\tid := u.ID\n\t\tstep := sqlgraph.NewStep(\n\t\t\tsqlgraph.From(user.Table, user.FieldID, id),\n\t\t\tsqlgraph.To(roomuse.Table, roomuse.FieldID),\n\t\t\tsqlgraph.Edge(sqlgraph.O2M, false, user.RoomuseTable, user.RoomuseColumn),\n\t\t)\n\t\tfromV = sqlgraph.Neighbors(u.driver.Dialect(), step)\n\t\treturn fromV, nil\n\t}\n\treturn query\n}", "func (s *BasePlSqlParserListener) EnterUsing_index_clause(ctx *Using_index_clauseContext) {}" ]
[ "0.7300421", "0.7030628", "0.695963", "0.63989073", "0.62903017", "0.6052447", "0.60412854", "0.59246576", "0.56960696", "0.5692112", "0.56797403", "0.5607766", "0.55761683", "0.5556999", "0.5433705", "0.53773844", "0.5347844", "0.5342743", "0.5314456", "0.5305886", "0.5296419", "0.5259806", "0.5187181", "0.5183554", "0.5105172", "0.5074173", "0.50740445", "0.50659037", "0.50591105", "0.5029434", "0.5015892", "0.5004607", "0.49727398", "0.49640903", "0.48989433", "0.4898557", "0.48979223", "0.48975936", "0.48970303", "0.488947", "0.48373547", "0.4824572", "0.48117542", "0.48060414", "0.47963214", "0.47952166", "0.47656888", "0.47334793", "0.47214785", "0.4708328", "0.4698227", "0.46755022", "0.46606722", "0.46569517", "0.4645199", "0.4643867", "0.4636051", "0.46196514", "0.46080923", "0.45895162", "0.45762128", "0.45747223", "0.4557246", "0.45386264", "0.45302323", "0.45300883", "0.45253962", "0.45106903", "0.45094743", "0.45033586", "0.45014867", "0.44806632", "0.44769862", "0.44750118", "0.4473566", "0.4464933", "0.44474283", "0.4437394", "0.4429866", "0.44289237", "0.4426096", "0.44230124", "0.44113466", "0.44089887", "0.44070214", "0.44001493", "0.43988594", "0.439349", "0.43895748", "0.4386124", "0.43804106", "0.43788746", "0.43774685", "0.43768936", "0.4366768", "0.43630946", "0.43506604", "0.43365526", "0.43069327", "0.43046784" ]
0.67135864
3
GetOrders retrieves paginated orders from the Mesh DB at a specific snapshot in time. Passing an empty string as `snapshotID` creates a new snapshot and returns the first set of results. To fetch all orders, continue to make requests supplying the `snapshotID` returned from the first request. If no further requests referencing a specific snapshot are received for 1 minute, the snapshot expires and can no longer be used.
func (app *App) GetOrders(page, perPage int, snapshotID string) (*rpc.GetOrdersResponse, error) { ordersInfos := []*zeroex.AcceptedOrderInfo{} if perPage <= 0 { return &rpc.GetOrdersResponse{ OrdersInfos: ordersInfos, SnapshotID: snapshotID, }, nil } var snapshot *db.Snapshot if snapshotID == "" { // Create a new snapshot snapshotID = uuid.New().String() var err error snapshot, err = app.db.Orders.GetSnapshot() if err != nil { return nil, err } expirationTimestamp := time.Now().Add(1 * time.Minute) app.snapshotExpirationWatcher.Add(expirationTimestamp, snapshotID) app.muIdToSnapshotInfo.Lock() app.idToSnapshotInfo[snapshotID] = snapshotInfo{ Snapshot: snapshot, ExpirationTimestamp: expirationTimestamp, } app.muIdToSnapshotInfo.Unlock() } else { // Try and find an existing snapshot app.muIdToSnapshotInfo.Lock() info, ok := app.idToSnapshotInfo[snapshotID] if !ok { app.muIdToSnapshotInfo.Unlock() return nil, ErrSnapshotNotFound{id: snapshotID} } snapshot = info.Snapshot // Reset the snapshot's expiry app.snapshotExpirationWatcher.Remove(info.ExpirationTimestamp, snapshotID) expirationTimestamp := time.Now().Add(1 * time.Minute) app.snapshotExpirationWatcher.Add(expirationTimestamp, snapshotID) app.idToSnapshotInfo[snapshotID] = snapshotInfo{ Snapshot: snapshot, ExpirationTimestamp: expirationTimestamp, } app.muIdToSnapshotInfo.Unlock() } notRemovedFilter := app.db.Orders.IsRemovedIndex.ValueFilter([]byte{0}) var selectedOrders []*meshdb.Order err := snapshot.NewQuery(notRemovedFilter).Offset(page * perPage).Max(perPage).Run(&selectedOrders) if err != nil { return nil, err } for _, order := range selectedOrders { ordersInfos = append(ordersInfos, &zeroex.AcceptedOrderInfo{ OrderHash: order.Hash, SignedOrder: order.SignedOrder, FillableTakerAssetAmount: order.FillableTakerAssetAmount, }) } getOrdersResponse := &rpc.GetOrdersResponse{ SnapshotID: snapshotID, OrdersInfos: ordersInfos, } return getOrdersResponse, nil }
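A minimal sketch of how a caller might page through every order using the snapshot protocol described by the query above: start with an empty `snapshotID`, then keep passing back the `SnapshotID` from each response until a page comes back short. The `ordersGetter` interface and the mirrored response types are hypothetical stand-ins so the sketch compiles on its own; the real `*App`, `rpc.GetOrdersResponse`, and `zeroex.AcceptedOrderInfo` types from the document are assumed to match the fields used here. The short-page stop condition relies on the server returning at most `perPage` results per call, which matches the `Offset`/`Max` query in the document.

```go
package ordersclient

import "fmt"

// AcceptedOrderInfo mirrors the zeroex.AcceptedOrderInfo fields used here (assumption).
type AcceptedOrderInfo struct {
	OrderHash string
}

// GetOrdersResponse mirrors rpc.GetOrdersResponse from the document (assumption).
type GetOrdersResponse struct {
	SnapshotID  string
	OrdersInfos []*AcceptedOrderInfo
}

// ordersGetter is a hypothetical stand-in for *App so the sketch is self-contained.
type ordersGetter interface {
	GetOrders(page, perPage int, snapshotID string) (*GetOrdersResponse, error)
}

// fetchAllOrders pages through a single snapshot until a short page signals the end.
// Requests must arrive within the 1-minute snapshot expiry window, or a fresh
// snapshot (and a restart from page 0) is required.
func fetchAllOrders(app ordersGetter, perPage int) ([]*AcceptedOrderInfo, error) {
	if perPage <= 0 {
		return nil, fmt.Errorf("perPage must be positive, got %d", perPage)
	}
	var all []*AcceptedOrderInfo
	snapshotID := "" // empty string asks the server to create a new snapshot
	for page := 0; ; page++ {
		resp, err := app.GetOrders(page, perPage, snapshotID)
		if err != nil {
			return nil, fmt.Errorf("page %d: %w", page, err)
		}
		snapshotID = resp.SnapshotID // reuse the same snapshot on every following request
		all = append(all, resp.OrdersInfos...)
		if len(resp.OrdersInfos) < perPage {
			return all, nil // short page: no more orders in this snapshot
		}
	}
}
```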
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func GetOrders(c *gin.Context) {\n\tid := c.Params.ByName(\"id\")\n\n\tif id == \"\" {\n\t\terrors.ErrRequiredParam(c.Writer, http.StatusBadRequest, \"order id is required\")\n\t\treturn\n\t}\n\n\torder, err := s.client.GetOrder(id)\n\tif err != nil {\n\t\ts.l.Printf(\"failed to request order information: %s\\n\", err)\n\t\treturn\n\t}\n\n\tmodels.Respond(c.Writer, order)\n\treturn\n}", "func GetOrders() (orders []Orders, err error) {\r\n\tvar rows *sql.Rows\r\n\tif rows, err = Get(`select * from orders where deleted_at is null order by created_at desc;`); err != nil {\r\n\t\tCheckError(\"Error getting Orders.\", err, false)\r\n\t\treturn nil, err\r\n\t}\r\n\r\n\tdefer rows.Close()\r\n\tfor rows.Next() {\r\n\t\torder := Orders{}\r\n\t\tif err = rows.Scan(&order.ID, &order.DocEntry, &order.DocNum, &order.Canceled, &order.CardCode, &order.CardName, &order.VatSum, &order.DocTotal, &order.Synced, &order.CreatedBy, &order.CreatedAt, &order.UpdatedAt, &order.DeletedAt, &order.Comment, &order.Returned, &order.DiscountApprovedBy); err != nil {\r\n\t\t\tCheckError(\"Error Scanning Orders.\", err, false)\r\n\t\t} else {\r\n\t\t\torders = append(orders, order)\r\n\t\t}\r\n\t}\r\n\r\n\treturn\r\n}", "func (h *HitBTC) GetOrders(ctx context.Context, currency string) ([]OrderHistoryResponse, error) {\n\tvalues := url.Values{}\n\tvalues.Set(\"symbol\", currency)\n\tvar result []OrderHistoryResponse\n\n\treturn result, h.SendAuthenticatedHTTPRequest(ctx, exchange.RestSpot, http.MethodGet,\n\t\tapiV2OrderHistory,\n\t\tvalues,\n\t\ttradingRequests,\n\t\t&result)\n}", "func (s *ApiService) GetOrders(ctx context.Context, orderId string) (ordf.ImplResponse, error) {\n\t// TODO: implement long polling on separate polling API\n\t// will need to update SDK to pass in last known state and check for change\n\torder, err := s.ordersService.GetOrder(ctx, orderId)\n\tif err != nil {\n\t\treturn ordf.Response(500, nil), err\n\t}\n\n\treturn ordf.Response(200, order), nil\n}", "func (c *Client) GetOrders(pageID int) *types.OrderList {\n\torders := &types.OrderList{}\n\tc.Client.Find(&orders.Items).Where(\"id >= ?\", pageID).Order(\"id\").Limit(pageSize + 1)\n\tif len(orders.Items) == pageSize+1 {\n\t\torders.NextPageID = orders.Items[len(orders.Items)-1].ID\n\t\torders.Items = orders.Items[:pageSize+1]\n\t}\n\treturn orders\n}", "func (s *Client) GetOrders(options *types.Options) (orders []*types.Order, err error) {\n\turl := baseURL + \"/orders\"\n\tquery := util.ParseOptions(options)\n\tsign, err := s.sign(query)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbody, err := s.getResponse(url, \"GET\", sign, query)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\terr = json.Unmarshal(body, &orders)\n\treturn\n}", "func GetOrders(page int, limit int) ([]*Order, error) {\n\tvar orders []*Order\n\n\terr := db.Offset(page * limit).Limit(limit).Find(&orders).Error\n\tif err != nil && err != gorm.ErrRecordNotFound {\n\t\treturn nil, err\n\t}\n\n\treturn orders, nil\n}", "func (h *HitBTC) GetOpenOrders(ctx context.Context, currency string) ([]OrderHistoryResponse, error) {\n\tvalues := url.Values{}\n\tvalues.Set(\"symbol\", currency)\n\tvar result []OrderHistoryResponse\n\n\treturn result, h.SendAuthenticatedHTTPRequest(ctx, exchange.RestSpot, http.MethodGet,\n\t\tapiv2OpenOrders,\n\t\tvalues,\n\t\ttradingRequests,\n\t\t&result)\n}", "func (driver *Driver) GetSnapshot(volumeID, snapshotID, snapshotName string) ([]*storagedriver.Snapshot, error) {\n\tvar snapshotsInt []*storagedriver.Snapshot\n\tif volumeID != \"\" 
{\n\t\tvolumes, err := driver.getVolume(volumeID, \"\")\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tfor _, volume := range volumes {\n\t\t\tfor _, destSnap := range volume.DestSnapList {\n\t\t\t\tsnapshot, err := driver.getSnapshot(strconv.Itoa(int(destSnap.([]interface{})[2].(float64))), \"\")\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\n\t\t\t\tvolSize, _ := strconv.Atoi(volume.VolSize)\n\t\t\t\tsnapshotSD := &storagedriver.Snapshot{\n\t\t\t\t\tName: snapshot[0].Name,\n\t\t\t\t\tVolumeID: strconv.Itoa(volume.Index),\n\t\t\t\t\tSnapshotID: strconv.Itoa(snapshot[0].Index),\n\t\t\t\t\tVolumeSize: strconv.Itoa(volSize / 1024 / 1024),\n\t\t\t\t\tStartTime: snapshot[0].CreationTime,\n\t\t\t\t\tDescription: \"\",\n\t\t\t\t\tStatus: \"\",\n\t\t\t\t}\n\t\t\t\tsnapshotsInt = append(snapshotsInt, snapshotSD)\n\t\t\t}\n\t\t}\n\t} else {\n\t\tsnapshots, err := driver.getSnapshot(snapshotID, snapshotName)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tfor _, snapshot := range snapshots {\n\t\t\tsnapshot, err := goxtremio.GetSnapshot(strconv.Itoa(snapshot.Index), \"\")\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tvolume, err := driver.getVolume(strconv.Itoa(int(snapshot.AncestorVolID[2].(float64))), \"\")\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tvolSize, _ := strconv.Atoi(volume[0].VolSize)\n\t\t\tsnapshotSD := &storagedriver.Snapshot{\n\t\t\t\tName: snapshot.Name,\n\t\t\t\tVolumeID: strconv.Itoa(int(snapshot.AncestorVolID[2].(float64))),\n\t\t\t\tSnapshotID: strconv.Itoa(snapshot.Index),\n\t\t\t\tVolumeSize: strconv.Itoa(volSize / 1024 / 1024),\n\t\t\t\tStartTime: snapshot.CreationTime,\n\t\t\t\tDescription: \"\",\n\t\t\t\tStatus: \"\",\n\t\t\t}\n\t\t\tsnapshotsInt = append(snapshotsInt, snapshotSD)\n\t\t}\n\n\t}\n\n\treturn snapshotsInt, nil\n}", "func (h *HUOBI) GetOrders(ctx context.Context, symbol currency.Pair, types, start, end, states, from, direct, size string) ([]OrderInfo, error) {\n\tresp := struct {\n\t\tOrders []OrderInfo `json:\"data\"`\n\t}{}\n\n\tvals := url.Values{}\n\tsymbolValue, err := h.FormatSymbol(symbol, asset.Spot)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvals.Set(\"symbol\", symbolValue)\n\tvals.Set(\"states\", states)\n\n\tif types != \"\" {\n\t\tvals.Set(\"types\", types)\n\t}\n\n\tif start != \"\" {\n\t\tvals.Set(\"start-date\", start)\n\t}\n\n\tif end != \"\" {\n\t\tvals.Set(\"end-date\", end)\n\t}\n\n\tif from != \"\" {\n\t\tvals.Set(\"from\", from)\n\t}\n\n\tif direct != \"\" {\n\t\tvals.Set(\"direct\", direct)\n\t}\n\n\tif size != \"\" {\n\t\tvals.Set(\"size\", size)\n\t}\n\n\terr = h.SendAuthenticatedHTTPRequest(ctx, exchange.RestSpot, http.MethodGet, huobiGetOrders, vals, nil, &resp, false)\n\treturn resp.Orders, err\n}", "func (a *SnapshotApiService) SnapshotsGet(ctx _context.Context) ApiSnapshotsGetRequest {\n\treturn ApiSnapshotsGetRequest{\n\t\tApiService: a,\n\t\tctx: ctx,\n\t}\n}", "func (a *OrdersApiService) V1OrdersGet(ctx _context.Context, localVarOptionals *V1OrdersGetOpts) ([]OrderExecutionReport, *_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodGet\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue []OrderExecutionReport\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/v1/orders\"\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := 
_neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\tif localVarOptionals != nil && localVarOptionals.ExchangeId.IsSet() {\n\t\tlocalVarQueryParams.Add(\"exchange_id\", parameterToString(localVarOptionals.ExchangeId.Value(), \"\"))\n\t}\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\", \"appliction/json\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 490 {\n\t\t\tvar v Message\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHTTPResponse, nil\n}", "func GetOrders(db *sqlx.DB) gin.HandlerFunc {\n\n\treturn func(c *gin.Context) {\n\n\t\tvar user1 User\n\t\tuserName, exists := c.Get(\"user\")\n\t\tif !exists {\n\t\t\tc.AbortWithError(503, errors.NewAPIError(503, \"failed to get user\", \"internal server error\", c))\n\t\t\treturn\n\t\t}\n\n\t\tdbErr := db.Get(&user1, \"SELECT * FROM gaea.user WHERE user_name=$1\", userName)\n\t\tif dbErr != nil {\n\t\t\tc.AbortWithError(503, errors.NewAPIError(503, \"failed to get user\", \"internal server error\", c))\n\t\t\treturn\n\t\t}\n\n\t\tvar memberStatus bool\n\t\tswitch {\n\t\tcase user1.Role == \"nonmember\":\n\t\t\tmemberStatus = false\n\t\tdefault:\n\t\t\tmemberStatus = true\n\t\t}\n\n\t\tvar ords []Order\n\t\tvar retOrds []Order\n\t\tvar qtyOrd int\n\n\t\terr1 := db.Get(&qtyOrd, `SELECT COUNT(*) FROM gaea.order WHERE user_name=$1`,\n\t\t\tuserName)\n\t\tif err1 != nil {\n\t\t\tfmt.Println(err1)\n\t\t\tc.AbortWithError(503, errors.NewAPIError(503, \"failed to get orders\", \"internal server error\", c))\n\t\t\treturn\n\t\t}\n\t\tif qtyOrd > 0 {\n\t\t\terr2 := db.Select(&ords, 
`SELECT * FROM gaea.order WHERE user_name=$1`,\n\t\t\t\tuserName)\n\t\t\tif err2 != nil {\n\t\t\t\tfmt.Println(err2)\n\t\t\t\tc.AbortWithError(503, errors.NewAPIError(503, \"failed to get orders\", \"internal server error\", c))\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tvar amtErr error\n\n\t\t\tfor _, order := range ords {\n\t\t\t\torder.ItemQty, order.AmountTotal, amtErr = CalcOrderTotals(order.OrderId, memberStatus, db)\n\t\t\t\tif amtErr != nil {\n\t\t\t\t\tfmt.Printf(\"%s\", amtErr)\n\t\t\t\t}\n\t\t\t\tretOrds = append(retOrds, order)\n\t\t\t}\n\t\t}\n\n\t\tc.JSON(200, gin.H{\"qty\": qtyOrd, \"orders\": retOrds})\n\t}\n}", "func (wc *WooCommerce) GetOrders(page int) ([]models.Order, error) {\n\tbody, err := wc.GetOrdersJSON(page)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar result []models.Order\n\tif err := json.Unmarshal(body, &result); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn result, nil\n}", "func (s *SnapshotsServiceOp) Get(snapshotID string) (*Snapshot, *Response, error) {\n\treturn s.get(interface{}(snapshotID))\n}", "func (a *SnapshotApiService) SnapshotsGet(ctx _context.Context, optionals *SnapshotsGetOpts) (Snapshots, *APIResponse, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodGet\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue Snapshots\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/snapshots\"\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\tif optionals != nil && optionals.Pretty != nil {\n\t\tlocalVarQueryParams.Add(\"pretty\", parameterToString(*optionals.Pretty, \"\"))\n\t}\n\tif optionals != nil && optionals.Depth != nil {\n\t\tlocalVarQueryParams.Add(\"depth\", parameterToString(*optionals.Depth, \"\"))\n\t}\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\tif optionals != nil && optionals.XContractNumber != nil {\n\t\tlocalVarHeaderParams[\"X-Contract-Number\"] = parameterToString(*optionals.XContractNumber, \"\")\n\t}\n\tif ctx != nil {\n\t\t// API Key Authentication\n\t\tif auth, ok := ctx.Value(ContextAPIKey).(APIKey); ok {\n\t\t\tvar key string\n\t\t\tif auth.Prefix != \"\" {\n\t\t\t\tkey = auth.Prefix + \" \" + auth.Key\n\t\t\t} else {\n\t\t\t\tkey = auth.Key\n\t\t\t}\n\t\t\tlocalVarHeaderParams[\"Authorization\"] = key\n\t\t}\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := a.client.callAPI(r)\n\tlocalVarAPIResponse := &APIResponse {\n\t\tResponse: localVarHTTPResponse,\n\t\tMethod: localVarHTTPMethod,\n\t\tRequestURL: localVarPath,\n\t\tOperation: 
\"SnapshotsGet\",\n\t}\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarReturnValue, localVarAPIResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tlocalVarAPIResponse.Payload = localVarBody\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarAPIResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarAPIResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\treturn localVarReturnValue, localVarAPIResponse, newErr\n\t}\n\n\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarAPIResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarAPIResponse, nil\n}", "func (controller OrderController) GetOrders() *graphql.Field {\n\treturn &graphql.Field{\n\t\tType: graphql.NewList(ordertype.OrderType),\n\t\tArgs: graphql.FieldConfigArgument{\n\t\t\t\"token\": &graphql.ArgumentConfig{\n\t\t\t\tType: graphql.String,\n\t\t\t},\n\t\t\t\"skip\": &graphql.ArgumentConfig{\n\t\t\t\tType: graphql.Int,\n\t\t\t},\n\t\t\t\"limit\": &graphql.ArgumentConfig{\n\t\t\t\tType: graphql.Int,\n\t\t\t},\n\t\t},\n\t\tResolve: func(p graphql.ResolveParams) (interface{}, error) {\n\t\t\ttoken, _ := p.Args[\"token\"].(string)\n\t\t\tskip, _ := p.Args[\"skip\"].(int)\n\t\t\tlimit, _ := p.Args[\"limit\"].(int)\n\n\t\t\tuser, _ := controller.usercase.ParseToken(p.Context, token)\n\n\t\t\torders, err := controller.ordercase.GetOrders(p.Context, user, skip, limit)\n\n\t\t\treturn orders, err\n\t\t},\n\t}\n\n}", "func (s *SnapshotsServiceOp) Get(ctx context.Context, snapshotID string) (*Snapshot, *Response, error) {\n\treturn s.get(ctx, snapshotID)\n}", "func GetOrders() ([]byte, error) {\n\tvar db, _ = sql.Open(\"sqlite3\", \"cache/users.sqlite3\")\n\tdefer db.Close()\n\tvar ou string\n\tvar ta, ts int64 \n\tq, err := db.Query(\"select ouid, chargedamount, timestamp from orders\")\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\t\n\tvar a []interface{}\n\t\n\tfor q.Next() {\n\t\tq.Scan(&ou, &ta, &ts)\n\t\tb := make(map[string]interface{})\t\n\t\tb[\"ouid\"] = ou\n\t\tb[\"chargedamount\"] = float64(ta)/100\n\t\t// b[\"timestamp\"] = ts\n\t\tb[\"timestamp\"] = string(time.Unix(ts, 0).Format(\"02.01.2006 15:04:05\"))\n\t\ta = append(a, b)\n\t}\n\t\n\tgetord, err := json.Marshal(a)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t\treturn getord, nil\n}", "func (a *App) getOrders(w http.ResponseWriter, r *http.Request) {\n\tpage, err := strconv.Atoi(r.URL.Query().Get(\"page\"))\n\tif err != nil {\n\t\thelpers.RespondWithError(w, http.StatusBadRequest, \"INVALID_PAGE_NUMBER\")\n\t\treturn\n\t}\n\n\tlimit, err := strconv.Atoi(r.URL.Query().Get(\"limit\"))\n\tif err != nil {\n\t\thelpers.RespondWithError(w, http.StatusBadRequest, \"INVALID_LIMIT_NUMBER\")\n\t\treturn\n\t}\n\n\tOrders, err := models.GetOrders(a.DB, (page - 1), limit)\n\tif err != nil {\n\t\thelpers.RespondWithError(w, http.StatusInternalServerError, \"DB_CONNECTION_ERR\")\n\t\treturn\n\t}\n\tif len(Orders) == 0 
{\n\t\thelpers.RespondWithError(w, http.StatusInternalServerError, \"DATA_NOT_FOUND\")\n\t\treturn\n\t}\n\thelpers.RespondWithJSON(w, http.StatusOK, Orders)\n}", "func (c *TradeClient) GetHistoryOrders(param *model.GetHistoryOrdersParam, page, limit int) (*model.HistoryOrderList, error) {\n\tif page < 1 {\n\t\treturn nil, errors.New(\"page must >=1\")\n\t}\n\tif limit <= 0 {\n\t\treturn nil, errors.New(\"limit must >0\")\n\t}\n\n\tqueries := map[string]string{\n\t\t\"page\": strconv.Itoa(page),\n\t\t\"limit\": strconv.Itoa(limit),\n\t}\n\treq := &coremodel.ApiRequestModel{\n\t\tParam: param,\n\t}\n\tbody, err := json.Marshal(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tres := model.NewGetHistoryOrdersResponse()\n\terr = c.requester.Post(\"/api/v1/order_history\", body, queries, true, res)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn res.Data, nil\n}", "func getOrders(shopCode string) error {\n\n\tmethods := []string{\"gy.erp.trade.history.get\", \"gy.erp.trade.get\"}\n\tpgSize, _ := strconv.Atoi(config.Config(\"PAGE_SIZE\"))\n\n\tif err := saveOrders(pgSize, shopCode, methods); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (w *ServerInterfaceWrapper) GetOrders(ctx echo.Context) error {\n\tvar err error\n\n\tctx.Set(ApiKeyAuthScopes, []string{\"\"})\n\n\t// Parameter object where we will unmarshal all parameters from the context\n\tvar params GetOrdersParams\n\t// ------------- Optional query parameter \"symbol\" -------------\n\n\terr = runtime.BindQueryParameter(\"form\", true, false, \"symbol\", ctx.QueryParams(), &params.Symbol)\n\tif err != nil {\n\t\treturn echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf(\"Invalid format for parameter symbol: %s\", err))\n\t}\n\n\t// ------------- Optional query parameter \"from\" -------------\n\n\terr = runtime.BindQueryParameter(\"form\", true, false, \"from\", ctx.QueryParams(), &params.From)\n\tif err != nil {\n\t\treturn echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf(\"Invalid format for parameter from: %s\", err))\n\t}\n\n\t// ------------- Optional query parameter \"to\" -------------\n\n\terr = runtime.BindQueryParameter(\"form\", true, false, \"to\", ctx.QueryParams(), &params.To)\n\tif err != nil {\n\t\treturn echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf(\"Invalid format for parameter to: %s\", err))\n\t}\n\n\t// ------------- Optional query parameter \"status\" -------------\n\n\terr = runtime.BindQueryParameter(\"form\", true, false, \"status\", ctx.QueryParams(), &params.Status)\n\tif err != nil {\n\t\treturn echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf(\"Invalid format for parameter status: %s\", err))\n\t}\n\n\t// ------------- Optional query parameter \"limit\" -------------\n\n\terr = runtime.BindQueryParameter(\"form\", true, false, \"limit\", ctx.QueryParams(), &params.Limit)\n\tif err != nil {\n\t\treturn echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf(\"Invalid format for parameter limit: %s\", err))\n\t}\n\n\t// Invoke the callback with all the unmarshalled arguments\n\terr = w.Handler.GetOrders(ctx, params)\n\treturn err\n}", "func (s *service) GetMarketSnapshot(ctx context.Context) (MarketSnapshot, error) {\n\tlogger := log.With(s.logger, \"method\", \"GetMarketSnapshot\")\n\tsnapshot := MarketSnapshot{}\n\tif Orders.IsEmpty() {\n\t\tlevel.Error(logger).Log(\"err\", ErrOrderBookIsEmpty)\n\t\treturn snapshot, ErrOrderBookIsEmpty\n\t}\n\n\tfor order := range Orders.IterBuffered() {\n\t\tval := reflect.ValueOf(order.Val)\n\n\t\tnew := 
MarketSnapshotItem{\n\t\t\tPrice: val.FieldByName(\"Price\").Float(),\n\t\t\tQuantity: val.FieldByName(\"Quantity\").Int(),\n\t\t}\n\t\tif val.FieldByName(\"Status\").String() == \"Active\" {\n\t\t\tif strings.ToUpper(val.FieldByName(\"Side\").String()) == \"ASK\" {\n\t\t\t\tsnapshot.Asks = append(snapshot.Asks, new)\n\t\t\t} else {\n\t\t\t\tsnapshot.Bids = append(snapshot.Bids, new)\n\t\t\t}\n\t\t}\n\n\t}\n\n\t// sorting\n\tsnapshot.Sort()\n\n\tsnapshot.Spread = spread.getSpread()\n\treturn snapshot, nil\n}", "func (order *Order) Get(pan *Panaccess, params *url.Values) ([]Order, error) {\n\t//Everything has a limit\n\tif (*params).Get(\"limit\") == \"\" {\n\t\t(*params).Add(\"limit\", \"1000\")\n\t}\n\t//Call Function\n\tresp, err := pan.Call(\n\t\t\"getListOfOrders\",\n\t\tparams)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t//Retrieve all rows and parse as a slice of Subscriber\n\tvar rows GetOrdersFilterResponse\n\tbodyBytes, err := json.Marshal(resp.Answer)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\terr = json.Unmarshal(bodyBytes, &rows)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !resp.Success {\n\t\treturn nil, errors.New(resp.ErrorMessage)\n\t}\n\treturn rows.OrderEntries, nil\n}", "func (k *Kraken) GetOrderHistory(getOrdersRequest *exchange.GetOrdersRequest) ([]exchange.OrderDetail, error) {\n\treq := GetClosedOrdersOptions{}\n\tif getOrdersRequest.StartTicks.Unix() > 0 {\n\t\treq.Start = fmt.Sprintf(\"%v\", getOrdersRequest.StartTicks.Unix())\n\t}\n\tif getOrdersRequest.EndTicks.Unix() > 0 {\n\t\treq.End = fmt.Sprintf(\"%v\", getOrdersRequest.EndTicks.Unix())\n\t}\n\n\tresp, err := k.GetClosedOrders(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar orders []exchange.OrderDetail\n\tfor i := range resp.Closed {\n\t\tsymbol := currency.NewPairDelimiter(resp.Closed[i].Descr.Pair,\n\t\t\tk.ConfigCurrencyPairFormat.Delimiter)\n\t\torderDate := time.Unix(int64(resp.Closed[i].StartTm), 0)\n\t\tside := exchange.OrderSide(strings.ToUpper(resp.Closed[i].Descr.Type))\n\n\t\torders = append(orders, exchange.OrderDetail{\n\t\t\tID: i,\n\t\t\tAmount: resp.Closed[i].Vol,\n\t\t\tRemainingAmount: (resp.Closed[i].Vol - resp.Closed[i].VolExec),\n\t\t\tExecutedAmount: resp.Closed[i].VolExec,\n\t\t\tExchange: k.Name,\n\t\t\tOrderDate: orderDate,\n\t\t\tPrice: resp.Closed[i].Price,\n\t\t\tOrderSide: side,\n\t\t\tCurrencyPair: symbol,\n\t\t})\n\t}\n\n\texchange.FilterOrdersBySide(&orders, getOrdersRequest.OrderSide)\n\texchange.FilterOrdersByCurrencies(&orders, getOrdersRequest.Currencies)\n\n\treturn orders, nil\n}", "func (h *HUOBIHADAX) GetOrders(symbol, types, start, end, states, from, direct, size string) ([]OrderInfo, error) {\n\ttype response struct {\n\t\tResponse\n\t\tOrders []OrderInfo `json:\"data\"`\n\t}\n\n\tvals := url.Values{}\n\tvals.Set(\"symbol\", symbol)\n\tvals.Set(\"states\", states)\n\n\tif types != \"\" {\n\t\tvals.Set(\"types\", types)\n\t}\n\n\tif start != \"\" {\n\t\tvals.Set(\"start-date\", start)\n\t}\n\n\tif end != \"\" {\n\t\tvals.Set(\"end-date\", end)\n\t}\n\n\tif from != \"\" {\n\t\tvals.Set(\"from\", from)\n\t}\n\n\tif direct != \"\" {\n\t\tvals.Set(\"direct\", direct)\n\t}\n\n\tif size != \"\" {\n\t\tvals.Set(\"size\", size)\n\t}\n\n\tvar result response\n\terr := h.SendAuthenticatedHTTPRequest(http.MethodGet, huobihadaxGetOrders, vals, &result)\n\n\tif result.ErrorMessage != \"\" {\n\t\treturn nil, errors.New(result.ErrorMessage)\n\t}\n\treturn result.Orders, err\n}", "func GetOpenOrders() (orders []Order, error error) {\n\tjsonData, err := 
doTauRequest(1, \"GET\", \"trading/myopenorders/\", nil)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"GetOpenOrders->%v\", err)\n\t}\n\tlog.Tracef(\"jsonData=%s\", string(jsonData))\n\tif err := json.Unmarshal(jsonData, &orders); err != nil {\n\t\treturn nil, fmt.Errorf(\"GetOpenOrders->%v\", err)\n\t}\n\treturn orders, nil\n}", "func (a *SnapshotApiService) SnapshotsGetExecute(r ApiSnapshotsGetRequest) (Snapshots, *APIResponse, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodGet\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue Snapshots\n\t)\n\n\tlocalBasePath, err := a.client.cfg.ServerURLWithContext(r.ctx, \"SnapshotApiService.SnapshotsGet\")\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, GenericOpenAPIError{error: err.Error()}\n\t}\n\n\tlocalVarPath := localBasePath + \"/snapshots\"\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\tif r.pretty != nil {\n\t\tlocalVarQueryParams.Add(\"pretty\", parameterToString(*r.pretty, \"\"))\n\t}\n\tif r.depth != nil {\n\t\tlocalVarQueryParams.Add(\"depth\", parameterToString(*r.depth, \"\"))\n\t}\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\tif r.xContractNumber != nil {\n\t\tlocalVarHeaderParams[\"X-Contract-Number\"] = parameterToString(*r.xContractNumber, \"\")\n\t}\n\tif r.ctx != nil {\n\t\t// API Key Authentication\n\t\tif auth, ok := r.ctx.Value(ContextAPIKeys).(map[string]APIKey); ok {\n\t\t\tif apiKey, ok := auth[\"Token Authentication\"]; ok {\n\t\t\t\tvar key string\n\t\t\t\tif apiKey.Prefix != \"\" {\n\t\t\t\t\tkey = apiKey.Prefix + \" \" + apiKey.Key\n\t\t\t\t} else {\n\t\t\t\t\tkey = apiKey.Key\n\t\t\t\t}\n\t\t\t\tlocalVarHeaderParams[\"Authorization\"] = key\n\t\t\t}\n\t\t}\n\t}\n\treq, err := a.client.prepareRequest(r.ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := a.client.callAPI(req)\n\n\tlocalVarAPIResponse := &APIResponse {\n\t\tResponse: localVarHTTPResponse,\n\t\tMethod: localVarHTTPMethod,\n\t\tRequestURL: localVarPath,\n\t\tOperation: \"SnapshotsGet\",\n\t}\n\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarReturnValue, localVarAPIResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tlocalVarAPIResponse.Payload = localVarBody\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarAPIResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, 
localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarAPIResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\treturn localVarReturnValue, localVarAPIResponse, newErr\n\t}\n\n\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarAPIResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarAPIResponse, nil\n}", "func (s *PurchaseOrdersEndpoint) Get(ctx context.Context, division int, id *types.GUID) (*PurchaseOrders, error) {\n\tb, _ := s.client.ResolvePathWithDivision(\"/api/v1/{division}/purchaseorder/PurchaseOrders\", division) // #nosec\n\tu, err := api.AddOdataKeyToURL(b, id)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\te := &PurchaseOrders{}\n\t_, _, requestError := s.client.NewRequestAndDo(ctx, \"GET\", u.String(), nil, e)\n\treturn e, requestError\n}", "func (k *Kraken) GetActiveOrders(getOrdersRequest *exchange.GetOrdersRequest) ([]exchange.OrderDetail, error) {\n\tresp, err := k.GetOpenOrders(OrderInfoOptions{})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar orders []exchange.OrderDetail\n\tfor i := range resp.Open {\n\t\tsymbol := currency.NewPairDelimiter(resp.Open[i].Descr.Pair,\n\t\t\tk.ConfigCurrencyPairFormat.Delimiter)\n\t\torderDate := time.Unix(int64(resp.Open[i].StartTm), 0)\n\t\tside := exchange.OrderSide(strings.ToUpper(resp.Open[i].Descr.Type))\n\n\t\torders = append(orders, exchange.OrderDetail{\n\t\t\tID: i,\n\t\t\tAmount: resp.Open[i].Vol,\n\t\t\tRemainingAmount: (resp.Open[i].Vol - resp.Open[i].VolExec),\n\t\t\tExecutedAmount: resp.Open[i].VolExec,\n\t\t\tExchange: k.Name,\n\t\t\tOrderDate: orderDate,\n\t\t\tPrice: resp.Open[i].Price,\n\t\t\tOrderSide: side,\n\t\t\tCurrencyPair: symbol,\n\t\t})\n\t}\n\n\texchange.FilterOrdersByTickRange(&orders, getOrdersRequest.StartTicks,\n\t\tgetOrdersRequest.EndTicks)\n\texchange.FilterOrdersBySide(&orders, getOrdersRequest.OrderSide)\n\texchange.FilterOrdersByCurrencies(&orders, getOrdersRequest.Currencies)\n\n\treturn orders, nil\n}", "func (wc *WooCommerce) GetOrdersJSON(page int) ([]byte, error) {\n\tif page < 1 {\n\t\tpage = 1\n\t}\n\t\n\tapiURL := fmt.Sprintf(\"%s/wp-json/wc/v3/orders?page=%d\", wc.URL, page)\n\treq, err := http.NewRequest(\"GET\", apiURL, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.SetBasicAuth(wc.Username, wc.Password)\n\n\tresp, err := http.DefaultClient.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t_ = resp.Body.Close()\n\n\treturn body, nil\n}", "func (h *HUOBI) GetOpenOrders(ctx context.Context, symbol currency.Pair, accountID, side string, size int64) ([]OrderInfo, error) {\n\tresp := struct {\n\t\tOrders []OrderInfo `json:\"data\"`\n\t}{}\n\n\tvals := url.Values{}\n\tsymbolValue, err := h.FormatSymbol(symbol, asset.Spot)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvals.Set(\"symbol\", symbolValue)\n\tvals.Set(\"accountID\", accountID)\n\tif len(side) > 0 {\n\t\tvals.Set(\"side\", side)\n\t}\n\tvals.Set(\"size\", strconv.FormatInt(size, 10))\n\n\terr = h.SendAuthenticatedHTTPRequest(ctx, exchange.RestSpot, http.MethodGet, huobiGetOpenOrders, vals, nil, &resp, false)\n\treturn resp.Orders, err\n}", "func (h *Hbdm) OpenOrders(symbol string, 
pageIndex, pageSize *int) (orders *OrdersResponse, err error) {\n\tpayload := make(map[string]interface{}, 3)\n\tif symbol != \"\" {\n\t\tpayload[\"symbol\"] = symbol\n\t}\n\tif pageIndex != nil {\n\t\tpayload[\"page_index\"] = *pageIndex\n\t}\n\tif pageSize != nil {\n\t\tpayload[\"page_size\"] = *pageSize\n\t}\n\n\tr, err := h.client.do(\"POST\", \"contract_openorders\", payload, true)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tvar response interface{}\n\tif err = json.Unmarshal(r, &response); err != nil {\n\t\treturn\n\t}\n\n\tif err = handleErr(response); err != nil {\n\t\treturn\n\t}\n\n\terr = json.Unmarshal(r, &orders)\n\treturn\n}", "func (api *API) GetOpenOrders(accountName string) ([]*OpenOrders, error) {\n\tvar resp []*OpenOrders\n\terr := api.call(\"market_history\", \"get_open_orders\", []string{accountName}, &resp)\n\treturn resp, err\n}", "func (c *Coinbene) GetSwapOrderHistoryByOrderID(beginTime, endTime, symbol, status string,\n\tlatestOrderID int64) (SwapOrders, error) {\n\tv := url.Values{}\n\tif beginTime != \"\" {\n\t\tv.Set(\"beginTime\", beginTime)\n\t}\n\tif endTime != \"\" {\n\t\tv.Set(\"endTime\", endTime)\n\t}\n\tif symbol != \"\" {\n\t\tv.Set(\"symbol\", symbol)\n\t}\n\tif status != \"\" {\n\t\tv.Set(\"status\", status)\n\t}\n\tif latestOrderID != 0 {\n\t\tv.Set(\"latestOrderId\", strconv.FormatInt(latestOrderID, 10))\n\t}\n\ttype resp struct {\n\t\tData SwapOrders `json:\"data\"`\n\t}\n\n\tvar r resp\n\tpath := coinbeneAPIVersion + coinbeneClosedOrdersByPage\n\terr := c.SendAuthHTTPRequest(exchange.RestSwap, http.MethodGet,\n\t\tpath,\n\t\tcoinbeneClosedOrdersByPage,\n\t\ttrue,\n\t\tv,\n\t\t&r,\n\t\tcontractGetClosedOrdersbyPage)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn r.Data, nil\n}", "func (p *Poloniex) GetOpenOrders(ctx context.Context, currency string) (OpenOrdersResponse, error) {\n\tvalues := url.Values{}\n\tvalues.Set(\"currencyPair\", currency)\n\tresult := OpenOrdersResponse{}\n\treturn result, p.SendAuthenticatedHTTPRequest(ctx, exchange.RestSpot, http.MethodPost, poloniexOrders, values, &result.Data)\n}", "func (r *Repo) GetSnapshots() ([]*Snapshot, error) {\n\tout, err := exec.Command(resticCmd, \"-r\", r.Path, \"-p\",\n\t\tr.Passwordfile, \"snapshots\", \"--json\").Output()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfmt.Printf(\"%d bytes of output\\n\", len(out))\n\n\tvar snaps []*Snapshot\n\terr = json.Unmarshal(out, &snaps)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn snaps, nil\n}", "func GetAllOrders(service order.Service) http.HandlerFunc {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\torders, err := service.GetAllOrders(r.Context())\n\t\tif err != nil {\n\t\t\tutils.RespondJSON(w, err.Code, err.Error())\n\t\t\treturn\n\t\t}\n\t\tresponse := make([]response.Order, 0)\n\t\tfor _, order := range orders {\n\t\t\tresponse = append(response, toOrderResponse(order))\n\t\t}\n\t\tutils.RespondJSON(w, http.StatusOK, response)\n\t})\n}", "func (t *TauAPI) GetOpenOrders() (orders []Order, error error) {\n\tjsonData, err := t.doTauRequest(&TauReq{\n\t\tVersion: 1,\n\t\tMethod: \"GET\",\n\t\tPath: \"trading/myopenorders\",\n\t\tNeedsAuth: true,\n\t})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"GetOpenOrders->%v\", err)\n\t}\n\tif err := json.Unmarshal(jsonData, &orders); err != nil {\n\t\treturn nil, fmt.Errorf(\"GetOpenOrders->%v\", err)\n\t}\n\treturn orders, nil\n}", "func (client *AWSClient) GetSnapshots(ctx context.Context, imageID string) ([]string, error) {\n\tresult, err := 
client.svcEC2.DescribeImagesWithContext(ctx, &ec2.DescribeImagesInput{\n\t\tImageIds: []*string{aws.String(imageID)},\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar snapshots []string\n\tfor _, b := range result.Images[0].BlockDeviceMappings {\n\t\tif b.Ebs == nil {\n\t\t\tcontinue\n\t\t}\n\t\tsnapshots = append(snapshots, *b.Ebs.SnapshotId)\n\t}\n\n\treturn snapshots, nil\n}", "func (m *VirtualEndpoint) GetSnapshots()([]CloudPcSnapshotable) {\n val, err := m.GetBackingStore().Get(\"snapshots\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.([]CloudPcSnapshotable)\n }\n return nil\n}", "func (b *Bitmex) GetOrderHistory(ctx context.Context, req *order.MultiOrderRequest) (order.FilteredOrders, error) {\n\terr := req.Validate()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tparams := OrdersRequest{}\n\tresp, err := b.GetOrders(ctx, &params)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tformat, err := b.GetPairFormat(asset.PerpetualContract, false)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\torders := make([]order.Detail, len(resp))\n\tfor i := range resp {\n\t\torderSide := orderSideMap[resp[i].Side]\n\t\tvar orderStatus order.Status\n\t\torderStatus, err = order.StringToOrderStatus(resp[i].OrdStatus)\n\t\tif err != nil {\n\t\t\tlog.Errorf(log.ExchangeSys, \"%s %v\", b.Name, err)\n\t\t}\n\n\t\tpair := currency.NewPairWithDelimiter(resp[i].Symbol, resp[i].SettlCurrency, format.Delimiter)\n\n\t\tvar oType order.Type\n\t\toType, err = b.getOrderType(resp[i].OrdType)\n\t\tif err != nil {\n\t\t\tlog.Errorf(log.ExchangeSys, \"%s %v\", b.Name, err)\n\t\t}\n\n\t\torderDetail := order.Detail{\n\t\t\tPrice: resp[i].Price,\n\t\t\tAverageExecutedPrice: resp[i].AvgPx,\n\t\t\tAmount: resp[i].OrderQty,\n\t\t\tExecutedAmount: resp[i].CumQty,\n\t\t\tRemainingAmount: resp[i].LeavesQty,\n\t\t\tDate: resp[i].TransactTime,\n\t\t\tCloseTime: resp[i].Timestamp,\n\t\t\tExchange: b.Name,\n\t\t\tOrderID: resp[i].OrderID,\n\t\t\tSide: orderSide,\n\t\t\tStatus: orderStatus,\n\t\t\tType: oType,\n\t\t\tPair: pair,\n\t\t}\n\t\torderDetail.InferCostsAndTimes()\n\n\t\torders[i] = orderDetail\n\t}\n\treturn req.Filter(b.Name, orders), nil\n}", "func GetOrder(id int) (order Orders, err error) {\r\n\tvar rows *sql.Rows\r\n\tif rows, err = Get(fmt.Sprintf(`select * from orders o \r\n\t\tinner join ordereditems i on o.id = i.orderid \r\n\t\twhere o.id = %d and o.deleted_at is null;`, id)); err != nil {\r\n\t\tCheckError(\"Error getting Order data.\", err, false)\r\n\t\treturn Orders{}, err\r\n\t}\r\n\tdefer rows.Close()\r\n\r\n\tvar items []OrderedItems\r\n\tfor rows.Next() {\r\n\t\titem := OrderedItems{}\r\n\r\n\t\tif err = rows.Scan(&order.ID, &order.DocEntry, &order.DocNum, &order.Canceled, &order.CardCode, &order.CardName, &order.VatSum,\r\n\t\t\t&order.DocTotal, &order.Synced, &order.CreatedBy, &order.CreatedAt, &order.UpdatedAt, &order.DeletedAt, &order.Comment,\r\n\t\t\t&order.Returned, &order.DiscountApprovedBy, &item.ID, &item.OrderID, &item.ItemCode, &item.ItemName, &item.Price,\r\n\t\t\t&item.Quantity, &item.Discount, &item.SerialNumber); err != nil {\r\n\t\t\tCheckError(\"Error Scanning Order.\", err, false)\r\n\t\t} else {\r\n\t\t\titems = append(items, item)\r\n\t\t}\r\n\t}\r\n\r\n\torder.Items = items\r\n\treturn\r\n}", "func (h *HUOBIHADAX) GetOrdersMatch(symbol, types, start, end, from, direct, size string) ([]OrderMatchInfo, error) {\n\ttype response struct {\n\t\tResponse\n\t\tOrders []OrderMatchInfo `json:\"data\"`\n\t}\n\n\tvals := 
url.Values{}\n\tvals.Set(\"symbol\", symbol)\n\n\tif types != \"\" {\n\t\tvals.Set(\"types\", types)\n\t}\n\n\tif start != \"\" {\n\t\tvals.Set(\"start-date\", start)\n\t}\n\n\tif end != \"\" {\n\t\tvals.Set(\"end-date\", end)\n\t}\n\n\tif from != \"\" {\n\t\tvals.Set(\"from\", from)\n\t}\n\n\tif direct != \"\" {\n\t\tvals.Set(\"direct\", direct)\n\t}\n\n\tif size != \"\" {\n\t\tvals.Set(\"size\", size)\n\t}\n\n\tvar result response\n\terr := h.SendAuthenticatedHTTPRequest(http.MethodGet, huobihadaxGetOrdersMatch, vals, &result)\n\n\tif result.ErrorMessage != \"\" {\n\t\treturn nil, errors.New(result.ErrorMessage)\n\t}\n\treturn result.Orders, err\n}", "func (h *HUOBI) GetOrdersMatch(ctx context.Context, symbol currency.Pair, types, start, end, from, direct, size string) ([]OrderMatchInfo, error) {\n\tresp := struct {\n\t\tOrders []OrderMatchInfo `json:\"data\"`\n\t}{}\n\n\tvals := url.Values{}\n\tsymbolValue, err := h.FormatSymbol(symbol, asset.Spot)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvals.Set(\"symbol\", symbolValue)\n\n\tif types != \"\" {\n\t\tvals.Set(\"types\", types)\n\t}\n\n\tif start != \"\" {\n\t\tvals.Set(\"start-date\", start)\n\t}\n\n\tif end != \"\" {\n\t\tvals.Set(\"end-date\", end)\n\t}\n\n\tif from != \"\" {\n\t\tvals.Set(\"from\", from)\n\t}\n\n\tif direct != \"\" {\n\t\tvals.Set(\"direct\", direct)\n\t}\n\n\tif size != \"\" {\n\t\tvals.Set(\"size\", size)\n\t}\n\n\terr = h.SendAuthenticatedHTTPRequest(ctx, exchange.RestSpot, http.MethodGet, huobiGetOrdersMatch, vals, nil, &resp, false)\n\treturn resp.Orders, err\n}", "func (h *HUOBIHADAX) GetOpenOrders(accountID, symbol, side string, size int) ([]OrderInfo, error) {\n\ttype response struct {\n\t\tResponse\n\t\tOrders []OrderInfo `json:\"data\"`\n\t}\n\n\tvals := url.Values{}\n\tvals.Set(\"symbol\", symbol)\n\tvals.Set(\"accountID\", accountID)\n\tif len(side) > 0 {\n\t\tvals.Set(\"side\", side)\n\t}\n\tvals.Set(\"size\", fmt.Sprintf(\"%v\", size))\n\n\tvar result response\n\terr := h.SendAuthenticatedHTTPRequest(http.MethodGet, huobihadaxGetOpenOrders, vals, &result)\n\n\tif result.ErrorMessage != \"\" {\n\t\treturn nil, errors.New(result.ErrorMessage)\n\t}\n\n\treturn result.Orders, err\n}", "func (e *Huobi) GetOrders(stockType string) interface{} {\n\tstockType = strings.ToUpper(stockType)\n\tif _, ok := e.stockTypeMap[stockType]; !ok {\n\t\te.logger.Log(constant.ERROR, \"\", 0.0, 0.0, \"GetOrders() error, unrecognized stockType: \", stockType)\n\t\treturn false\n\t}\n\tresult, err := services.GetOrders(e.stockTypeMap[stockType] + \"usdt\")\n\tif err != nil {\n\t\te.logger.Log(constant.ERROR, \"\", 0.0, 0.0, \"GetOrders() error, \", err)\n\t\treturn false\n\t}\n\tif result.Status != \"ok\" {\n\t\te.logger.Log(constant.ERROR, \"\", 0.0, 0.0, \"GetOrders() error, \", result.ErrMsg)\n\t\treturn false\n\t}\n\torders := []Order{}\n\tcount := len(result.Data)\n\tfor i := 0; i < count; i++ {\n\t\torders = append(orders, Order{\n\t\t\tID: fmt.Sprint(result.Data[i].ID),\n\t\t\tPrice: conver.Float64Must(result.Data[i].Price),\n\t\t\tAmount: conver.Float64Must(result.Data[i].Amount),\n\t\t\tDealAmount: conver.Float64Must(result.Data[i].DealAmount),\n\t\t\tTradeType: e.tradeTypeMap[result.Data[i].TradeType],\n\t\t\tStockType: stockType,\n\t\t})\n\t}\n\treturn orders\n}", "func (r *OrdersService) Get(profileId int64, projectId int64, id int64) *OrdersGetCall {\n\tc := &OrdersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}\n\tc.profileId = profileId\n\tc.projectId = projectId\n\tc.id = id\n\treturn c\n}", "func 
(c *restClient) ListSnapshots(ctx context.Context, req *netapppb.ListSnapshotsRequest, opts ...gax.CallOption) *SnapshotIterator {\n\tit := &SnapshotIterator{}\n\treq = proto.Clone(req).(*netapppb.ListSnapshotsRequest)\n\tunm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}\n\tit.InternalFetch = func(pageSize int, pageToken string) ([]*netapppb.Snapshot, string, error) {\n\t\tresp := &netapppb.ListSnapshotsResponse{}\n\t\tif pageToken != \"\" {\n\t\t\treq.PageToken = pageToken\n\t\t}\n\t\tif pageSize > math.MaxInt32 {\n\t\t\treq.PageSize = math.MaxInt32\n\t\t} else if pageSize != 0 {\n\t\t\treq.PageSize = int32(pageSize)\n\t\t}\n\t\tbaseUrl, err := url.Parse(c.endpoint)\n\t\tif err != nil {\n\t\t\treturn nil, \"\", err\n\t\t}\n\t\tbaseUrl.Path += fmt.Sprintf(\"/v1/%v/snapshots\", req.GetParent())\n\n\t\tparams := url.Values{}\n\t\tparams.Add(\"$alt\", \"json;enum-encoding=int\")\n\t\tif req.GetFilter() != \"\" {\n\t\t\tparams.Add(\"filter\", fmt.Sprintf(\"%v\", req.GetFilter()))\n\t\t}\n\t\tif req.GetOrderBy() != \"\" {\n\t\t\tparams.Add(\"orderBy\", fmt.Sprintf(\"%v\", req.GetOrderBy()))\n\t\t}\n\t\tif req.GetPageSize() != 0 {\n\t\t\tparams.Add(\"pageSize\", fmt.Sprintf(\"%v\", req.GetPageSize()))\n\t\t}\n\t\tif req.GetPageToken() != \"\" {\n\t\t\tparams.Add(\"pageToken\", fmt.Sprintf(\"%v\", req.GetPageToken()))\n\t\t}\n\n\t\tbaseUrl.RawQuery = params.Encode()\n\n\t\t// Build HTTP headers from client and context metadata.\n\t\thds := append(c.xGoogHeaders, \"Content-Type\", \"application/json\")\n\t\theaders := gax.BuildHeaders(ctx, hds...)\n\t\te := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {\n\t\t\tif settings.Path != \"\" {\n\t\t\t\tbaseUrl.Path = settings.Path\n\t\t\t}\n\t\t\thttpReq, err := http.NewRequest(\"GET\", baseUrl.String(), nil)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\thttpReq.Header = headers\n\n\t\t\thttpRsp, err := c.httpClient.Do(httpReq)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tdefer httpRsp.Body.Close()\n\n\t\t\tif err = googleapi.CheckResponse(httpRsp); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tbuf, err := io.ReadAll(httpRsp.Body)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tif err := unm.Unmarshal(buf, resp); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\treturn nil\n\t\t}, opts...)\n\t\tif e != nil {\n\t\t\treturn nil, \"\", e\n\t\t}\n\t\tit.Response = resp\n\t\treturn resp.GetSnapshots(), resp.GetNextPageToken(), nil\n\t}\n\n\tfetch := func(pageSize int, pageToken string) (string, error) {\n\t\titems, nextPageToken, err := it.InternalFetch(pageSize, pageToken)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tit.items = append(it.items, items...)\n\t\treturn nextPageToken, nil\n\t}\n\n\tit.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)\n\tit.pageInfo.MaxSize = int(req.GetPageSize())\n\tit.pageInfo.Token = req.GetPageToken()\n\n\treturn it\n}", "func (o *Orders) GetAllOrders(db *gorm.DB, c echo.Context) (*[]Orders, error) {\n\torders := []Orders{}\n\t// With pagination implemented\n\terr := db.Debug().Scopes(Paginate(c)).Find(&orders).Error\n\tif err != nil {\n\t\treturn &[]Orders{}, err\n\t}\n\tfor i := range orders {\n\t\terr = db.Debug().Model(&Customers{}).Where(\"id = ?\", orders[i].Customer_id).Take(&orders[i].Customers).Error\n\t\tif err != nil {\n\t\t\treturn &[]Orders{}, err\n\t\t}\n\t\terr = db.Debug().Model(&Cars{}).Where(\"id = ?\", orders[i].Car_id).Take(&orders[i].Cars).Error\n\t\tif err != nil 
{\n\t\t\treturn &[]Orders{}, err\n\t\t}\n\t}\n\terr = db.Debug().Scopes(Paginate(c)).Find(&orders).Error\n\tif err != nil {\n\t\treturn &[]Orders{}, err\n\t}\n\treturn &orders, err\n}", "func (n *NameCom) ListOrders(request *ListOrdersRequest) (*ListOrdersResponse, error) {\n\tendpoint := fmt.Sprintf(\"/v4/orders\")\n\n\tvalues := url.Values{}\n\tif request.PerPage != 0 {\n\t\tvalues.Set(\"perPage\", fmt.Sprintf(\"%d\", request.PerPage))\n\t}\n\tif request.Page != 0 {\n\t\tvalues.Set(\"page\", fmt.Sprintf(\"%d\", request.Page))\n\t}\n\n\tbody, err := n.get(endpoint, values)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresp := &ListOrdersResponse{}\n\n\terr = json.NewDecoder(body).Decode(resp)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resp, nil\n}", "func (v *Kounta) GetOrders(token string, company string, siteID string) ([]Order, error) {\n\tclient := &http.Client{}\n\tclient.CheckRedirect = checkRedirectFunc\n\n\tu, _ := url.ParseRequestURI(baseURL)\n\tu.Path = fmt.Sprintf(ordersURL, company, siteID)\n\turlStr := fmt.Sprintf(\"%v\", u)\n\n\tr, err := http.NewRequest(\"GET\", urlStr, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tr.Header = http.Header(make(map[string][]string))\n\tr.Header.Set(\"Accept\", \"application/json\")\n\tr.Header.Set(\"Authorization\", \"Bearer \"+token)\n\n\tres, err := client.Do(r)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trawResBody, err := ioutil.ReadAll(res.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif res.StatusCode == 200 {\n\t\tvar resp []Order\n\n\t\t//fmt.Println(string(rawResBody))\n\n\t\terr = json.Unmarshal(rawResBody, &resp)\n\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn resp, nil\n\t}\n\treturn nil, fmt.Errorf(\"Failed to get Kounta Categories %s\", res.Status)\n\n}", "func (h *HitBTC) GetActiveorders(ctx context.Context, currency string) ([]Order, error) {\n\tvar resp []Order\n\terr := h.SendAuthenticatedHTTPRequest(ctx, exchange.RestSpot, http.MethodGet,\n\t\torders+\"?symbol=\"+currency,\n\t\turl.Values{},\n\t\ttradingRequests,\n\t\t&resp)\n\n\treturn resp, err\n}", "func (c *Coinbene) GetSwapOpenOrders(symbol string, pageNum, pageSize int) (SwapOrders, error) {\n\tv := url.Values{}\n\tv.Set(\"symbol\", symbol)\n\tif pageNum != 0 {\n\t\tv.Set(\"pageNum\", strconv.Itoa(pageNum))\n\t}\n\tif pageSize != 0 {\n\t\tv.Set(\"pageSize\", strconv.Itoa(pageSize))\n\t}\n\ttype resp struct {\n\t\tData SwapOrders `json:\"data\"`\n\t}\n\tvar r resp\n\tpath := coinbeneAPIVersion + coinbeneOpenOrders\n\terr := c.SendAuthHTTPRequest(exchange.RestSwap, http.MethodGet,\n\t\tpath,\n\t\tcoinbeneOpenOrders,\n\t\ttrue,\n\t\tv,\n\t\t&r,\n\t\tcontractGetOpenOrders)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn r.Data, nil\n}", "func (keeper *PersistentOrderKeeper) GetOrdersAtHeight(ctx sdk.Context, height int64) []*types.Order {\n\tstore := ctx.KVStore(keeper.marketKey)\n\tvar result []*types.Order\n\tstart := myposchain.ConcatKeys(\n\t\tOrderQueueKeyPrefix,\n\t\t[]byte(keeper.symbol),\n\t\t[]byte{0x0},\n\t\tint64ToBigEndianBytes(height),\n\t)\n\tend := myposchain.ConcatKeys(\n\t\tOrderQueueKeyPrefix,\n\t\t[]byte(keeper.symbol),\n\t\t[]byte{0x0},\n\t\tint64ToBigEndianBytes(height+1),\n\t)\n\titer := store.Iterator(start, end)\n\tdefer iter.Close()\n\tfor ; iter.Valid(); iter.Next() {\n\t\tikey := iter.Key()\n\t\torderID := string(ikey[len(end):])\n\t\torder := keeper.getOrder(ctx, orderID)\n\t\tresult = append(result, order)\n\t}\n\treturn result\n}", "func (c *Coinbene) FetchClosedOrders(symbol, 
latestID string) (OrdersInfo, error) {\n\tparams := url.Values{}\n\tparams.Set(\"symbol\", symbol)\n\tparams.Set(\"latestOrderId\", latestID)\n\tpath := coinbeneAPIVersion + coinbeneClosedOrders\n\tvar orders OrdersInfo\n\tfor i := int64(1); ; i++ {\n\t\ttemp := struct {\n\t\t\tData OrdersInfo `json:\"data\"`\n\t\t}{}\n\t\tparams.Set(\"pageNum\", strconv.FormatInt(i, 10))\n\t\terr := c.SendAuthHTTPRequest(exchange.RestSpot, http.MethodGet,\n\t\t\tpath,\n\t\t\tcoinbeneClosedOrders,\n\t\t\tfalse,\n\t\t\tparams,\n\t\t\t&temp,\n\t\t\tspotQueryClosedOrders)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tfor j := range temp.Data {\n\t\t\torders = append(orders, temp.Data[j])\n\t\t}\n\t\tif len(temp.Data) != 20 {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn orders, nil\n}", "func (m *Nitro) GetSnapshots() []*Snapshot {\n\tvar snaps []*Snapshot\n\tbuf := m.snapshots.MakeBuf()\n\tdefer m.snapshots.FreeBuf(buf)\n\titer := m.snapshots.NewIterator(CompareSnapshot, buf)\n\titer.SeekFirst()\n\tfor ; iter.Valid(); iter.Next() {\n\t\tsnaps = append(snaps, (*Snapshot)(iter.Get()))\n\t}\n\n\treturn snaps\n}", "func (s *SnapshotsServiceOp) get(ctx context.Context, ID string) (*Snapshot, *Response, error) {\n\tpath := fmt.Sprintf(\"%s/%s\", snapshotBasePath, ID)\n\n\treq, err := s.client.NewRequest(ctx, http.MethodGet, path, nil)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\troot := new(snapshotRoot)\n\tresp, err := s.client.Do(ctx, req, root)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn root.Snapshot, resp, err\n}", "func (h *Hbdm) HistoryOrders(symbol string, tradeType, orderType, status, create int, pageIndex, pageSize *int) (orders *OrdersResponse, err error) {\n\tpayload := make(map[string]interface{}, 7)\n\tpayload[\"symbol\"] = symbol\n\tpayload[\"trade_type\"] = tradeType\n\tpayload[\"type\"] = orderType\n\tpayload[\"status\"] = status\n\tpayload[\"create_date\"] = create\n\n\tif pageIndex != nil {\n\t\tpayload[\"page_index\"] = *pageIndex\n\t}\n\tif pageSize != nil {\n\t\tpayload[\"page_size\"] = *pageSize\n\t}\n\n\tr, err := h.client.do(\"POST\", \"contract_hisorders\", payload, true)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tvar response interface{}\n\tif err = json.Unmarshal(r, &response); err != nil {\n\t\treturn\n\t}\n\n\tif err = handleErr(response); err != nil {\n\t\treturn\n\t}\n\n\terr = json.Unmarshal(r, &orders)\n\treturn\n}", "func returnAllOrders(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\n\tfmt.Fprintf(w, \"Welcome to the returnAllOrders!\")\n\tfmt.Println(\"Endpoint Hit: returnAllOrders\")\n\n\tvar orders []Orders\n\n\tresults, err := db.Query(\"select orders.id, users.username, stocks.symbol, shares from orders inner join users on orders.user_id = users.id inner join stocks on orders.stock_id = stocks.id\")\n\n\tif err != nil {\n\t\tpanic(err.Error())\n\t}\n\n\tfor results.Next() {\n\t\tvar order Orders\n\t\terr = results.Scan(&order.ID, &order.Username, &order.Symbol, &order.Shares)\n\t\tif err != nil {\n\t\t\tpanic(err.Error())\n\t\t}\n\t\torders = append(orders, order)\n\t}\n\tjson.NewEncoder(w).Encode(orders)\n}", "func (b *Bitmex) GetActiveOrders(ctx context.Context, req *order.MultiOrderRequest) (order.FilteredOrders, error) {\n\terr := req.Validate()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tparams := OrdersRequest{\n\t\tFilter: \"{\\\"open\\\":true}\",\n\t}\n\tresp, err := b.GetOrders(ctx, &params)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tformat, err := 
b.GetPairFormat(asset.PerpetualContract, false)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\torders := make([]order.Detail, len(resp))\n\tfor i := range resp {\n\t\tvar orderStatus order.Status\n\t\torderStatus, err = order.StringToOrderStatus(resp[i].OrdStatus)\n\t\tif err != nil {\n\t\t\tlog.Errorf(log.ExchangeSys, \"%s %v\", b.Name, err)\n\t\t}\n\t\tvar oType order.Type\n\t\toType, err = b.getOrderType(resp[i].OrdType)\n\t\tif err != nil {\n\t\t\tlog.Errorf(log.ExchangeSys, \"%s %v\", b.Name, err)\n\t\t}\n\t\torderDetail := order.Detail{\n\t\t\tDate: resp[i].Timestamp,\n\t\t\tPrice: resp[i].Price,\n\t\t\tAmount: resp[i].OrderQty,\n\t\t\tExecutedAmount: resp[i].CumQty,\n\t\t\tRemainingAmount: resp[i].LeavesQty,\n\t\t\tExchange: b.Name,\n\t\t\tOrderID: resp[i].OrderID,\n\t\t\tSide: orderSideMap[resp[i].Side],\n\t\t\tStatus: orderStatus,\n\t\t\tType: oType,\n\t\t\tPair: currency.NewPairWithDelimiter(resp[i].Symbol,\n\t\t\t\tresp[i].SettlCurrency,\n\t\t\t\tformat.Delimiter),\n\t\t}\n\n\t\torders[i] = orderDetail\n\t}\n\treturn req.Filter(b.Name, orders), nil\n}", "func (c *Coinbene) GetSwapOrderHistory(beginTime, endTime, symbol string, pageNum,\n\tpageSize int, direction, orderType string) (SwapOrders, error) {\n\tv := url.Values{}\n\tif beginTime != \"\" {\n\t\tv.Set(\"beginTime\", beginTime)\n\t}\n\tif endTime != \"\" {\n\t\tv.Set(\"endTime\", endTime)\n\t}\n\tv.Set(\"symbol\", symbol)\n\tif pageNum != 0 {\n\t\tv.Set(\"pageNum\", strconv.Itoa(pageNum))\n\t}\n\tif pageSize != 0 {\n\t\tv.Set(\"pageSize\", strconv.Itoa(pageSize))\n\t}\n\tif direction != \"\" {\n\t\tv.Set(\"direction\", direction)\n\t}\n\tif orderType != \"\" {\n\t\tv.Set(\"orderType\", orderType)\n\t}\n\n\ttype resp struct {\n\t\tData SwapOrders `json:\"data\"`\n\t}\n\n\tvar r resp\n\tpath := coinbeneAPIVersion + coinbeneClosedOrders\n\terr := c.SendAuthHTTPRequest(exchange.RestSwap, http.MethodGet,\n\t\tpath,\n\t\tcoinbeneClosedOrders,\n\t\ttrue,\n\t\tv,\n\t\t&r,\n\t\tcontractGetClosedOrders)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn r.Data, nil\n}", "func (s *SnapshotsServiceOp) get(ID interface{}) (*Snapshot, *Response, error) {\n\tpath := fmt.Sprintf(\"%s/%v\", snapshotBasePath, ID)\n\n\treq, err := s.client.NewRequest(\"GET\", path, nil)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\troot := new(snapshotRoot)\n\tresp, err := s.client.Do(req, root)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn root.Snapshot, resp, err\n}", "func (s *Service) GetOrder(w http.ResponseWriter, r *http.Request) {\n\t// Get the order ID\n\tvars := mux.Vars(r)\n\tid := vars[\"id\"]\n\n\t// Get the order\n\tentry, err := usecases.GetOrderByID(s.storage, id)\n\tif err != nil {\n\t\tlog.Errorf(\"GetOrder: %v\", err)\n\t\tutils.ResponseWithError(w, http.StatusBadRequest, err.Error())\n\t\treturn\n\t}\n\tutils.ResponseWithJSON(w, http.StatusOK, entry)\n}", "func (svc *svc) ListOrders(ctx context.Context, query model.OrderQuery) ([]model.Order, int64, error) {\n\torders, err := svc.repo.ListOrders(ctx, query)\n\tif err != nil {\n\t\treturn nil, 0, err\n\t}\n\n\ttotal, err := svc.repo.CountOrders(ctx, query)\n\tif err != nil {\n\t\treturn nil, 0, err\n\t}\n\n\treturn orders, total, nil\n}", "func GetCourierOrders(c buffalo.Context) error {\n\tcourier, err := GetCourierByUserID(c.Param(\"user_id\"), c)\n\tif err != nil {\n\t\treturn errors.WithStack(errors.New(\"could not find courier\"))\n\t}\n\ttx := c.Value(\"tx\").(*pop.Connection)\n\torders := &models.Orders{}\n\n\tif err := tx.Eager().Where(\"courier_id 
= ?\", courier.ID).All(orders); err != nil {\n\t\treturn c.Error(http.StatusInternalServerError, err)\n\t}\n\treturn c.Render(http.StatusOK, r.JSON(orders))\n}", "func (m manager) AllOrders() (acmeserverless.Orders, error) {\n\t// Create a map of DynamoDB Attribute Values containing the table keys\n\t// for the access pattern PK = ORDER\n\tkm := make(map[string]*dynamodb.AttributeValue)\n\tkm[\":type\"] = &dynamodb.AttributeValue{\n\t\tS: aws.String(\"ORDER\"),\n\t}\n\n\t// Create the QueryInput\n\tqi := &dynamodb.QueryInput{\n\t\tTableName: aws.String(os.Getenv(\"TABLE\")),\n\t\tKeyConditionExpression: aws.String(\"PK = :type\"),\n\t\tExpressionAttributeValues: km,\n\t}\n\n\tqo, err := dbs.Query(qi)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\torders := make(acmeserverless.Orders, len(qo.Items))\n\n\tfor idx, ord := range qo.Items {\n\t\tstr := ord[\"OrderString\"].S\n\t\to, err := acmeserverless.UnmarshalOrder(*str)\n\t\tif err != nil {\n\t\t\tlog.Println(fmt.Sprintf(\"error unmarshalling order data: %s\", err.Error()))\n\t\t\tcontinue\n\t\t}\n\t\torders[idx] = o\n\t}\n\n\treturn orders, nil\n}", "func (c *Coinbene) GetSwapOpenOrdersByPage(symbol string, latestOrderID int64) (SwapOrders, error) {\n\tv := url.Values{}\n\tif symbol != \"\" {\n\t\tv.Set(\"symbol\", symbol)\n\t}\n\tif latestOrderID != 0 {\n\t\tv.Set(\"latestOrderId\", strconv.FormatInt(latestOrderID, 10))\n\t}\n\ttype resp struct {\n\t\tData SwapOrders `json:\"data\"`\n\t}\n\tvar r resp\n\tpath := coinbeneAPIVersion + coinbeneOpenOrdersByPage\n\terr := c.SendAuthHTTPRequest(exchange.RestSwap, http.MethodGet,\n\t\tpath,\n\t\tcoinbeneOpenOrdersByPage,\n\t\ttrue,\n\t\tv,\n\t\t&r,\n\t\tcontractOpenOrdersByPage)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn r.Data, nil\n}", "func RunSnapshotGet(c *CmdConfig) error {\n\tif len(c.Args) == 0 {\n\t\treturn doctl.NewMissingArgsErr(c.NS)\n\t}\n\n\tss := c.Snapshots()\n\tids := c.Args\n\n\tmatchedList := make([]do.Snapshot, 0, len(ids))\n\n\tfor _, id := range ids {\n\t\ts, err := ss.Get(id)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tmatchedList = append(matchedList, *s)\n\t}\n\titem := &displayers.Snapshot{Snapshots: matchedList}\n\treturn c.Display(item)\n}", "func (bc backendClient) QueryOpenOrders(addrStr, product, side string, start, end, page, perPage int) (orders []types.Order,\n\terr error) {\n\tperPageNum, err := params.CheckQueryOrdersParams(addrStr, product, side, start, end, page, perPage)\n\tif err != nil {\n\t\treturn\n\t}\n\n\t// field hideNoFill fixed by false\n\tjsonBytes, err := bc.GetCodec().MarshalJSON(\n\t\tbackendtypes.NewQueryOrderListParams(addrStr, product, side, page, perPageNum, int64(start), int64(end), false),\n\t)\n\tif err != nil {\n\t\treturn orders, utils.ErrMarshalJSON(err.Error())\n\t}\n\n\tpath := fmt.Sprintf(\"custom/%s/%s/open\", backendtypes.QuerierRoute, backendtypes.QueryOrderList)\n\tres, _, err := bc.Query(path, jsonBytes)\n\tif err != nil {\n\t\treturn orders, utils.ErrClientQuery(err.Error())\n\t}\n\n\tif err = utils.UnmarshalListResponse(res, &orders); err != nil {\n\t\treturn orders, utils.ErrFilterDataFromListResponse(\"open orders\", err.Error())\n\t}\n\n\treturn\n}", "func (a *Client) GetAssetOrdersAsync(params *GetAssetOrdersAsyncParams, authInfo runtime.ClientAuthInfoWriter) (*GetAssetOrdersAsyncOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewGetAssetOrdersAsyncParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: 
\"GetAssetOrdersAsync\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/api/v1/Assets/orders\",\n\t\tProducesMediaTypes: []string{\"application/json\", \"text/json\", \"text/plain\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &GetAssetOrdersAsyncReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsuccess, ok := result.(*GetAssetOrdersAsyncOK)\n\tif ok {\n\t\treturn success, nil\n\t}\n\t// unexpected success response\n\t// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue\n\tmsg := fmt.Sprintf(\"unexpected success response for GetAssetOrdersAsync: API contract not enforced by server. Client expected to get an error, but got: %T\", result)\n\tpanic(msg)\n}", "func (v *Kounta) GetOrdersSingle(token string, company string, orderID string) (*Order, error) {\n\tclient := &http.Client{}\n\tclient.CheckRedirect = checkRedirectFunc\n\n\tu, _ := url.ParseRequestURI(baseURL)\n\tu.Path = fmt.Sprintf(ordersSingleURL, company, orderID)\n\turlStr := fmt.Sprintf(\"%v\", u)\n\n\tfmt.Println(urlStr)\n\n\tr, err := http.NewRequest(\"GET\", urlStr, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tr.Header = http.Header(make(map[string][]string))\n\tr.Header.Set(\"Accept\", \"application/json\")\n\tr.Header.Set(\"Authorization\", \"Bearer \"+token)\n\n\tres, err := client.Do(r)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trawResBody, err := ioutil.ReadAll(res.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif res.StatusCode == 200 {\n\t\tresp := Order{}\n\n\t\t//\tfmt.Println(string(rawResBody))\n\n\t\terr = json.Unmarshal(rawResBody, &resp)\n\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn &resp, nil\n\t}\n\tfmt.Println(string(rawResBody))\n\treturn nil, fmt.Errorf(\"Failed to get Kounta Sale %s\", res.Status)\n\n}", "func (c *client) GetClosedOrders(query *ClosedOrdersQuery) (*CloseOrders, error) {\n\terr := query.Check()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tqp, err := common.QueryParamToMap(*query)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tresp, err := c.baseClient.Get(\"/orders/closed\", qp)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar orders CloseOrders\n\tif err := json.Unmarshal(resp, &orders); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &orders, nil\n}", "func (c *CoinbasePro) GetOrderHistory(ctx context.Context, req *order.MultiOrderRequest) (order.FilteredOrders, error) {\n\terr := req.Validate()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar respOrders []GeneralizedOrderResponse\n\tif len(req.Pairs) > 0 {\n\t\tvar fPair currency.Pair\n\t\tvar resp []GeneralizedOrderResponse\n\t\tfor i := range req.Pairs {\n\t\t\tfPair, err = c.FormatExchangeCurrency(req.Pairs[i], asset.Spot)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tresp, err = c.GetOrders(ctx, []string{\"done\"}, fPair.String())\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\trespOrders = append(respOrders, resp...)\n\t\t}\n\t} else {\n\t\trespOrders, err = c.GetOrders(ctx, []string{\"done\"}, \"\")\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tformat, err := c.GetPairFormat(asset.Spot, false)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\torders := make([]order.Detail, len(respOrders))\n\tfor i := range respOrders {\n\t\tvar curr currency.Pair\n\t\tcurr, 
err = currency.NewPairDelimiter(respOrders[i].ProductID,\n\t\t\tformat.Delimiter)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tvar side order.Side\n\t\tside, err = order.StringToOrderSide(respOrders[i].Side)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tvar orderStatus order.Status\n\t\torderStatus, err = order.StringToOrderStatus(respOrders[i].Status)\n\t\tif err != nil {\n\t\t\tlog.Errorf(log.ExchangeSys, \"%s %v\", c.Name, err)\n\t\t}\n\t\tvar orderType order.Type\n\t\torderType, err = order.StringToOrderType(respOrders[i].Type)\n\t\tif err != nil {\n\t\t\tlog.Errorf(log.ExchangeSys, \"%s %v\", c.Name, err)\n\t\t}\n\t\tdetail := order.Detail{\n\t\t\tOrderID: respOrders[i].ID,\n\t\t\tAmount: respOrders[i].Size,\n\t\t\tExecutedAmount: respOrders[i].FilledSize,\n\t\t\tRemainingAmount: respOrders[i].Size - respOrders[i].FilledSize,\n\t\t\tCost: respOrders[i].ExecutedValue,\n\t\t\tCostAsset: curr.Quote,\n\t\t\tType: orderType,\n\t\t\tDate: respOrders[i].CreatedAt,\n\t\t\tCloseTime: respOrders[i].DoneAt,\n\t\t\tFee: respOrders[i].FillFees,\n\t\t\tFeeAsset: curr.Quote,\n\t\t\tSide: side,\n\t\t\tStatus: orderStatus,\n\t\t\tPair: curr,\n\t\t\tPrice: respOrders[i].Price,\n\t\t\tExchange: c.Name,\n\t\t}\n\t\tdetail.InferCostsAndTimes()\n\t\torders[i] = detail\n\t}\n\treturn req.Filter(c.Name, orders), nil\n}", "func (c *Client) GetUserOrders(u *User, limit, offset int) ([]*Order, error, bool) {\n\tif u == nil {\n\t\treturn nil, errors.New(\"user can't be nil\"), false\n\t}\n\n\tuserIDStr := strconv.Itoa(u.ID)\n\tlimitStr := strconv.Itoa(limit)\n\toffsetStr := strconv.Itoa(offset)\n\n\torders := &ListOrderResponse{}\n\terr := c.apiget(\"/users/\"+userIDStr+\"/orders?limit=\"+limitStr+\"&offset=\"+offsetStr, orders)\n\tif err != nil {\n\t\treturn nil, err, false\n\t}\n\n\treturn orders.Orders, nil, orders.Meta.Next == \"\"\n}", "func GetCustomerOrders(c buffalo.Context) error {\n\tcustomer, err := GetUserByID(c.Param(\"user_id\"), c)\n\tif err != nil {\n\t\treturn errors.WithStack(errors.New(\"could not find user\"))\n\t}\n\ttx := c.Value(\"tx\").(*pop.Connection)\n\torders := &models.Orders{}\n\n\tif err := tx.Eager().Where(\"user_id = ? 
AND status = ?\", customer.ID, Paid).All(orders); err != nil {\n\t\treturn c.Error(http.StatusInternalServerError, err)\n\t}\n\treturn c.Render(http.StatusOK, r.JSON(map[string]interface{}{\n\t\t\"message\": \"successful\",\n\t\t\"orders\": orders,\n\t}))\n}", "func GetOrderByIDs(c Client, symbol string, ID string, refID string) (*t.Order, error) {\n\tif symbol == \"\" || (ID == \"\" && refID == \"\") {\n\t\treturn nil, nil\n\t}\n\n\tvar payload, url strings.Builder\n\n\tBuildBaseQS(&payload, symbol)\n\tif refID != \"\" {\n\t\tfmt.Fprintf(&payload, \"&orderId=%s\", refID)\n\t}\n\tif ID != \"\" {\n\t\tfmt.Fprintf(&payload, \"&origClientOrderId=%s\", ID)\n\t}\n\n\tsignature := Sign(payload.String(), c.SecretKey)\n\n\tfmt.Fprintf(&url, \"%s/order?%s&signature=%s\", c.BaseURL, payload.String(), signature)\n\tdata, err := h.GetH(url.String(), NewHeader(c.ApiKey))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tr := gjson.ParseBytes(data)\n\n\tif r.Get(\"code\").Int() < 0 {\n\t\th.Log(\"GetOrderByIDs\", r)\n\t\treturn nil, errors.New(r.Get(\"msg\").String())\n\t}\n\n\treturn &t.Order{\n\t\tID: ID,\n\t\tRefID: refID,\n\t\tSymbol: symbol,\n\t\tStatus: r.Get(\"status\").String(),\n\t\tUpdateTime: r.Get(\"updateTime\").Int(),\n\t}, nil\n}", "func (t *TauAPI) GetMarketOrders(market string) (MarketOrders, error) {\n\tvar mo MarketOrders\n\tvar maxBid, minAsk float64\n\tjsonData, err := t.doTauRequest(&TauReq{\n\t\tVersion: 1,\n\t\tMethod: \"GET\",\n\t\tPath: \"trading/orders?market=\" + strings.ToLower(market),\n\t})\n\tif err != nil {\n\t\treturn mo, fmt.Errorf(\"TauGetMarketOrders ->%s\", err.Error())\n\t}\n\tif err := json.Unmarshal(jsonData, &mo); err != nil {\n\t\treturn mo, err\n\t}\n\tmaxBid = 0.0\n\tfor _, b := range mo.Bids {\n\t\tbid, _ := strconv.ParseFloat(b.Price.String(), 64)\n\t\tmaxBid = math.Max(bid, maxBid)\n\t}\n\tif len(mo.Asks) == 0 {\n\t\tminAsk = maxBid + 0.01\n\t} else {\n\t\tminAsk, _ = strconv.ParseFloat(mo.Asks[0].Price.String(), 64)\n\t\tfor _, a := range mo.Asks {\n\t\t\task, _ := strconv.ParseFloat(a.Price.String(), 64)\n\t\t\tminAsk = math.Min(ask, minAsk)\n\t\t}\n\t}\n\tmo.MaxBid = maxBid\n\tmo.MinAsk = minAsk\n\treturn mo, nil\n}", "func (c *client) GetOrderDetail(ctx context.Context, orderID string) (*GetOrderDetailResult, error) {\n\tif orderID == \"\" {\n\t\treturn nil, errors.InvalidParameterError{Parameter: \"orderID\", Reason: \"cannot be empty\"}\n\t}\n\n\tvar (\n\t\tid = c.idGenerator.Generate()\n\t\ttimestamp = c.clock.Now().UnixMilli()\n\t\tparams = make(map[string]interface{})\n\t)\n\n\tparams[\"order_id\"] = orderID\n\n\tsignature, err := c.signatureGenerator.GenerateSignature(auth.SignatureRequest{\n\t\tAPIKey: c.apiKey,\n\t\tSecretKey: c.secretKey,\n\t\tID: id,\n\t\tMethod: methodGetOrderDetail,\n\t\tTimestamp: timestamp,\n\t\tParams: params,\n\t})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to create signature: %w\", err)\n\t}\n\n\tbody := api.Request{\n\t\tID: id,\n\t\tMethod: methodGetOrderDetail,\n\t\tNonce: timestamp,\n\t\tParams: params,\n\t\tSignature: signature,\n\t\tAPIKey: c.apiKey,\n\t}\n\n\tvar getOrderDetailResponse GetOrderDetailResponse\n\tstatusCode, err := c.requester.Post(ctx, body, methodGetOrderDetail, &getOrderDetailResponse)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to execute post request: %w\", err)\n\t}\n\n\tif err := c.requester.CheckErrorResponse(statusCode, getOrderDetailResponse.Code); err != nil {\n\t\treturn nil, fmt.Errorf(\"error received in response: %w\", err)\n\t}\n\n\treturn 
&getOrderDetailResponse.Result, nil\n}", "func (c *Client) Get(ctx context.Context, id string, params *razorpay.GetParams) (*razorpay.Order, error) {\n\tif params == nil {\n\t\tparams = &razorpay.GetParams{}\n\t}\n\n\torder := &razorpay.Order{}\n\terr := c.Call(ctx, http.MethodGet, \"/orders/\"+id, params, order)\n\treturn order, err\n}", "func (a *API) GetSnapshot(id string) (*bytes.Buffer, error) {\r\n\treq, err := a.newAPIRequest(\"GET\", \"cameraThumbnail\")\r\n\tif err != nil {\r\n\t\treturn nil, fmt.Errorf(\"GetSnapshot failed: %v\", err)\r\n\t}\r\n\tq := req.URL.Query()\r\n\tq.Add(\"cameraId\", id)\r\n\treq.URL.RawQuery = q.Encode()\r\n\r\n\tclient := http.DefaultClient\r\n\tresp, err := client.Do(req)\r\n\tif err != nil {\r\n\t\treturn nil, fmt.Errorf(\"GetSnapshot failed: %v\", err)\r\n\t}\r\n\r\n\tvar buf bytes.Buffer\r\n\t_, err = buf.ReadFrom(resp.Body)\r\n\tif err != nil {\r\n\t\treturn nil, fmt.Errorf(\"GetSnapshot failed: %v\", err)\r\n\t}\r\n\tresp.Body.Close()\r\n\r\n\treturn &buf, nil\r\n}", "func (d *Db) GetOrders(name string, col string) []Order {\n\tvar query string\n\tif col != \"none\" {\n\t\tquery = \"SELECT restaurant, orderDate, details, userName, client, lastName FROM orders_view; WHERE soa.orders.\" + col + \"=\" + name\n\t} else {\n\t\tquery = \"SELECT restaurant, orderDate, details, userName, client, lastName FROM orders_view;\"\n\t}\n\tstmt, err := d.Prepare(query)\n\tif err != nil {\n\t\tfmt.Println(\"GetOrders Preperation Err: \", err)\n\t}\n\n\t// Make query with our stmt, passing in name argument\n\trows, err := stmt.Query()\n\tif err != nil {\n\t\tfmt.Println(\"GetOrders Query Err: \", err)\n\t}\n\n\t// Create User struct for holding each row's data\n\tvar client Client\n\tvar user User\n\tvar rest Restaurant\n\tvar order Order\n\tvar date string\n\tpreset := \"2006-01-02 15:04:05\"\n\t// Create slice of Client for our response\n\torders := []Order{}\n\t// Copy the columns from row into the values pointed at by r (Client)\n\tfor rows.Next() {\n\t\terr = rows.Scan(\n\t\t\t&rest.Name,\n\t\t\t&date,\n\t\t\t&order.Details,\n\t\t\t&user.Name,\n\t\t\t&client.FName,\n\t\t\t&client.LName,\n\t\t)\n\t\torder.Restaurant = rest\n\t\torder.User = user\n\t\torder.Client = client\n\t\torder.OrderDate, _ = time.Parse(preset, date)\n\t\tif err != nil {\n\t\t\tfmt.Println(\"Error scanning rows: \", err)\n\t\t}\n\t\torders = append(orders, order)\n\t}\n\n\treturn orders\n}", "func (ob *OrderBookGroup) Get() (books []schemas.OrderBook, err error) {\n\tvar b []byte\n\tvar resp orderbook\n\tif len(ob.symbols) == 0 {\n\t\terr = errors.New(\"[POLONIEX] Symbol is empty\")\n\t\treturn\n\t}\n\n\tfor _, symb := range ob.symbols {\n\t\tsymbol := symb.OriginalName\n\t\tquery := httpclient.Params()\n\t\tquery.Set(\"command\", commandOrderBook)\n\t\tquery.Set(\"currencyPair\", symbol)\n\t\tquery.Set(\"depth\", \"200\")\n\n\t\tif b, err = ob.httpClient.Get(restURL, query, false); err != nil {\n\t\t\treturn\n\t\t}\n\t\tif err = json.Unmarshal(b, &resp); err != nil {\n\t\t\treturn\n\t\t}\n\t\tbooks = append(books, ob.mapHTTPSnapshot(symb.Name, resp))\n\t\ttime.Sleep(1 * time.Second)\n\t}\n\n\treturn\n}", "func (env Env) ListOrders(filter checkout.OrderFilter, page gorest.Pagination) (checkout.CMSOrderList, error) {\n\tdefer env.logger.Sync()\n\tsugar := env.logger.Sugar()\n\n\twhere := filter.SQLWhere()\n\tcountCh := make(chan int64)\n\tlistCh := make(chan checkout.CMSOrderList)\n\n\tgo func() {\n\t\tdefer close(countCh)\n\t\tn, err := env.countOrder(where)\n\t\tif err != nil 
{\n\t\t\tsugar.Error(err)\n\t\t}\n\n\t\tcountCh <- n\n\t}()\n\n\tgo func() {\n\t\tdefer close(listCh)\n\n\t\torders, err := env.listOrders(where, page)\n\n\t\tlistCh <- checkout.CMSOrderList{\n\t\t\tPagedList: pkg.PagedList{\n\t\t\t\tTotal: 0,\n\t\t\t\tPagination: gorest.Pagination{},\n\t\t\t\tErr: err,\n\t\t\t},\n\t\t\tData: orders,\n\t\t}\n\t}()\n\n\tcount, listResult := <-countCh, <-listCh\n\tif listResult.Err != nil {\n\t\treturn checkout.CMSOrderList{}, listResult.Err\n\t}\n\n\treturn checkout.CMSOrderList{\n\t\tPagedList: pkg.PagedList{\n\t\t\tTotal: count,\n\t\t\tPagination: page,\n\t\t\tErr: nil,\n\t\t},\n\t\tData: listResult.Data,\n\t}, nil\n}", "func (s *Store) GetAllOrders() (l []interface{}, err error) {\n\treturn s.w.Get(s.orders, models.Order{})\n}", "func (keeper *PersistentGlobalOrderKeeper) GetAllOrders(ctx sdk.Context) []*types.Order {\n\tstore := ctx.KVStore(keeper.marketKey)\n\tvar result []*types.Order\n\tstart := myposchain.ConcatKeys(OrderBookKeyPrefix, []byte{0x0})\n\tend := myposchain.ConcatKeys(OrderBookKeyPrefix, []byte{0x1})\n\n\titer := store.Iterator(start, end)\n\tdefer iter.Close()\n\tfor ; iter.Valid(); iter.Next() {\n\t\torder := &types.Order{}\n\t\tkeeper.codec.MustUnmarshalBinaryBare(iter.Value(), order)\n\t\tresult = append(result, order)\n\t}\n\treturn result\n}", "func GetEntriesAndNextToken(req *csi.ListSnapshotsRequest, snapshots []compute.Snapshot) (*csi.ListSnapshotsResponse, error) {\n\tif req == nil {\n\t\treturn nil, status.Errorf(codes.Aborted, \"request is nil\")\n\t}\n\n\tvar err error\n\tstart := 0\n\tif req.StartingToken != \"\" {\n\t\tstart, err = strconv.Atoi(req.StartingToken)\n\t\tif err != nil {\n\t\t\treturn nil, status.Errorf(codes.Aborted, \"ListSnapshots starting token(%s) parsing with error: %v\", req.StartingToken, err)\n\n\t\t}\n\t\tif start >= len(snapshots) {\n\t\t\treturn nil, status.Errorf(codes.Aborted, \"ListSnapshots starting token(%d) is greater than total number of snapshots\", start)\n\t\t}\n\t\tif start < 0 {\n\t\t\treturn nil, status.Errorf(codes.Aborted, \"ListSnapshots starting token(%d) can not be negative\", start)\n\t\t}\n\t}\n\n\tmaxEntries := len(snapshots) - start\n\tif req.MaxEntries > 0 && int(req.MaxEntries) < maxEntries {\n\t\tmaxEntries = int(req.MaxEntries)\n\t}\n\tentries := []*csi.ListSnapshotsResponse_Entry{}\n\tfor count := 0; start < len(snapshots) && count < maxEntries; start++ {\n\t\tif (req.SourceVolumeId != \"\" && req.SourceVolumeId == GetSourceVolumeID(&snapshots[start])) || req.SourceVolumeId == \"\" {\n\t\t\tcsiSnapshot, err := GenerateCSISnapshot(req.SourceVolumeId, &snapshots[start])\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"failed to generate snapshot entry: %v\", err)\n\t\t\t}\n\t\t\tentries = append(entries, &csi.ListSnapshotsResponse_Entry{Snapshot: csiSnapshot})\n\t\t\tcount++\n\t\t}\n\t}\n\n\tnextToken := len(snapshots)\n\tif start < len(snapshots) {\n\t\tnextToken = start\n\t}\n\n\tlistSnapshotResp := &csi.ListSnapshotsResponse{\n\t\tEntries: entries,\n\t\tNextToken: strconv.Itoa(nextToken),\n\t}\n\n\treturn listSnapshotResp, nil\n}", "func (o *V3SetErrorOrderInput) GetOrders() []V3OrderIntegrationError {\n\tif o == nil || o.Orders == nil {\n\t\tvar ret []V3OrderIntegrationError\n\t\treturn ret\n\t}\n\treturn *o.Orders\n}", "func (h *HUOBIHADAX) GetOrderMatchResults(orderID int64) ([]OrderMatchInfo, error) {\n\ttype response struct {\n\t\tResponse\n\t\tOrders []OrderMatchInfo `json:\"data\"`\n\t}\n\n\tvar result response\n\tendpoint := 
fmt.Sprintf(huobihadaxGetOrderMatch, strconv.FormatInt(orderID, 10))\n\terr := h.SendAuthenticatedHTTPRequest(http.MethodGet, endpoint, url.Values{}, &result)\n\n\tif result.ErrorMessage != \"\" {\n\t\treturn nil, errors.New(result.ErrorMessage)\n\t}\n\treturn result.Orders, err\n}", "func (c *Client) OpenOrders(symbol Symbol) ([]Order, error) {\n\tparams := []func(url.Values){}\n\n\tif symbol != zeroSymbol {\n\t\tparams = append(params, param(\"symbol\", symbol))\n\t}\n\n\tresults := make([]Order, 0, 100)\n\terr := c.signedCall(&results, \"GET\", \"/api/v3/openOrders\", params...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn results, nil\n}", "func NewGetOrdersRequest(server string, params *GetOrdersParams) (*http.Request, error) {\n\tvar err error\n\n\tserverURL, err := url.Parse(server)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\toperationPath := fmt.Sprintf(\"/orders\")\n\tif operationPath[0] == '/' {\n\t\toperationPath = operationPath[1:]\n\t}\n\toperationURL := url.URL{\n\t\tPath: operationPath,\n\t}\n\n\tqueryURL := serverURL.ResolveReference(&operationURL)\n\n\tqueryValues := queryURL.Query()\n\n\tif params.Symbol != nil {\n\n\t\tif queryFrag, err := runtime.StyleParamWithLocation(\"form\", true, \"symbol\", runtime.ParamLocationQuery, *params.Symbol); err != nil {\n\t\t\treturn nil, err\n\t\t} else if parsed, err := url.ParseQuery(queryFrag); err != nil {\n\t\t\treturn nil, err\n\t\t} else {\n\t\t\tfor k, v := range parsed {\n\t\t\t\tfor _, v2 := range v {\n\t\t\t\t\tqueryValues.Add(k, v2)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif params.From != nil {\n\n\t\tif queryFrag, err := runtime.StyleParamWithLocation(\"form\", true, \"from\", runtime.ParamLocationQuery, *params.From); err != nil {\n\t\t\treturn nil, err\n\t\t} else if parsed, err := url.ParseQuery(queryFrag); err != nil {\n\t\t\treturn nil, err\n\t\t} else {\n\t\t\tfor k, v := range parsed {\n\t\t\t\tfor _, v2 := range v {\n\t\t\t\t\tqueryValues.Add(k, v2)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif params.To != nil {\n\n\t\tif queryFrag, err := runtime.StyleParamWithLocation(\"form\", true, \"to\", runtime.ParamLocationQuery, *params.To); err != nil {\n\t\t\treturn nil, err\n\t\t} else if parsed, err := url.ParseQuery(queryFrag); err != nil {\n\t\t\treturn nil, err\n\t\t} else {\n\t\t\tfor k, v := range parsed {\n\t\t\t\tfor _, v2 := range v {\n\t\t\t\t\tqueryValues.Add(k, v2)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif params.Status != nil {\n\n\t\tif queryFrag, err := runtime.StyleParamWithLocation(\"form\", true, \"status\", runtime.ParamLocationQuery, *params.Status); err != nil {\n\t\t\treturn nil, err\n\t\t} else if parsed, err := url.ParseQuery(queryFrag); err != nil {\n\t\t\treturn nil, err\n\t\t} else {\n\t\t\tfor k, v := range parsed {\n\t\t\t\tfor _, v2 := range v {\n\t\t\t\t\tqueryValues.Add(k, v2)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif params.Limit != nil {\n\n\t\tif queryFrag, err := runtime.StyleParamWithLocation(\"form\", true, \"limit\", runtime.ParamLocationQuery, *params.Limit); err != nil {\n\t\t\treturn nil, err\n\t\t} else if parsed, err := url.ParseQuery(queryFrag); err != nil {\n\t\t\treturn nil, err\n\t\t} else {\n\t\t\tfor k, v := range parsed {\n\t\t\t\tfor _, v2 := range v {\n\t\t\t\t\tqueryValues.Add(k, v2)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t}\n\n\tqueryURL.RawQuery = queryValues.Encode()\n\n\treq, err := http.NewRequest(\"GET\", queryURL.String(), nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn req, nil\n}", "func (mdb *memdbSlice) GetSnapshots() ([]SnapshotInfo, 
error) {\n\tvar infos []SnapshotInfo\n\tvar err error\n\n\tinfos, _, err = mdb.getSnapshots()\n\treturn infos, err\n}", "func GetOrder(c Client, o t.Order) (*t.Order, error) {\n\texo, err := GetOrderByIDs(c, o.Symbol, o.ID, o.RefID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif exo == nil {\n\t\treturn nil, nil\n\t}\n\to.Status = exo.Status\n\to.UpdateTime = exo.UpdateTime\n\treturn &o, nil\n}", "func (s *SnapshotsServiceOp) List(ctx context.Context, opt *ListOptions) ([]Snapshot, *Response, error) {\n\treturn s.list(ctx, opt, nil)\n}", "func (s *OsdCsiServer) listMultipleSnapshots(\n\tctx context.Context,\n\treq *csi.ListSnapshotsRequest,\n) (*csi.ListSnapshotsResponse, error) {\n\tsourceVolumeId := req.GetSourceVolumeId()\n\tstartingToken := req.GetStartingToken()\n\tmaxEntries := req.GetMaxEntries()\n\n\tclogger.WithContext(ctx).Infof(\"ListSnapshots for multiple snapshots received. sourceVolumeId: %s, startingToken: %s, maxEntries: %v\",\n\t\tsourceVolumeId,\n\t\tstartingToken,\n\t\tmaxEntries,\n\t)\n\n\t// Get grpc connection\n\tconn, err := s.getConn()\n\tif err != nil {\n\t\treturn nil, status.Errorf(\n\t\t\tcodes.Unavailable,\n\t\t\t\"Unable to connect to SDK server: %v\", err)\n\t}\n\n\t// Get secret if any was passed\n\tctx = s.setupContext(ctx, req.GetSecrets())\n\tctx, cancel := grpcutil.WithDefaultTimeout(ctx)\n\tdefer cancel()\n\tvolumes := api.NewOpenStorageVolumeClient(conn)\n\n\t// Get all SnapshotIDs. Filter by source ID if provided.\n\tsnapshotsReq := &api.SdkVolumeSnapshotEnumerateWithFiltersRequest{\n\t\tVolumeId: sourceVolumeId,\n\t}\n\tsnapshotsResp, err := volumes.SnapshotEnumerateWithFilters(ctx, snapshotsReq)\n\tif err != nil {\n\t\terrStatus, ok := status.FromError(err)\n\t\tif ok && errStatus.Code() == codes.NotFound {\n\t\t\treturn &csi.ListSnapshotsResponse{}, nil\n\t\t}\n\t\treturn nil, status.Errorf(codes.Internal, \"Unable to get all snapshots: %v\", err)\n\t}\n\n\t// InspectWithFilters for all volumes\n\tvolumesResp, err := volumes.InspectWithFilters(ctx, &api.SdkVolumeInspectWithFiltersRequest{})\n\tif err != nil {\n\t\treturn nil, status.Errorf(codes.Internal, \"Unable to get all volumes: %v\", err)\n\t}\n\n\t// Sort snapshot IDs for repeatable results\n\tsortedSnapshotIds := sort.StringSlice(snapshotsResp.VolumeSnapshotIds)\n\tsort.Sort(sortedSnapshotIds)\n\n\t// Keep track of which volumes are snapshots\n\tvolumeForSnapId := make(map[string]*api.Volume)\n\tfor _, volResp := range volumesResp.Volumes {\n\t\tfor _, snapId := range sortedSnapshotIds {\n\t\t\tif volResp.Volume.Id == snapId {\n\t\t\t\tvolumeForSnapId[snapId] = volResp.Volume\n\t\t\t}\n\t\t}\n\t}\n\n\t// Generate response for all snapshots.\n\tlistSnapshotsResp := &csi.ListSnapshotsResponse{}\n\n\t// If starting token is provided, start skipping entries\n\t// until we hit the starting token.\n\tvar skipEntries bool\n\tif len(startingToken) > 0 {\n\t\tskipEntries = true\n\t}\n\tfor _, snapId := range sortedSnapshotIds {\n\t\t// Skip entries until we hit the starting token.\n\t\tif skipEntries && startingToken != snapId {\n\t\t\tcontinue\n\t\t}\n\t\tskipEntries = false\n\n\t\t// Before adding new object to response, check if we're at the max entries.\n\t\t// If we are at max entries, return with current iteration as NextToken.\n\t\t// This allows for calls to ListSnapshots to begin where we left off.\n\t\tvol := volumeForSnapId[snapId]\n\t\tif maxEntries > 0 && len(listSnapshotsResp.Entries) >= int(maxEntries) {\n\t\t\tlistSnapshotsResp.NextToken = vol.Id\n\t\t\treturn listSnapshotsResp, 
nil\n\t\t}\n\n\t\t// Populate entry with volume info\n\t\tentry := &csi.ListSnapshotsResponse_Entry{\n\t\t\tSnapshot: &csi.Snapshot{\n\t\t\t\tSizeBytes: int64(vol.GetSpec().GetSize()),\n\t\t\t\tSnapshotId: vol.Id,\n\t\t\t\tSourceVolumeId: vol.GetSource().Parent,\n\t\t\t\tCreationTime: vol.Ctime,\n\t\t\t\tReadyToUse: isSnapshotReady(vol),\n\t\t\t},\n\t\t}\n\n\t\tlistSnapshotsResp.Entries = append(listSnapshotsResp.Entries, entry)\n\t}\n\n\treturn listSnapshotsResp, nil\n}", "func GetOrderDetailLog(orderDetailID int64, dateString, dateEnd string) ([]*ResultAccessLog, error) {\n\tif orderDetailID <= 0 {\n\t\treturn nil, rest.NewError(422, \"invalid order_detail_id\", nil)\n\t}\n\tdb, err := pqx.Open()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tparam := []interface{}{orderDetailID}\n\tstrWhere := \"\"\n\tif dateString != \"\" {\n\t\tparam = append(param, dateString)\n\t\tstrWhere += \" AND a.created_at >= $\" + strconv.Itoa(len(param))\n\t}\n\tif dateEnd != \"\" {\n\t\tparam = append(param, dateString+\" 23:59:59\")\n\t\tstrWhere += \" AND a.created_at <= $\" + strconv.Itoa(len(param))\n\t}\n\n\tif strWhere != \"\" {\n\t\tSQL_GetOrderDetailLog = strings.Replace(SQL_GetOrderDetailLog, \"--WHERE\", strWhere, 1)\n\t}\n\n\trows, err := db.Query(SQL_GetOrderDetailLog, param...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar (\n\t\tid sql.NullInt64\n\t\taccess_time time.Time\n\t\tagent_user sql.NullString\n\t\tuser sql.NullString\n\t\tserver_agent_user sql.NullString\n\t\tservice sql.NullString\n\t\tservice_method sql.NullString\n\t\thost sql.NullString\n\t\trequest_url sql.NullString\n\t\taccess_duration sql.NullInt64\n\t\treply_code sql.NullInt64\n\t\tclient_ip sql.NullString\n\t\treply_resaon sql.NullString\n\t)\n\n\tdata := make([]*ResultAccessLog, 0)\n\t// loop for get data from table to struct for return\n\tfor rows.Next() {\n\t\taccessLog := new(ResultAccessLog)\n\t\t// scan data from database\n\t\trows.Scan(&id, &access_time, &agent_user, &user, &server_agent_user, &service, &service_method, &host, &request_url, &access_duration, &reply_code, &client_ip, &reply_resaon)\n\t\taccessLog.ID = id.Int64\n\t\taccessLog.AccessTime = access_time.Format(setting.GetSystemSetting(\"setting.Default.DatetimeFormat\"))\n\t\taccessLog.AgentUser = agent_user.String\n\t\taccessLog.User = user.String\n\t\taccessLog.ServerAgentUser = server_agent_user.String\n\t\taccessLog.Service = service.String\n\t\taccessLog.ServiceMethod = service_method.String\n\t\taccessLog.Host = host.String\n\t\taccessLog.RequestURL = request_url.String\n\t\taccessLog.AccessDuration = access_duration.Int64\n\t\taccessLog.ReplyCode = reply_code.Int64\n\t\taccessLog.ClientIP = client_ip.String\n\t\taccessLog.ReplyReason = reply_resaon.String\n\t\t// add data row to array result\n\t\tdata = append(data, accessLog)\n\t}\n\n\treturn data, nil\n}", "func (a *Client) BatchRetrieveOrders(params *BatchRetrieveOrdersParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*BatchRetrieveOrdersOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewBatchRetrieveOrdersParams()\n\t}\n\top := &runtime.ClientOperation{\n\t\tID: \"BatchRetrieveOrders\",\n\t\tMethod: \"POST\",\n\t\tPathPattern: \"/v2/orders/batch-retrieve\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &BatchRetrieveOrdersReader{formats: a.formats},\n\t\tAuthInfo: 
authInfo,\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t}\n\tfor _, opt := range opts {\n\t\topt(op)\n\t}\n\n\tresult, err := a.transport.Submit(op)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsuccess, ok := result.(*BatchRetrieveOrdersOK)\n\tif ok {\n\t\treturn success, nil\n\t}\n\t// unexpected success response\n\t// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue\n\tmsg := fmt.Sprintf(\"unexpected success response for BatchRetrieveOrders: API contract not enforced by server. Client expected to get an error, but got: %T\", result)\n\tpanic(msg)\n}", "func (c *Coinbene) FetchOpenSpotOrders(symbol string) (OrdersInfo, error) {\n\tparams := url.Values{}\n\tparams.Set(\"symbol\", symbol)\n\tpath := coinbeneAPIVersion + coinbeneOpenOrders\n\tvar orders OrdersInfo\n\tfor i := int64(1); ; i++ {\n\t\ttemp := struct {\n\t\t\tData OrdersInfo `json:\"data\"`\n\t\t}{}\n\t\tparams.Set(\"pageNum\", strconv.FormatInt(i, 10))\n\t\terr := c.SendAuthHTTPRequest(exchange.RestSpot, http.MethodGet,\n\t\t\tpath,\n\t\t\tcoinbeneOpenOrders,\n\t\t\tfalse,\n\t\t\tparams,\n\t\t\t&temp,\n\t\t\tspotQueryOpenOrders)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tfor j := range temp.Data {\n\t\t\torders = append(orders, temp.Data[j])\n\t\t}\n\n\t\tif len(temp.Data) != 20 {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn orders, nil\n}", "func (h *HUOBI) GetSwapOpenOrders(ctx context.Context, contractCode currency.Pair, pageIndex, pageSize int64) (SwapOpenOrdersData, error) {\n\tvar resp SwapOpenOrdersData\n\treq := make(map[string]interface{})\n\tcodeValue, err := h.FormatSymbol(contractCode, asset.CoinMarginedFutures)\n\tif err != nil {\n\t\treturn resp, err\n\t}\n\treq[\"contract_code\"] = codeValue\n\tif pageIndex != 0 {\n\t\treq[\"page_index\"] = pageIndex\n\t}\n\tif pageSize > 0 && pageSize <= 50 {\n\t\treq[\"page_size\"] = pageSize\n\t}\n\treturn resp, h.FuturesAuthenticatedHTTPRequest(ctx, exchange.RestFutures, http.MethodPost, huobiSwapOpenOrders, nil, req, &resp)\n}" ]
[ "0.62642765", "0.6221663", "0.6110406", "0.6087194", "0.60676306", "0.6066977", "0.58781767", "0.5827594", "0.57684326", "0.57366395", "0.5724347", "0.57100785", "0.570299", "0.56688213", "0.56568563", "0.56561035", "0.56210774", "0.55994815", "0.559785", "0.55757976", "0.5574437", "0.55573726", "0.5491141", "0.5434595", "0.54259443", "0.5410566", "0.5403307", "0.5379588", "0.5379407", "0.5322249", "0.5311861", "0.52895033", "0.5283188", "0.5278635", "0.5277134", "0.5262063", "0.52613986", "0.5250676", "0.5215701", "0.5208436", "0.52043295", "0.51798797", "0.5178497", "0.5174823", "0.51650417", "0.5160975", "0.5151957", "0.5140391", "0.51320875", "0.5129072", "0.5101566", "0.5076907", "0.5061806", "0.5048427", "0.5041031", "0.50399005", "0.50278497", "0.5023008", "0.50150967", "0.5006589", "0.4997502", "0.4988607", "0.49776056", "0.49592417", "0.4934743", "0.4903546", "0.49029368", "0.48552808", "0.48519504", "0.48468277", "0.4843141", "0.48115864", "0.48102045", "0.48015746", "0.4790196", "0.47850633", "0.4778626", "0.47716495", "0.4763686", "0.47463194", "0.47447476", "0.4742757", "0.47407433", "0.47291133", "0.47236305", "0.47206658", "0.47182703", "0.47067198", "0.46980417", "0.46959078", "0.46897995", "0.4688731", "0.46771705", "0.46762642", "0.46705168", "0.46688494", "0.46636513", "0.46542174", "0.46512344", "0.46449423" ]
0.8332623
0
AddOrders can be used to add orders to Mesh. It validates the given orders and if they are valid, will store and eventually broadcast the orders to peers.
func (app *App) AddOrders(signedOrdersRaw []*json.RawMessage) (*zeroex.ValidationResults, error) {\n\tallValidationResults := &zeroex.ValidationResults{\n\t\tAccepted: []*zeroex.AcceptedOrderInfo{},\n\t\tRejected: []*zeroex.RejectedOrderInfo{},\n\t}\n\tschemaValidOrders := []*zeroex.SignedOrder{}\n\tfor _, signedOrderRaw := range signedOrdersRaw {\n\t\tsignedOrderBytes := []byte(*signedOrderRaw)\n\t\tresult, err := app.schemaValidateOrder(signedOrderBytes)\n\t\tif err != nil {\n\t\t\tsignedOrder := &zeroex.SignedOrder{}\n\t\t\tif err := signedOrder.UnmarshalJSON(signedOrderBytes); err != nil {\n\t\t\t\tsignedOrder = nil\n\t\t\t}\n\t\t\tlog.WithField(\"signedOrderRaw\", string(signedOrderBytes)).Info(\"Unexpected error while attempting to validate signedOrderJSON against schema\")\n\t\t\tallValidationResults.Rejected = append(allValidationResults.Rejected, &zeroex.RejectedOrderInfo{\n\t\t\t\tSignedOrder: signedOrder,\n\t\t\t\tKind: MeshValidation,\n\t\t\t\tStatus: zeroex.RejectedOrderStatus{\n\t\t\t\t\tCode: ROInvalidSchemaCode,\n\t\t\t\t\tMessage: \"order did not pass JSON-schema validation: Malformed JSON or empty payload\",\n\t\t\t\t},\n\t\t\t})\n\t\t\tcontinue\n\t\t}\n\t\tif !result.Valid() {\n\t\t\tlog.WithField(\"signedOrderRaw\", string(signedOrderBytes)).Info(\"Order failed schema validation\")\n\t\t\tstatus := zeroex.RejectedOrderStatus{\n\t\t\t\tCode: ROInvalidSchemaCode,\n\t\t\t\tMessage: fmt.Sprintf(\"order did not pass JSON-schema validation: %s\", result.Errors()),\n\t\t\t}\n\t\t\tsignedOrder := &zeroex.SignedOrder{}\n\t\t\tif err := signedOrder.UnmarshalJSON(signedOrderBytes); err != nil {\n\t\t\t\tsignedOrder = nil\n\t\t\t}\n\t\t\tallValidationResults.Rejected = append(allValidationResults.Rejected, &zeroex.RejectedOrderInfo{\n\t\t\t\tSignedOrder: signedOrder,\n\t\t\t\tKind: MeshValidation,\n\t\t\t\tStatus: status,\n\t\t\t})\n\t\t\tcontinue\n\t\t}\n\t\tsignedOrder := &zeroex.SignedOrder{}\n\t\tif err := signedOrder.UnmarshalJSON(signedOrderBytes); err != nil {\n\t\t\t// This error should never happen since the signedOrder already passed the JSON schema validation above\n\t\t\tlog.WithField(\"signedOrderRaw\", string(signedOrderBytes)).Panic(\"Failed to unmarshal SignedOrder\")\n\t\t}\n\t\tschemaValidOrders = append(schemaValidOrders, signedOrder)\n\t}\n\tvalidationResults, err := app.validateOrders(schemaValidOrders)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfor _, orderInfo := range validationResults.Accepted {\n\t\tallValidationResults.Accepted = append(allValidationResults.Accepted, orderInfo)\n\t}\n\tfor _, orderInfo := range validationResults.Rejected {\n\t\tallValidationResults.Rejected = append(allValidationResults.Rejected, orderInfo)\n\t}\n\tfor _, acceptedOrderInfo := range allValidationResults.Accepted {\n\t\terr = app.orderWatcher.Watch(acceptedOrderInfo)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn allValidationResults, nil\n}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (handler *rpcHandler) AddOrders(orders []*zeroex.SignedOrder) (*zeroex.ValidationResults, error) {\n\tlog.Debug(\"received AddOrders request via RPC\")\n\tvalidationResults, err := handler.app.AddOrders(orders)\n\tif err != nil {\n\t\t// We don't want to leak internal error details to the RPC client.\n\t\tlog.WithField(\"error\", err.Error()).Error(\"internal error in AddOrders RPC call\")\n\t\treturn nil, errInternal\n\t}\n\treturn validationResults, nil\n}", "func (s Service) AddOrders(o OrdersCreateReq) error {\n\ttx, err := s.db.Begin()\n\tif err != nil {\n\t\tlog.Println(\"Failed to start a transaction\", err.Error())\n\t\treturn err\n\t}\n\n\torderTx, err := tx.Prepare(\"INSERT INTO api_db.order(id, email, total_price, total_weight_grams, order_number) VALUES(?,?,?,?,?)\")\n\tif err != nil {\n\t\tlog.Println(\"Failed to prepare the order transaction\", err.Error())\n\t\treturn err\n\t}\n\n\taddrTx, err := tx.Prepare(\"INSERT INTO api_db.order_shipping_address(order_id, first_name, address1, postcode) VALUES(?, ?, ?, ?)\")\n\tif err != nil {\n\t\tlog.Println(\"Failed to prepare the Address transaction\", err.Error())\n\t\treturn err\n\t}\n\n\tshippingLineTx, err := tx.Prepare(\"INSERT INTO api_db.order_to_shipping_line(order_id, shipping_line_id, title, price) VALUES(?, ?, ?, ?)\")\n\tif err != nil {\n\t\tlog.Println(\"Failed to prepare the Shipping Line transaction\", err.Error())\n\n\t\treturn err\n\t}\n\n\toLen := len(o)\n\tfor i := 0; i < oLen; i++ {\n\n\t\tcurrOrder := o[i]\n\t\tlog.Printf(\"Processing order #%d 📤\\n\", currOrder.ID)\n\t\t_, err = orderTx.Exec(\n\t\t\tcurrOrder.ID,\n\t\t\tcurrOrder.Email,\n\t\t\tcurrOrder.TotalPrice,\n\t\t\tcurrOrder.TotalWeightGrams,\n\t\t\tcurrOrder.OrderNumber,\n\t\t)\n\t\tif err != nil {\n\t\t\ttx.Rollback()\n\t\t\tlog.Printf(\"Failed to save order #%d: %s\", currOrder.ID, err.Error())\n\t\t\treturn err\n\t\t}\n\n\t\tshippingAddr := currOrder.ShippingAddress\n\n\t\t// insert the shipping adddress\n\t\t_, err := addrTx.Exec(currOrder.ID, shippingAddr.FirstName, shippingAddr.Address1, shippingAddr.PostCode)\n\t\tif err != nil {\n\t\t\tlog.Println(\"Failed to save the shipping address\", err.Error())\n\t\t\treturn err\n\t\t}\n\n\t\tslLen := len(currOrder.ShippingLines)\n\t\tfor j := 0; j < slLen; j++ {\n\n\t\t\tsl := currOrder.ShippingLines[j]\n\t\t\t_, err := shippingLineTx.Exec(currOrder.ID, sl.ID, sl.Title, sl.Price)\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(\"Failed to save a shipping line\", err.Error())\n\t\t\t\ttx.Rollback()\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tif err := tx.Commit(); err != nil {\n\t\tlog.Println(\"Couldn't commit the transaction\")\n\t\treturn err\n\t}\n\n\tfor i := 0; i < oLen; i++ {\n\t\tcurrOrder := o[i]\n\t\tlog.Printf(\"Delivering order #%d\\n\", currOrder.ID)\n\t\ts.deliverOrderChan <- currOrder\n\t\tlog.Printf(\"Delivered order #%d\\n\", currOrder.ID)\n\t}\n\n\treturn nil\n}", "func (lu *LocationUpdate) AddWorkOrders(w ...*WorkOrder) *LocationUpdate {\n\tids := make([]string, len(w))\n\tfor i := range w {\n\t\tids[i] = w[i].ID\n\t}\n\treturn lu.AddWorkOrderIDs(ids...)\n}", "func (luo *LocationUpdateOne) AddWorkOrders(w ...*WorkOrder) *LocationUpdateOne {\n\tids := make([]string, len(w))\n\tfor i := range w {\n\t\tids[i] = w[i].ID\n\t}\n\treturn luo.AddWorkOrderIDs(ids...)\n}", "func (w *Watcher) ValidateAndStoreValidOrdersV4(ctx context.Context, orders []*zeroex.SignedOrderV4, chainID int, pinned bool, opts *types.AddOrdersOpts) (*ordervalidator.ValidationResults, error) {\n\tif len(orders) == 0 
{\n\t\treturn &ordervalidator.ValidationResults{}, nil\n\t}\n\tresults, validMeshOrders, err := w.meshSpecificOrderValidationV4(orders, chainID, pinned)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvalidationBlock, zeroexResults, err := w.onchainOrderValidationV4(ctx, validMeshOrders)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tresults.Accepted = append(results.Accepted, zeroexResults.Accepted...)\n\tresults.Rejected = append(results.Rejected, zeroexResults.Rejected...)\n\n\t// Filter out only the new orders.\n\tnewOrderInfos := []*ordervalidator.AcceptedOrderInfo{}\n\tfor _, acceptedOrderInfo := range results.Accepted {\n\t\t// If the order isn't new, we don't add to OrderWatcher.\n\t\tif acceptedOrderInfo.IsNew {\n\t\t\tnewOrderInfos = append(newOrderInfos, acceptedOrderInfo)\n\t\t}\n\t}\n\n\tif opts.KeepCancelled || opts.KeepExpired || opts.KeepFullyFilled || opts.KeepUnfunded {\n\t\tfor _, rejectedOrderInfo := range zeroexResults.Rejected {\n\t\t\t// NOTE(jalextowle): We can use the rejectedOrderInfo.Status\n\t\t\t// field to see whether or not the order is new or not. If\n\t\t\t// the order has already been stored, the rejectedOrderInfo.Status\n\t\t\t// field will be ordervalidator.ROOrderAlreadyStoredAndUnfillable.\n\t\t\t// If the rejection reason involves on-chain validation, then the\n\t\t\t// order is new.\n\t\t\tif (opts.KeepCancelled && rejectedOrderInfo.Status.Code == ordervalidator.ROCancelled.Code) ||\n\t\t\t\t(opts.KeepExpired && rejectedOrderInfo.Status.Code == ordervalidator.ROExpired.Code) ||\n\t\t\t\t(opts.KeepFullyFilled && rejectedOrderInfo.Status.Code == ordervalidator.ROFullyFilled.Code) ||\n\t\t\t\t(opts.KeepUnfunded && rejectedOrderInfo.Status.Code == ordervalidator.ROUnfunded.Code) {\n\t\t\t\tnewOrderInfos = append(newOrderInfos, &ordervalidator.AcceptedOrderInfo{\n\t\t\t\t\tOrderHash: rejectedOrderInfo.OrderHash,\n\t\t\t\t\tSignedOrder: rejectedOrderInfo.SignedOrder,\n\t\t\t\t\tSignedOrderV4: rejectedOrderInfo.SignedOrderV4,\n\t\t\t\t\t// TODO(jalextowle): Verify that this is consistent with the OrderWatcher\n\t\t\t\t\tFillableTakerAssetAmount: big.NewInt(0),\n\t\t\t\t\tIsNew: true,\n\t\t\t\t})\n\t\t\t}\n\t\t}\n\t}\n\n\t// Add the order to the OrderWatcher. This also saves the order in the\n\t// database.\n\tallOrderEvents := []*zeroex.OrderEvent{}\n\torderEvents, err := w.add(newOrderInfos, validationBlock, pinned, opts)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tallOrderEvents = append(allOrderEvents, orderEvents...)\n\n\tif len(allOrderEvents) > 0 {\n\t\t// NOTE(albrow): Send can block if the subscriber(s) are slow. 
Blocking here can cause problems when Mesh is\n\t\t// shutting down, so to prevent that, we call Send in a goroutine and return immediately if the context\n\t\t// is done.\n\t\tdone := make(chan interface{})\n\t\tgo func() {\n\t\t\tw.orderFeed.Send(allOrderEvents)\n\t\t\tdone <- struct{}{}\n\t\t}()\n\t\tselect {\n\t\tcase <-done:\n\t\t\treturn results, nil\n\t\tcase <-ctx.Done():\n\t\t\treturn results, nil\n\t\t}\n\t}\n\n\treturn results, nil\n}", "func (s *ApiService) PostOrders(ctx context.Context, order ordf.Order) (ordf.ImplResponse, error) {\n\torderID, err := s.ordersService.CreateOrder(ctx, order)\n\tif err != nil {\n\t\treturn ordf.Response(500, nil), err\n\t}\n\n\treturn ordf.Response(200, ordf.OrderCreated{\n\t\tOrderId: orderID,\n\t}), nil\n}", "func (w *Watcher) Add(orderInfo *ordervalidator.AcceptedOrderInfo, pinned bool) error {\n\tif err := w.decreaseMaxExpirationTimeIfNeeded(); err != nil {\n\t\treturn err\n\t}\n\n\t// TODO(albrow): technically we should count the current number of orders,\n\t// remove some if needed, and then insert the order in a single transaction to\n\t// ensure that we don't accidentally exceed the maximum. In practice, and\n\t// because of the way OrderWatcher works, the distinction shouldn't matter.\n\ttxn := w.meshDB.Orders.OpenTransaction()\n\tdefer func() {\n\t\t_ = txn.Discard()\n\t}()\n\n\t// Final expiration time check before inserting the order. We might have just\n\t// changed max expiration time above.\n\tif !pinned && orderInfo.SignedOrder.ExpirationTimeSeconds.Cmp(w.maxExpirationTime) == 1 {\n\t\t// HACK(albrow): This is technically not the ideal way to respond to this\n\t\t// situation, but it is a lot easier to implement for the time being. In the\n\t\t// future, we should return an error and then react to that error\n\t\t// differently depending on whether the order was received via RPC or from a\n\t\t// peer. In the former case, we should return an RPC error response\n\t\t// indicating that the order was not in fact added. In the latter case, we\n\t\t// should effectively no-op, neither penalizing the peer or emitting any\n\t\t// order events. For now, we respond by emitting an ADDED event immediately\n\t\t// followed by a STOPPED_WATCHING event. If this order was submitted via\n\t\t// RPC, the RPC client will see a response that indicates the order was\n\t\t// successfully added, and then it will look like we immediately stopped\n\t\t// watching it. This is not too far off from what really happened but is\n\t\t// slightly inefficient.\n\t\taddedEvent := &zeroex.OrderEvent{\n\t\t\tOrderHash: orderInfo.OrderHash,\n\t\t\tSignedOrder: orderInfo.SignedOrder,\n\t\t\tFillableTakerAssetAmount: orderInfo.FillableTakerAssetAmount,\n\t\t\tEndState: zeroex.ESOrderAdded,\n\t\t}\n\t\tstoppedWatchingEvent := &zeroex.OrderEvent{\n\t\t\tOrderHash: orderInfo.OrderHash,\n\t\t\tSignedOrder: orderInfo.SignedOrder,\n\t\t\tFillableTakerAssetAmount: orderInfo.FillableTakerAssetAmount,\n\t\t\tEndState: zeroex.ESStoppedWatching,\n\t\t}\n\t\tw.orderFeed.Send([]*zeroex.OrderEvent{addedEvent, stoppedWatchingEvent})\n\t\treturn nil\n\t}\n\n\torder := &meshdb.Order{\n\t\tHash: orderInfo.OrderHash,\n\t\tSignedOrder: orderInfo.SignedOrder,\n\t\tLastUpdated: time.Now().UTC(),\n\t\tFillableTakerAssetAmount: orderInfo.FillableTakerAssetAmount,\n\t\tIsRemoved: false,\n\t\tIsPinned: pinned,\n\t}\n\terr := txn.Insert(order)\n\tif err != nil {\n\t\tif _, ok := err.(db.AlreadyExistsError); ok {\n\t\t\t// If we're already watching the order, that's fine in this case. 
Don't\n\t\t\t// return an error.\n\t\t\treturn nil\n\t\t}\n\t\treturn err\n\t}\n\tif err := txn.Commit(); err != nil {\n\t\treturn err\n\t}\n\n\terr = w.setupInMemoryOrderState(orderInfo.SignedOrder)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\torderEvent := &zeroex.OrderEvent{\n\t\tOrderHash: orderInfo.OrderHash,\n\t\tSignedOrder: orderInfo.SignedOrder,\n\t\tFillableTakerAssetAmount: orderInfo.FillableTakerAssetAmount,\n\t\tEndState: zeroex.ESOrderAdded,\n\t}\n\tw.orderFeed.Send([]*zeroex.OrderEvent{orderEvent})\n\n\treturn nil\n}", "func (o *V3SetErrorOrderInput) HasOrders() bool {\n\tif o != nil && o.Orders != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func AddOrder(ctx *fasthttp.RequestCtx) {\n\tord, err := acmeserverless.UnmarshalOrder(string(ctx.Request.Body()))\n\tif err != nil {\n\t\tErrorHandler(ctx, \"AddOrder\", \"UnmarshalOrder\", err)\n\t\treturn\n\t}\n\tord.OrderID = uuid.Must(uuid.NewV4()).String()\n\n\tord, err = db.AddOrder(ord)\n\tif err != nil {\n\t\tErrorHandler(ctx, \"AddOrder\", \"AddOrder\", err)\n\t\treturn\n\t}\n\n\tprEvent := acmeserverless.PaymentRequestedEvent{\n\t\tMetadata: acmeserverless.Metadata{\n\t\t\tDomain: acmeserverless.OrderDomain,\n\t\t\tSource: \"AddOrder\",\n\t\t\tType: acmeserverless.PaymentRequestedEventName,\n\t\t\tStatus: acmeserverless.DefaultSuccessStatus,\n\t\t},\n\t\tData: acmeserverless.PaymentRequestDetails{\n\t\t\tOrderID: ord.OrderID,\n\t\t\tCard: ord.Card,\n\t\t\tTotal: ord.Total,\n\t\t},\n\t}\n\n\t// Send a breadcrumb to Sentry with the payment request\n\tsentry.AddBreadcrumb(&sentry.Breadcrumb{\n\t\tCategory: acmeserverless.PaymentRequestedEventName,\n\t\tTimestamp: time.Now(),\n\t\tLevel: sentry.LevelInfo,\n\t\tData: acmeserverless.ToSentryMap(prEvent.Data),\n\t})\n\n\t// Create payment payload\n\tpayload, err := prEvent.Marshal()\n\tif err != nil {\n\t\tErrorHandler(ctx, \"AddOrder\", \"Marshal\", err)\n\t\treturn\n\t}\n\n\t// Send to Payment\n\treq, err := http.NewRequest(\"POST\", os.Getenv(\"PAYMENT_URL\"), bytes.NewReader(payload))\n\tif err != nil {\n\t\tErrorHandler(ctx, \"AddOrder\", \"NewRequest\", err)\n\t\treturn\n\t}\n\n\treq.Header.Add(\"content-type\", \"application/json\")\n\treq.Header.Add(\"host\", os.Getenv(\"PAYMENT_HOST\"))\n\n\tres, err := http.DefaultClient.Do(req)\n\tif err != nil {\n\t\tErrorHandler(ctx, \"AddOrder\", \"DefaultClient.Do\", err)\n\t\treturn\n\t}\n\n\tdefer res.Body.Close()\n\tbody, _ := ioutil.ReadAll(res.Body)\n\n\tif res.StatusCode != 200 {\n\t\tErrorHandler(ctx, \"AddOrder\", \"Payment\", fmt.Errorf(string(body)))\n\t\treturn\n\t}\n\n\tstatus := acmeserverless.OrderStatus{\n\t\tOrderID: ord.OrderID,\n\t\tUserID: ord.UserID,\n\t\tPayment: acmeserverless.CreditCardValidationDetails{\n\t\t\tMessage: \"pending payment\",\n\t\t\tSuccess: false,\n\t\t},\n\t}\n\n\t// Send a breadcrumb to Sentry with the shipment request\n\tsentry.AddBreadcrumb(&sentry.Breadcrumb{\n\t\tCategory: acmeserverless.PaymentRequestedEventName,\n\t\tTimestamp: time.Now(),\n\t\tLevel: sentry.LevelInfo,\n\t\tData: acmeserverless.ToSentryMap(status.Payment),\n\t})\n\n\tpayload, err = status.Marshal()\n\tif err != nil {\n\t\tErrorHandler(ctx, \"AddOrder\", \"Marshal\", err)\n\t\treturn\n\t}\n\n\treq, err = http.NewRequest(\"POST\", os.Getenv(\"SHIPMENT_URL\"), bytes.NewReader(payload))\n\tif err != nil {\n\t\tErrorHandler(ctx, \"AddOrder\", \"NewRequest\", err)\n\t\treturn\n\t}\n\n\treq.Header.Add(\"content-type\", \"application/json\")\n\treq.Header.Add(\"host\", os.Getenv(\"SHIPMENT_HOST\"))\n\n\t_, err = 
http.DefaultClient.Do(req)\n\tif err != nil {\n\t\tErrorHandler(ctx, \"AddOrder\", \"DefaultClient.Do\", err)\n\t\treturn\n\t}\n\n\tctx.SetStatusCode(http.StatusOK)\n\tctx.Write(payload)\n}", "func updateOrders(orders *def.Orders, externalButtonPress def.Order, elevatorState def.ElevatorState) {\n\tif externalButtonPress.Direction == def.DIR_STOP {\n\t\t/*Detected internal button press*/\n\t\tdistributeInternalOrderToOrderList(externalButtonPress, orders, elevatorState)\n\t}\n\tif CheckForDuplicateOrder(orders, externalButtonPress.Floor) { // TODO: DO NOT REMOVE ORDERS ALONG THE SAME DIRECTION\n\t\tfindAndReplaceOrderIfSameDirection(orders, externalButtonPress, elevatorState.Direction) //TODO\n\t\treturn\n\t}\n\n\tif len(orders.Orders) > 0 { // For safety\n\t\t// Check to see if order should be placed first based on current elevator state\n\t\tif elevatorState.Direction == externalButtonPress.Direction && FloorIsInbetween(orders.Orders[0].Floor, externalButtonPress.Floor, elevatorState.LastFloor, elevatorState.Direction) {\n\t\t\t// Insert Order in first position\n\n\t\t\torders.Orders = append(orders.Orders, def.Order{})\n\t\t\tcopy(orders.Orders[1:], orders.Orders[:])\n\t\t\torders.Orders[0] = externalButtonPress\n\t\t\treturn\n\t\t}\n\n\t}\n\n\tfor i := 1; i < len(orders.Orders); i++ {\n\t\tdirection := orders.Orders[i].Direction\n\t\tif externalButtonPress.Direction == direction { // Elevator is moving in the right direction\n\t\t\tswitch direction {\n\t\t\tcase def.DIR_UP:\n\t\t\t\tif externalButtonPress.Floor < orders.Orders[i].Floor {\n\t\t\t\t\t// Insert Order in position (i)\n\t\t\t\t\torders.Orders = append(orders.Orders, def.Order{})\n\t\t\t\t\tcopy(orders.Orders[i+1:], orders.Orders[i:])\n\t\t\t\t\torders.Orders[i] = externalButtonPress\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\tcase def.DIR_DOWN:\n\t\t\t\tif externalButtonPress.Floor > orders.Orders[i].Floor {\n\t\t\t\t\t// Insert Order in position (i+1)\n\n\t\t\t\t\torders.Orders = append(orders.Orders, def.Order{})\n\t\t\t\t\tcopy(orders.Orders[i+1:], orders.Orders[i:])\n\t\t\t\t\torders.Orders[i] = externalButtonPress\n\t\t\t\t\treturn\n\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\tfmt.Println(\"Something weird is up, buddy\")\n\t\t\t}\n\t\t}\n\t}\n\t// Place order at back of orderList\n\torders.Orders = append(orders.Orders, externalButtonPress)\n}", "func (s *OrderService) AddOrder(order spec.Order) error {\n\tjsonPayload, err := json.Marshal(order)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err := s.client.SaveState(context.Background(), s.storeName, order.ID, jsonPayload, nil); err != nil {\n\t\treturn err\n\t}\n\n\t// This is a list of orderIDs for the user\n\tuserOrders := []string{}\n\t// NOTE We use the userID as a key in the orders state set, to hold an index of orders\n\tdata, err := s.client.GetState(context.Background(), s.storeName, order.ForUserID, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Ignore any problem, it's possible it doesn't exist yet (user's first order)\n\t_ = json.Unmarshal(data.Value, &userOrders)\n\n\talreadyExists := false\n\n\tfor _, oid := range userOrders {\n\t\tif order.ID == oid {\n\t\t\talreadyExists = true\n\t\t}\n\t}\n\n\tif !alreadyExists {\n\t\tuserOrders = append(userOrders, order.ID)\n\t} else {\n\t\tlog.Printf(\"### Warning, duplicate order '%s' for user '%s' detected\", order.ID, order.ForUserID)\n\t}\n\n\t// Save updated order list back, again keyed using user id\n\tjsonPayload, err = json.Marshal(userOrders)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err := 
s.client.SaveState(context.Background(), s.storeName, order.ForUserID, jsonPayload, nil); err != nil {\n\t\tlog.Printf(\"### Error!, unable to save order list for user '%s'\", order.ForUserID)\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (db *DatabaseService) AddOrder(order *models.Order) error {\n\t_, err := db.db.Model(order).Insert()\n\treturn err\n}", "func (a *API) AddOrder(o *Order) (r *OrderResponse, err error) {\n\n\terr = o.Error()\n\tif err != nil {\n\t\terr = fmt.Errorf(\"gowhmcs addorder error: %v\", err)\n\t\treturn\n\t}\n\n\tbody, err := a.Do(\"addorder\", o)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"gowhmcs addorder error: %v (%s)\", err, string(body))\n\t\treturn\n\t}\n\n\tif err = json.Unmarshal(body, &r); err != nil {\n\t\terr = fmt.Errorf(\"gowhmcs addorder error: %s\", string(body))\n\t\treturn\n\t}\n\treturn\n\n}", "func (_EtherDelta *EtherDeltaSession) Orders(arg0 common.Address, arg1 [32]byte) (bool, error) {\n\treturn _EtherDelta.Contract.Orders(&_EtherDelta.CallOpts, arg0, arg1)\n}", "func (_EtherDelta *EtherDeltaCaller) Orders(opts *bind.CallOpts, arg0 common.Address, arg1 [32]byte) (bool, error) {\n\tvar (\n\t\tret0 = new(bool)\n\t)\n\tout := ret0\n\terr := _EtherDelta.contract.Call(opts, out, \"orders\", arg0, arg1)\n\treturn *ret0, err\n}", "func (svc *svc) UpdateOrders(ctx context.Context, query model.OrderQuery, updates model.OrderUpdates) error {\n\terr := svc.repo.UpdateOrders(ctx, query, updates)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (e *Elevator) addOrder(order ButtonSignal) {\n\te.orderMatrix[order.Floor][order.Button] = true\n}", "func (r *CompanySalesOrdersCollectionRequest) Add(ctx context.Context, reqObj *SalesOrder) (resObj *SalesOrder, err error) {\n\terr = r.JSONRequest(ctx, \"POST\", \"\", reqObj, &resObj)\n\treturn\n}", "func (m manager) AddOrder(o acmeserverless.Order) (acmeserverless.Order, error) {\n\t// Generate and assign a new orderID\n\to.OrderID = uuid.Must(uuid.NewV4()).String()\n\to.Status = aws.String(\"Pending Payment\")\n\n\t// Marshal the newly updated product struct\n\tpayload, err := o.Marshal()\n\tif err != nil {\n\t\treturn o, fmt.Errorf(\"error marshalling order: %s\", err.Error())\n\t}\n\n\t// Create a map of DynamoDB Attribute Values containing the table keys\n\tkm := make(map[string]*dynamodb.AttributeValue)\n\tkm[\"PK\"] = &dynamodb.AttributeValue{\n\t\tS: aws.String(\"ORDER\"),\n\t}\n\tkm[\"SK\"] = &dynamodb.AttributeValue{\n\t\tS: aws.String(o.OrderID),\n\t}\n\n\t// Create a map of DynamoDB Attribute Values containing the table data elements\n\tem := make(map[string]*dynamodb.AttributeValue)\n\tem[\":keyid\"] = &dynamodb.AttributeValue{\n\t\tS: aws.String(o.UserID),\n\t}\n\tem[\":payload\"] = &dynamodb.AttributeValue{\n\t\tS: aws.String(string(payload)),\n\t}\n\n\tuii := &dynamodb.UpdateItemInput{\n\t\tTableName: aws.String(os.Getenv(\"TABLE\")),\n\t\tKey: km,\n\t\tExpressionAttributeValues: em,\n\t\tUpdateExpression: aws.String(\"SET Payload = :payload, KeyID = :keyid\"),\n\t}\n\n\t_, err = dbs.UpdateItem(uii)\n\tif err != nil {\n\t\treturn o, fmt.Errorf(\"error updating dynamodb: %s\", err.Error())\n\t}\n\n\treturn o, nil\n}", "func (_EtherDelta *EtherDeltaCallerSession) Orders(arg0 common.Address, arg1 [32]byte) (bool, error) {\n\treturn _EtherDelta.Contract.Orders(&_EtherDelta.CallOpts, arg0, arg1)\n}", "func AddOrdersSubCmds(rootCmd *cobra.Command, cfg *config.Config) error {\n\tfound := false\n\n\tfor _, cmd := range rootCmd.Commands() {\n\t\tif cmd.Use == \"orders\" {\n\t\t\tfound = 
true\n\n\t\t\t// Remove the autogenerated `create` and `update` command(s).\n\t\t\tcommands := cmd.Commands()\n\t\t\tfor _, c := range commands {\n\t\t\t\tif c.Use == \"create\" {\n\t\t\t\t\tcmd.RemoveCommand(c)\n\t\t\t\t} else if c.Use == \"update\" {\n\t\t\t\t\tcmd.RemoveCommand(c)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tNewOrdersCreateCmd(cmd, cfg)\n\t\t\tNewOrdersUpdateCmd(cmd, cfg)\n\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif !found {\n\t\treturn errors.New(\"Could not find orders command\")\n\t}\n\n\treturn nil\n}", "func (m *RestaurantMutation) AddOrderIDs(ids ...int) {\n\tif m.orders == nil {\n\t\tm.orders = make(map[int]struct{})\n\t}\n\tfor i := range ids {\n\t\tm.orders[ids[i]] = struct{}{}\n\t}\n}", "func NewOrder(order map[string]interface{}) (err error) {\r\n\tmaster := \"\"\r\n\tdetail := \"\"\r\n\tpayment := \"\"\r\n\tinventory := \"\"\r\n\r\n\t// Get a new reference to the ordered items and remove it from the map.\r\n\tpayments := order[\"payments\"]\r\n\titemsOrdered := order[\"items\"]\r\n\tdelete(order, \"items\")\r\n\tdelete(order, \"payments\")\r\n\r\n\t// Get the master insert query\r\n\tif master, err = MaptoInsert(order, \"orders\"); err != nil {\r\n\t\tCheckError(\"Error Mapping the Order to SQL.\", err, false)\r\n\t\treturn err\r\n\t}\r\n\r\n\tmaster += \"SET @last_id := (SELECT LAST_INSERT_ID());\"\r\n\r\n\t// Get the details insert query\r\n\tfor _, _value := range itemsOrdered.([]interface{}) {\r\n\t\tif detail, err = MaptoInsert(_value.(map[string]interface{}), \"ordereditems\"); err != nil {\r\n\t\t\tCheckError(\"Error Mapping the Ordered Items to SQL.\", err, false)\r\n\t\t\treturn err\r\n\t\t}\r\n\r\n\t\t// Build out the needed queries\r\n\t\tinventory += fmt.Sprintf(`UPDATE products SET onhand = onhand - %v, serialnumbers = replace(serialnumbers, '%s', '') WHERE itemcode = \"%s\";`, _value.(map[string]interface{})[\"quantity\"], _value.(map[string]interface{})[\"serialnumber\"], _value.(map[string]interface{})[\"itemcode\"])\r\n\t\tmaster += strings.Replace(fmt.Sprintf(\"%v\", detail), `\"\"`, \"@last_id\", -1)\r\n\t\tmaster = strings.Replace(master, `\"null\"`, `\"\"`, -1)\r\n\t}\r\n\r\n\t// Get the payments insert query\r\n\tfor _, _value := range payments.([]interface{}) {\r\n\t\tif detail, err = MaptoInsert(_value.(map[string]interface{}), \"payments\"); err != nil {\r\n\t\t\tCheckError(\"Error Mapping the Payments to SQL.\", err, false)\r\n\t\t\treturn err\r\n\t\t}\r\n\r\n\t\t// Build out the needed queries\r\n\t\tpayment += strings.Replace(fmt.Sprintf(\"%v\", detail), `\"\"`, \"@last_id\", -1)\r\n\t}\r\n\r\n\t// Save the Order and Reduce inventory\r\n\tif err = Modify(master + payment + inventory); err != nil {\r\n\t\tCheckError(\"Error creating the Order.\", err, false)\r\n\t\treturn err\r\n\t}\r\n\r\n\treturn\r\n}", "func (c *Contract) AddOrder(tx pgx.Tx, ctx context.Context, o OrderEnt) (OrderEnt, error) {\n\tvar lastInsID int32\n\ttimeStamp := time.Now().In(time.UTC)\n\n\tsql := `INSERT INTO orders(title, paid_by, order_code, order_status, total_price, tc_id, order_type, created_date, details, chat_id, description, total_price_ppn) VALUES($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11,$12) RETURNING id`\n\n\terr := tx.QueryRow(ctx, sql, o.Title, o.PaidBy, o.OrderCode, o.OrderStatus, o.TotalPrice, o.TcID, o.OrderType, timeStamp, o.Details, o.ChatID, o.Description, o.TotalPricePpn).Scan(&lastInsID)\n\n\to.ID = lastInsID\n\to.CreatedDate = timeStamp\n\n\treturn o, err\n}", "func (c *Client) CreateOrder(orderItems []OrderItem) (*Order, error) {\n\trb, err := 
json.Marshal(orderItems)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treq, err := http.NewRequest(\"POST\", fmt.Sprintf(\"%s/orders\", c.HostURL), strings.NewReader(string(rb)))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbody, err := c.doRequest(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\torder := Order{}\n\terr = json.Unmarshal(body, &order)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &order, nil\n}", "func ControlOrders(ch c.Channels) {\n\tnewOrders := make(chan msgs.OrderMsg, 1000)\n\tgo handleNewOrder(newOrders, ch)\n\tgo listenForNewOrders(newOrders, ch)\n\tgo checkForAcceptedOrders(newOrders, ch)\n\tfor {\n\t\tselect {\n\t\tcase newOrder := <-ch.DelegateOrder:\n\t\t\torderMsg := msgs.OrderMsg{Order: newOrder}\n\t\t\torderMsg.Id = (<-ch.MetaData).Id\n\t\t\tdelegateOrder(orderMsg, ch)\n\t\t\tnewOrders <- orderMsg\n\t\tcase orderCompleted := <-ch.CompletedOrder: // the external order has been taken\n\t\t\torderTensorDiffMsg := msgs.OrderTensorDiffMsg{\n\t\t\t\tOrder: orderCompleted,\n\t\t\t\tDiff: msgs.DIFF_REMOVE,\n\t\t\t\tId: (<-ch.MetaData).Id}\n\t\t\tfor i := 0; i < 5; i++ {\n\t\t\t\torderTensorDiffMsg.Send()\n\t\t\t\ttime.Sleep(1 * time.Millisecond)\n\t\t\t}\n\t\t}\n\t}\n}", "func addOrder(orderSlice []order.OrderItem, pizzaNo int, orderQty int) []order.OrderItem {\r\n\torderItem := order.OrderItem{\r\n\t\tPizzaNo: pizzaNo,\r\n\t\tOrderQty: orderQty,\r\n\t}\r\n\r\n\torderSlice = append(orderSlice, orderItem)\r\n\r\n\treturn orderSlice\r\n}", "func addOrder(pressedButton elevio.ButtonEvent) {\n\tfmt.Println(\"Entered addOrder\")\n\tif pressedButton.Button == elevio.BT_Cab {\n\t\televio.SetButtonLamp(pressedButton.Button, pressedButton.Floor, true)\n\t}\n\tif elevator.AssignedRequests[pressedButton.Floor][pressedButton.Button] == false {\n\t\televator.AssignedRequests[pressedButton.Floor][pressedButton.Button] = true\n\t\t//elevio.SetButtonLamp(pressedButton.Button, pressedButton.Floor, true)\n\n\t}\n}", "func (mds *marketDepthStream) AddOrder(isMarket bool, isAsk bool, price uint64, stockQuantity uint64) {\n\t// Do not add Market orders to depth\n\tif isMarket {\n\t\tprice = 0 // special value for market orders\n\t}\n\n\tvar l = mds.logger.WithFields(logrus.Fields{\n\t\t\"method\": \"MarketDepth.AddOrder\",\n\t\t\"param_isMarket\": isMarket,\n\t\t\"param_isAsk\": isAsk,\n\t\t\"param_price\": price,\n\t\t\"param_stockQuantity\": stockQuantity,\n\t})\n\n\tl.Debugf(\"Adding\")\n\n\tif isAsk {\n\t\tmds.askDepthLock.Lock()\n\t\tmds.askDepth[price] += stockQuantity\n\t\tmds.askDepthDiff[price] += int64(stockQuantity)\n\t\tmds.askDepthLock.Unlock()\n\n\t\tl.Debugf(\"Added\")\n\t\treturn\n\t}\n\tmds.bidDepthLock.Lock()\n\tmds.bidDepth[price] += stockQuantity\n\tmds.bidDepthDiff[price] += int64(stockQuantity)\n\tmds.bidDepthLock.Unlock()\n\n\tl.Debugf(\"Added\")\n}", "func (e *PostOrders) PostOrders(ctx context.Context, req *pb.Request, rsp *pb.Response) error {\n\tlogs.Info(\"---------------- api/v1.0/orders PostOrders publish order ----------------\")\n\n\t// prepare the response\n\trsp.Errno = utils.RECODE_OK\n\trsp.Errmsg = utils.RecodeText(rsp.Errno)\n\n\t// 1. get the current user_id from the session\n\t// build the cache connection config\n\tredisConfigMap := map[string]string{\n\t\t\"key\": utils.G_server_name,\n\t\t\"conn\": utils.G_redis_addr + \":\" + utils.G_redis_port,\n\t\t\"dbNum\": utils.G_redis_dbnum,\n\t}\n\tlogs.Info(redisConfigMap)\n\tredisConfig, _ := json.Marshal(redisConfigMap)\n\n\t// connect to the redis database and create a handle\n\tbm, err := cache.NewCache(\"redis\", string(redisConfig))\n\tif err != nil 
{\n\t\tlogs.Info(\"failed to create cache\", err)\n\t\trsp.Errno = utils.RECODE_DBERR\n\t\trsp.Errmsg = utils.RecodeText(rsp.Errno)\n\t\treturn nil\n\t}\n\n\t// build the cache key\n\tsessionIdUserId := req.Sessionid + \"user_id\"\n\n\tvalueId, _ := bm.Get(context.TODO(), sessionIdUserId)\n\tif valueId == nil {\n\t\tlogs.Info(\"failed to get login session from cache\", err)\n\t\trsp.Errno = utils.RECODE_SESSIONERR\n\t\trsp.Errmsg = utils.RecodeText(rsp.Errno)\n\t\treturn nil\n\t}\n\tuserId := int(valueId.([]uint8)[0])\n\tlogs.Info(userId, reflect.TypeOf(userId))\n\n\t// 2. get the JSON data from the user request and validate it\n\t// read the fields of the user's request data\n\tvar requestMap = make(map[string]interface{})\n\terr = json.Unmarshal(req.Body, &requestMap)\n\n\tif err != nil {\n\t\trsp.Errno = utils.RECODE_REQERR\n\t\trsp.Errmsg = utils.RecodeText(rsp.Errno)\n\t\treturn nil\n\t}\n\tlogs.Info(requestMap)\n\n\t// validate the input\n\t// check that the required user parameters are present\n\tif requestMap[\"house_id\"] == \"\" || requestMap[\"start_date\"] == \"\" || requestMap[\"end_date\"] == \"\" {\n\t\trsp.Errno = utils.RECODE_REQERR\n\t\trsp.Errmsg = utils.RecodeText(rsp.Errno)\n\t\treturn nil\n\t}\n\n\t// 3. make sure end_date is after start_date\n\t// parse the date/time values\n\tstartDateTime, _ := time.Parse(\"2006-01-02 15:04:05\", requestMap[\"start_date\"].(string))\n\tendDateTime, _ := time.Parse(\"2006-01-02 15:04:05\", requestMap[\"end_date\"].(string))\n\n\t// 4. compute the total number of days of the stay\n\tlogs.Info(startDateTime, endDateTime)\n\tdays := endDateTime.Sub(startDateTime).Hours()/24 + 1\n\tlogs.Info(days)\n\n\t// 5. load the associated house record via house_id\n\thouseId, _ := strconv.Atoi(requestMap[\"house_id\"].(string))\n\n\t// house object\n\thouse := models.House{Id: houseId}\n\to := orm.NewOrm()\n\tif err := o.Read(&house); err != nil {\n\t\trsp.Errno = utils.RECODE_NODATA\n\t\trsp.Errmsg = utils.RecodeText(rsp.Errno)\n\t\treturn nil\n\t}\n\t_, _ = o.LoadRelated(&house, \"user\")\n\n\t// 6. make sure the current user_id is not the user_id that owns the house\n\tif userId == house.User.Id {\n\t\trsp.Errno = utils.RECODE_ROLEERR\n\t\trsp.Errmsg = utils.RecodeText(rsp.Errno)\n\t\treturn nil\n\t}\n\n\t// 7. make sure the requested dates are valid and do not conflict\n\tif endDateTime.Before(startDateTime) {\n\t\trsp.Errno = utils.RECODE_ROLEERR\n\t\trsp.Errmsg = \"end time is before start time\"\n\t\treturn nil\n\t}\n\n\t// 7.1 credit-check step\n\t// 8. build the order\n\tamount := days * float64(house.Price)\n\torder := models.OrderHouse{}\n\torder.House = &house\n\tuser := models.User{Id: userId}\n\torder.User = &user\n\torder.BeginDate = startDateTime\n\torder.EndDate = endDateTime\n\torder.Days = int(days)\n\torder.HousePrice = house.Price\n\torder.Amount = int(amount)\n\torder.Status = models.ORDER_STATUS_WAIT_ACCEPT\n\t// credit flag\n\torder.Credit = false\n\n\tlogs.Info(order)\n\t// 9. insert the order into the database\n\tif _, err := o.Insert(&order); err != nil {\n\t\trsp.Errno = utils.RECODE_DBERR\n\t\trsp.Errmsg = utils.RecodeText(rsp.Errno)\n\t\treturn nil\n\t}\n\t// 10. return the order_id\n\t_ = bm.Put(context.TODO(), sessionIdUserId, strconv.Itoa(userId), time.Second*7200)\n\trsp.OrderId = int64(order.Id)\n\treturn nil\n}", "func (r MysqlOrderRepository) AddOrder(id int) error {\n\tfmt.Printf(\"adding order %d\", id)\n\n\treturn nil\n}", "func (handler *rpcHandler) SubscribeToOrders(ctx context.Context) (*ethRpc.Subscription, error) {\n\tlog.Debug(\"received order event subscription request via RPC\")\n\tsubscription, err := SetupOrderStream(ctx, handler.app)\n\tif err != nil {\n\t\tlog.WithField(\"error\", err.Error()).Error(\"internal error in `mesh_subscribe` to `orders` RPC call\")\n\t\treturn nil, errInternal\n\t}\n\treturn subscription, nil\n}", "func (m *TodoItemMutation) AddOrder(i int) {\n\tif m.add_order != nil {\n\t\t*m.add_order += i\n\t} else 
{\n\t\tm.add_order = &i\n\t}\n}", "func (c *Coinbene) PlaceSpotOrders(orders []PlaceOrderRequest) ([]OrderPlacementResponse, error) {\n\tif len(orders) == 0 {\n\t\treturn nil, errors.New(\"orders is nil\")\n\t}\n\n\ttype ord struct {\n\t\tSymbol string `json:\"symbol\"`\n\t\tDirection string `json:\"direction\"`\n\t\tPrice string `json:\"price\"`\n\t\tQuantity string `json:\"quantity\"`\n\t\tOrderType string `json:\"orderType\"`\n\t\tNotional string `json:\"notional,omitempty\"`\n\t\tClientID string `json:\"clientId,omitempty\"`\n\t}\n\n\tvar reqOrders []ord\n\tfor x := range orders {\n\t\to := ord{\n\t\t\tSymbol: orders[x].Symbol,\n\t\t\tPrice: strconv.FormatFloat(orders[x].Price, 'f', -1, 64),\n\t\t\tQuantity: strconv.FormatFloat(orders[x].Quantity, 'f', -1, 64),\n\t\t}\n\t\tswitch orders[x].Direction {\n\t\tcase order.Buy.Lower():\n\t\t\to.Direction = buyDirection\n\t\tcase order.Sell.Lower():\n\t\t\to.Direction = sellDirection\n\t\tdefault:\n\t\t\treturn nil,\n\t\t\t\tfmt.Errorf(\"invalid direction '%v', must be either 'buy' or 'sell'\",\n\t\t\t\t\torders[x].Direction)\n\t\t}\n\n\t\tswitch orders[x].OrderType {\n\t\tcase order.Limit.Lower():\n\t\t\to.OrderType = limitOrder\n\t\tcase order.Market.Lower():\n\t\t\to.OrderType = marketOrder\n\t\tdefault:\n\t\t\treturn nil,\n\t\t\t\terrors.New(\"invalid order type, must be either 'limit' or 'market'\")\n\t\t}\n\n\t\tif orders[x].ClientID != \"\" {\n\t\t\to.ClientID = orders[x].ClientID\n\t\t}\n\t\tif orders[x].Notional != 0 {\n\t\t\to.Notional = strconv.Itoa(orders[x].Notional)\n\t\t}\n\t\treqOrders = append(reqOrders, o)\n\t}\n\n\tresp := struct {\n\t\tData []OrderPlacementResponse `json:\"data\"`\n\t}{}\n\tpath := coinbeneAPIVersion + coinbeneBatchPlaceOrder\n\terr := c.SendAuthHTTPRequest(exchange.RestSpot, http.MethodPost,\n\t\tpath,\n\t\tcoinbeneBatchPlaceOrder,\n\t\tfalse,\n\t\treqOrders,\n\t\t&resp,\n\t\tspotBatchOrder)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn resp.Data, nil\n}", "func (o *GenericSorting) Add(order string) *GenericSorting {\n\tif strings.TrimSpace(order) != \"\" {\n\t\to.Ordering = append(o.Ordering, strings.TrimSpace(order))\n\t}\n\treturn o\n}", "func (o *ExchangeCurrency) AddUserOrdersG(insert bool, related ...*UserOrder) error {\n\treturn o.AddUserOrders(boil.GetDB(), insert, related...)\n}", "func (oiu *OrderInfoUpdate) AddOrderAddress(o ...*OrderAddress) *OrderInfoUpdate {\n\tids := make([]int, len(o))\n\tfor i := range o {\n\t\tids[i] = o[i].ID\n\t}\n\treturn oiu.AddOrderAddresIDs(ids...)\n}", "func (oiuo *OrderInfoUpdateOne) AddOrderAddress(o ...*OrderAddress) *OrderInfoUpdateOne {\n\tids := make([]int, len(o))\n\tfor i := range o {\n\t\tids[i] = o[i].ID\n\t}\n\treturn oiuo.AddOrderAddresIDs(ids...)\n}", "func (m NoMDEntries) SetNumberOfOrders(v int) {\n\tm.Set(field.NewNumberOfOrders(v))\n}", "func (oiuo *OrderInfoUpdateOne) AddOrderAddresIDs(ids ...int) *OrderInfoUpdateOne {\n\toiuo.mutation.AddOrderAddresIDs(ids...)\n\treturn oiuo\n}", "func (luo *LocationUpdateOne) AddWorkOrderIDs(ids ...string) *LocationUpdateOne {\n\tif luo.work_orders == nil {\n\t\tluo.work_orders = make(map[string]struct{})\n\t}\n\tfor i := range ids {\n\t\tluo.work_orders[ids[i]] = struct{}{}\n\t}\n\treturn luo\n}", "func (_WyvernExchange *WyvernExchangeSession) ValidateOrder(addrs [7]common.Address, uints [9]*big.Int, feeMethod uint8, side uint8, saleKind uint8, howToCall uint8, calldata []byte, replacementPattern []byte, staticExtradata []byte, v uint8, r [32]byte, s [32]byte) (bool, error) {\n\treturn 
_WyvernExchange.Contract.ValidateOrder(&_WyvernExchange.CallOpts, addrs, uints, feeMethod, side, saleKind, howToCall, calldata, replacementPattern, staticExtradata, v, r, s)\n}", "func (a *Client) SearchOrders(params *SearchOrdersParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*SearchOrdersOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewSearchOrdersParams()\n\t}\n\top := &runtime.ClientOperation{\n\t\tID: \"SearchOrders\",\n\t\tMethod: \"POST\",\n\t\tPathPattern: \"/v2/orders/search\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &SearchOrdersReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t}\n\tfor _, opt := range opts {\n\t\topt(op)\n\t}\n\n\tresult, err := a.transport.Submit(op)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsuccess, ok := result.(*SearchOrdersOK)\n\tif ok {\n\t\treturn success, nil\n\t}\n\t// unexpected success response\n\t// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue\n\tmsg := fmt.Sprintf(\"unexpected success response for SearchOrders: API contract not enforced by server. Client expected to get an error, but got: %T\", result)\n\tpanic(msg)\n}", "func (lu *LocationUpdate) AddWorkOrderIDs(ids ...string) *LocationUpdate {\n\tif lu.work_orders == nil {\n\t\tlu.work_orders = make(map[string]struct{})\n\t}\n\tfor i := range ids {\n\t\tlu.work_orders[ids[i]] = struct{}{}\n\t}\n\treturn lu\n}", "func (o *V3SetErrorOrderInput) GetOrders() []V3OrderIntegrationError {\n\tif o == nil || o.Orders == nil {\n\t\tvar ret []V3OrderIntegrationError\n\t\treturn ret\n\t}\n\treturn *o.Orders\n}", "func Add(redisclient *redis.Client, objtoinsert Order) helper.Resultado {\n\n\tdatabase := new(helper.DatabaseX)\n\tdatabase.Collection = \"orders\"\n\tdatabase.Database, _ = redisclient.Get(\"API.MongoDB.Database\").Result()\n\tdatabase.Location, _ = redisclient.Get(\"API.MongoDB.Location\").Result()\n\n\tsession, err := mgo.Dial(database.Location)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer session.Close()\n\n\t// Optional. 
Switch the session to a monotonic behavior.\n\tsession.SetMode(mgo.Monotonic, true)\n\n\tcollection := session.DB(database.Database).C(database.Collection)\n\n\terr = collection.Insert(objtoinsert)\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tvar res helper.Resultado\n\tres.ErrorCode = \"0001\"\n\tres.ErrorDescription = \"Order added\"\n\tres.IsSuccessful = \"Y\"\n\n\treturn res\n}", "func (o *Item) AddOrderItems(ctx context.Context, exec boil.ContextExecutor, insert bool, related ...*OrderItem) error {\n\tvar err error\n\tfor _, rel := range related {\n\t\tif insert {\n\t\t\tqueries.Assign(&rel.ItemID, o.ID)\n\t\t\tif err = rel.Insert(ctx, exec, boil.Infer()); err != nil {\n\t\t\t\treturn errors.Wrap(err, \"failed to insert into foreign table\")\n\t\t\t}\n\t\t} else {\n\t\t\tupdateQuery := fmt.Sprintf(\n\t\t\t\t\"UPDATE \\\"order_items\\\" SET %s WHERE %s\",\n\t\t\t\tstrmangle.SetParamNames(\"\\\"\", \"\\\"\", 0, []string{\"item_id\"}),\n\t\t\t\tstrmangle.WhereClause(\"\\\"\", \"\\\"\", 0, orderItemPrimaryKeyColumns),\n\t\t\t)\n\t\t\tvalues := []interface{}{o.ID, rel.ID}\n\n\t\t\tif boil.IsDebug(ctx) {\n\t\t\t\twriter := boil.DebugWriterFrom(ctx)\n\t\t\t\tfmt.Fprintln(writer, updateQuery)\n\t\t\t\tfmt.Fprintln(writer, values)\n\t\t\t}\n\t\t\tif _, err = exec.ExecContext(ctx, updateQuery, values...); err != nil {\n\t\t\t\treturn errors.Wrap(err, \"failed to update foreign table\")\n\t\t\t}\n\n\t\t\tqueries.Assign(&rel.ItemID, o.ID)\n\t\t}\n\t}\n\n\tif o.R == nil {\n\t\to.R = &itemR{\n\t\t\tOrderItems: related,\n\t\t}\n\t} else {\n\t\to.R.OrderItems = append(o.R.OrderItems, related...)\n\t}\n\n\tfor _, rel := range related {\n\t\tif rel.R == nil {\n\t\t\trel.R = &orderItemR{\n\t\t\t\tItem: o,\n\t\t\t}\n\t\t} else {\n\t\t\trel.R.Item = o\n\t\t}\n\t}\n\treturn nil\n}", "func (queue *Queue) Add(order *Order) {\n\tif len(*queue) == 0 {\n\t\t*queue = append(*queue, order)\n\t} else {\n\n\t\tvar appended bool\n\t\tappended = false\n\t\tvar i int\n\t\tvar addedOrder *Order\n\t\tfor i, addedOrder = range *queue {\n\t\t\tif order.priority > addedOrder.priority {\n\t\t\t\t*queue = append((*queue)[:i], append(Queue{order}, (*queue)[i:]...)...)\n\t\t\t\tappended = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !appended {\n\t\t\t*queue = append(*queue, order)\n\t\t}\n\t}\n}", "func HasOrders() predicate.Product {\n\treturn predicate.Product(func(s *sql.Selector) {\n\t\tstep := sqlgraph.NewStep(\n\t\t\tsqlgraph.From(Table, FieldID),\n\t\t\tsqlgraph.To(OrdersTable, FieldID),\n\t\t\tsqlgraph.Edge(sqlgraph.M2M, false, OrdersTable, OrdersPrimaryKey...),\n\t\t)\n\t\tsqlgraph.HasNeighbors(s, step)\n\t})\n}", "func (o *ExchangeCurrency) AddUserOrders(exec boil.Executor, insert bool, related ...*UserOrder) error {\n\tvar err error\n\tfor _, rel := range related {\n\t\tif insert {\n\t\t\trel.ExchangeCurrencyID = o.ID\n\t\t\tif err = rel.Insert(exec, boil.Infer()); err != nil {\n\t\t\t\treturn errors.Wrap(err, \"failed to insert into foreign table\")\n\t\t\t}\n\t\t} else {\n\t\t\tupdateQuery := fmt.Sprintf(\n\t\t\t\t\"UPDATE \\\"user_order\\\" SET %s WHERE %s\",\n\t\t\t\tstrmangle.SetParamNames(\"\\\"\", \"\\\"\", 1, []string{\"exchange_currency_id\"}),\n\t\t\t\tstrmangle.WhereClause(\"\\\"\", \"\\\"\", 2, userOrderPrimaryKeyColumns),\n\t\t\t)\n\t\t\tvalues := []interface{}{o.ID, rel.ID}\n\n\t\t\tif boil.DebugMode {\n\t\t\t\tfmt.Fprintln(boil.DebugWriter, updateQuery)\n\t\t\t\tfmt.Fprintln(boil.DebugWriter, values)\n\t\t\t}\n\n\t\t\tif _, err = exec.Exec(updateQuery, values...); err != nil {\n\t\t\t\treturn 
errors.Wrap(err, \"failed to update foreign table\")\n\t\t\t}\n\n\t\t\trel.ExchangeCurrencyID = o.ID\n\t\t}\n\t}\n\n\tif o.R == nil {\n\t\to.R = &exchangeCurrencyR{\n\t\t\tUserOrders: related,\n\t\t}\n\t} else {\n\t\to.R.UserOrders = append(o.R.UserOrders, related...)\n\t}\n\n\tfor _, rel := range related {\n\t\tif rel.R == nil {\n\t\t\trel.R = &userOrderR{\n\t\t\t\tExchangeCurrency: o,\n\t\t\t}\n\t\t} else {\n\t\t\trel.R.ExchangeCurrency = o\n\t\t}\n\t}\n\treturn nil\n}", "func NewOrdersAPI(handlerConfig handlers.HandlerConfig) *ordersoperations.MymoveAPI {\n\n\t// Wire up the handlers to the ordersAPIMux\n\tordersSpec, err := loads.Analyzed(ordersapi.SwaggerJSON, \"\")\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\n\tordersAPI := ordersoperations.NewMymoveAPI(ordersSpec)\n\tordersAPI.ServeError = handlers.ServeCustomError\n\tordersAPI.GetOrdersHandler = GetOrdersHandler{handlerConfig}\n\tordersAPI.GetOrdersByIssuerAndOrdersNumHandler = GetOrdersByIssuerAndOrdersNumHandler{handlerConfig}\n\tordersAPI.IndexOrdersForMemberHandler = IndexOrdersForMemberHandler{handlerConfig}\n\tordersAPI.PostRevisionHandler = PostRevisionHandler{handlerConfig}\n\tordersAPI.PostRevisionToOrdersHandler = PostRevisionToOrdersHandler{handlerConfig}\n\treturn ordersAPI\n}", "func (r *CompanySalesOrderLinesCollectionRequest) Add(ctx context.Context, reqObj *SalesOrderLine) (resObj *SalesOrderLine, err error) {\n\terr = r.JSONRequest(ctx, \"POST\", \"\", reqObj, &resObj)\n\treturn\n}", "func UpdateOrders(UpdatedAllStates chan<- AllStates, MsgToNetwork chan<- NetworkMessage) {\n\t//Inits the channel for receiving button information and a AllStates variable\n\tbuttonAllStates := AllStates{}\n\tbuttonPressed := make(chan elevio.ButtonEvent)\n\tgo elevio.PollButtons(buttonPressed)\n\n\tfor {\n\t\tselect {\n\t\tcase NewOrderLocal := <-buttonPressed: //When a button is pressed\n\t\t\t//Copy the share AllStates (LocalAllStates) varaible to a one that is only used locally in this func (networkAllStates)\n\t\t\tbuttonAllStates = copyAllState(LocalAllStates)\n\t\t\t//Check what type of button was pressed\n\t\t\tswitch ButtonType := NewOrderLocal.Button; ButtonType {\n\t\t\tcase 0: //up, Sets hall request up for right floor to true\n\t\t\t\tbuttonAllStates.HallRequests[NewOrderLocal.Floor][0] = true\n\t\t\tcase 1: //down, Sets hall request down for right floor to true\n\t\t\t\tbuttonAllStates.HallRequests[NewOrderLocal.Floor][1] = true\n\t\t\tcase 2: //cab, Sets cab request for right floor to true\n\t\t\t\tbuttonAllStates.States[ID].CabRequests[NewOrderLocal.Floor] = true\n\t\t\t}\n\n\t\t\t//Saves to file, LocalALlStates, and sends the update to DistributeOrders and Network\n\t\t\tThisNetworkMessage.MessageType = \"StateUpdate\" //\"This elevator has had an update in its state!\"\n\t\t\tThisNetworkMessage.HallRequests = buttonAllStates.HallRequests\n\t\t\tThisNetworkMessage.RemoteState = buttonAllStates.States[ID]\n\n\t\t\tif len(buttonAllStates.States) == 1 { //Sets lights after FSM event if it is the only elevator on network -- Single elevator operation\n\t\t\t\tSetLights(buttonAllStates, ID)\n\t\t\t}\n\n\t\t\tsavingFile(buttonAllStates, ID)\n\t\t\tLocalAllStates = buttonAllStates\n\t\t\tMsgToNetwork <- ThisNetworkMessage\n\t\t\tUpdatedAllStates <- buttonAllStates\n\n\t\t}\n\t}\n}", "func (api API) MarketOrders(charID int64) (*MarketOrdersResult, error) {\n\toutput := MarketOrdersResult{}\n\targs := url.Values{}\n\targs.Add(\"characterID\", strconv.FormatInt(charID,10))\n\terr := api.Call(MarketOrdersURL, args, 
&output)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif output.Error != nil {\n\t\treturn nil, output.Error\n\t}\n\treturn &output, nil\n}", "func (_WyvernExchange *WyvernExchangeCallerSession) ValidateOrder(addrs [7]common.Address, uints [9]*big.Int, feeMethod uint8, side uint8, saleKind uint8, howToCall uint8, calldata []byte, replacementPattern []byte, staticExtradata []byte, v uint8, r [32]byte, s [32]byte) (bool, error) {\n\treturn _WyvernExchange.Contract.ValidateOrder(&_WyvernExchange.CallOpts, addrs, uints, feeMethod, side, saleKind, howToCall, calldata, replacementPattern, staticExtradata, v, r, s)\n}", "func (oiu *OrderInfoUpdate) AddOrderAddresIDs(ids ...int) *OrderInfoUpdate {\n\toiu.mutation.AddOrderAddresIDs(ids...)\n\treturn oiu\n}", "func New() *Orders {\n\treturn &Orders{\n\t\tcache: cache.New(),\n\t\titems: newItems(),\n\t}\n}", "func (h *Hbdm) OpenOrders(symbol string, pageIndex, pageSize *int) (orders *OrdersResponse, err error) {\n\tpayload := make(map[string]interface{}, 3)\n\tif symbol != \"\" {\n\t\tpayload[\"symbol\"] = symbol\n\t}\n\tif pageIndex != nil {\n\t\tpayload[\"page_index\"] = *pageIndex\n\t}\n\tif pageSize != nil {\n\t\tpayload[\"page_size\"] = *pageSize\n\t}\n\n\tr, err := h.client.do(\"POST\", \"contract_openorders\", payload, true)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tvar response interface{}\n\tif err = json.Unmarshal(r, &response); err != nil {\n\t\treturn\n\t}\n\n\tif err = handleErr(response); err != nil {\n\t\treturn\n\t}\n\n\terr = json.Unmarshal(r, &orders)\n\treturn\n}", "func (m *Order) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (q *Queue) Add(order *Order) *list.Element {\n\tq.quantity += order.quantity\n\treturn q.orders.PushBack(order)\n}", "func (c *Client) CreateOrder() (*Order, error) {\n\turl := c.option.buildURL(\"orders\")\n\n\tresp, body, errs := c.request.Clone().\n\t\tPost(url).\n\t\tSet(\"Content-Type\", \"application/json\").\n\t\tSetBasicAuth(c.option.Key, c.option.Secret).\n\t\tEndBytes()\n\tif len(errs) > 0 {\n\t\treturn nil, errs[0]\n\t}\n\n\tif resp.StatusCode >= 400 {\n\t\tvar apiErr ApiError\n\t\terr := json.Unmarshal(body, &apiErr)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, &apiErr\n\t}\n\n\tvar order Order\n\terr := json.Unmarshal(body, &order)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &order, nil\n\n}", "func CreateOrder(ctx echo.Context) error {\n\tmetrics.CaptureDelay(\"CreateOrderHandler\")()\n\tvar req CreateOrderRequest\n\tif err := ctx.Bind(&req); err != nil {\n\t\tlog.Println(\"Error binding request\", err)\n\t\treturn ctx.JSON(ErrBadRequestInvalidBody())\n\t}\n\t//validate request\n\tif httpCode, err := req.Validate(); err != nil {\n\t\treturn ctx.JSON(httpCode, err)\n\t}\n\n\t// execute\n\tord, err := order.Create(ctx.Request().Context(), req.Origin, req.Destination)\n\tif err != nil {\n\t\tlog.Println(\"Error creating order \", err)\n\t\t// check for error and return appropriately\n\t\treturn ctx.JSON(ErrInternalServerError(err.Error()))\n\t}\n\treturn ctx.JSON(http.StatusOK, ord)\n}", "func (a *OrdersApiService) V1OrdersPost(ctx _context.Context, orderNewSingleRequest OrderNewSingleRequest) (OrderExecutionReport, *_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodPost\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes 
[]byte\n\t\tlocalVarReturnValue OrderExecutionReport\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/v1/orders\"\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{\"application/json\"}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\", \"appliction/json\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\t// body params\n\tlocalVarPostBody = &orderNewSingleRequest\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 400 {\n\t\t\tvar v ValidationError\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 490 {\n\t\t\tvar v Message\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 504 {\n\t\t\tvar v Message\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHTTPResponse, nil\n}", "func (m OrdersType) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\t// value enum\n\tif err := m.validateOrdersTypeEnum(\"\", \"body\", m); err != 
nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (m *Marketplace) CreateOrder(ctx context.Context, req *pb.Order) (*pb.Order, error) {\n\tif err := CheckPermissions(ctx, req); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err := m.validate(req); err != nil {\n\t\treturn nil, err\n\t}\n\n\torder := *req\n\t// generate a unique ID if it's empty\n\tif order.Id == \"\" {\n\t\torder.Id = IDGenerator()\n\t}\n\n\tlogger := ctx_zap.Extract(ctx)\n\tlogger.Info(\"Creating order\", zap.Any(\"order\", order))\n\n\tif err := m.createOrder(order); err != nil {\n\t\treturn nil, status.Errorf(codes.Internal, \"cannot create order: %v\", err)\n\t}\n\n\tlogger.Info(\"Order created\", zap.String(\"id\", order.Id))\n\treturn m.GetOrderByID(ctx, &pb.ID{Id: order.Id})\n}", "func (pu *PharmacistUpdate) AddOrderpharmacist(o ...*Order) *PharmacistUpdate {\n\tids := make([]int, len(o))\n\tfor i := range o {\n\t\tids[i] = o[i].ID\n\t}\n\treturn pu.AddOrderpharmacistIDs(ids...)\n}", "func (o *V3SetErrorOrderInput) GetOrdersOk() (*[]V3OrderIntegrationError, bool) {\n\tif o == nil || o.Orders == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Orders, true\n}", "func orderadd(httpwriter http.ResponseWriter, req *http.Request) {\n\n\t// _, _ = security.ValidateTokenV2(redisclient, req)\n\n\terror, _ := security.ValidateTokenV2(redisclient, req)\n\n\tif error == \"NotOkToLogin\" {\n\t\thttp.Redirect(httpwriter, req, \"/login\", 303)\n\t\treturn\n\t}\n\n\tordershandler.Add(httpwriter, req, redisclient, sysid)\n}", "func (_WyvernExchange *WyvernExchangeCaller) ValidateOrder(opts *bind.CallOpts, addrs [7]common.Address, uints [9]*big.Int, feeMethod uint8, side uint8, saleKind uint8, howToCall uint8, calldata []byte, replacementPattern []byte, staticExtradata []byte, v uint8, r [32]byte, s [32]byte) (bool, error) {\n\tvar (\n\t\tret0 = new(bool)\n\t)\n\tout := ret0\n\terr := _WyvernExchange.contract.Call(opts, out, \"validateOrder_\", addrs, uints, feeMethod, side, saleKind, howToCall, calldata, replacementPattern, staticExtradata, v, r, s)\n\treturn *ret0, err\n}", "func (s *ApiService) GetOrders(ctx context.Context, orderId string) (ordf.ImplResponse, error) {\n\t// TODO: implement long polling on separate polling API\n\t// will need to update SDK to pass in last known state and check for change\n\torder, err := s.ordersService.GetOrder(ctx, orderId)\n\tif err != nil {\n\t\treturn ordf.Response(500, nil), err\n\t}\n\n\treturn ordf.Response(200, order), nil\n}", "func (c *Client) UpdateOrder(orderID string, orderItems []OrderItem) (*Order, error) {\n\trb, err := json.Marshal(orderItems)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treq, err := http.NewRequest(\"PUT\", fmt.Sprintf(\"%s/orders/%s\", c.HostURL, orderID), strings.NewReader(string(rb)))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbody, err := c.doRequest(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\torder := Order{}\n\terr = json.Unmarshal(body, &order)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &order, nil\n}", "func (t *RedisDb) AddOrderToSet(order *Order) error {\n\t/// use Hash to store order detail\n\tm := map[string]string{\n\t\t\"ID\": strconv.FormatInt(order.ID, 10),\n\t\t\"Who\": order.Who,\n\t\t\"AorB\": strconv.FormatInt(int64(order.AorB), 10),\n\t\t\"Symbol\": order.Symbol,\n\t\t\"Timestamp\": strconv.FormatInt(order.Timestamp, 10),\n\t\t\"Price\": strconv.FormatFloat(order.Price, 'f', -1, 64),\n\t\t\"Volume\": 
strconv.FormatFloat(order.Volume, 'f', -1, 64),\n\t\t\"TotalVolume\": strconv.FormatFloat(order.TotalVolume, 'f', -1, 64),\n\t\t\"Fee\": strconv.FormatFloat(order.Fee, 'f', -1, 64),\n\t\t\"Status\": strconv.FormatInt(int64(order.Status), 10),\n\t}\n\n\terr := t.Send(\"MULTI\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\terr = t.Send(\"SADD\", orderSetKey(order.Symbol), orderHashKey(order.Who, order.ID))\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\terr = t.Send(\"HMSET\", redis.Args{}.Add(orderHashKey(order.Who, order.ID)).AddFlat(m)...)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\t_, err = t.Do(\"EXEC\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn nil\n}", "func (o *Item) AddOrderItemsG(ctx context.Context, insert bool, related ...*OrderItem) error {\n\treturn o.AddOrderItems(ctx, boil.GetContextDB(), insert, related...)\n}", "func AddRoutes(e *echo.Echo) {\n\te.GET(\"/\", func(ctx echo.Context) error { return ctx.JSON(http.StatusOK, Heartbeat()) })\n\te.POST(\"/order\", handlers.CreateOrder)\n\te.PUT(\"/order/:id\", handlers.UpdateOrder)\n\te.PATCH(\"/order/:id\", handlers.UpdateOrder)\n\te.GET(\"/orders\", handlers.ListOrders)\n}", "func (s *Server) CreateOrder(ctx context.Context, in *orderPb.CreateOrderRequest) (*orderPb.CreateOrderResponse, error) {\n\tt := time.Now()\n\trpcRequestCount.With(prometheus.Labels{\"method\": \"CreateOrderTotal\"}).Inc()\n\n\t// Check input params.\n\taddress := in.GetAddress()\n\trequestId := in.GetRequestId()\n\tfileName := in.GetFileName()\n\tfileSize := in.GetFileSize()\n\tif address == \"\" || requestId == \"\" || fileName == \"\" || fileSize <= 0 {\n\t\trpcRequestCount.With(prometheus.Labels{\"method\": \"CreateOrderFailed\"}).Inc()\n\t\treturn nil, errorm.RequestParamEmpty\n\t}\n\n\tdefer func(t time.Time) {\n\t\tdefer rpcRequestDuration.With(prometheus.Labels{\"method\": \"CreateOrder\"}).Observe(float64(time.Since(t).Microseconds()) / 1000)\n\t}(t)\n\n\t// Create order by address, requestId, fileName and fileSize.\n\tid, err := s.CreateOrderController(address, requestId, fileName, fileSize)\n\tif err != nil {\n\t\trpcRequestCount.With(prometheus.Labels{\"method\": \"CreateOrderError\"}).Inc()\n\t\treturn nil, err\n\t}\n\n\trpcRequestCount.With(prometheus.Labels{\"method\": \"CreateOrderSuccess\"}).Inc()\n\treturn &orderPb.CreateOrderResponse{OrderId: *id, SaveDays: int64(s.Time)}, nil\n}", "func (ot *OrderTable) AddJSONOrderFromHTTP(w http.ResponseWriter, r *http.Request) {\n\tvar oj orderJSON\n\t// parse form from request\n\terr := r.ParseForm()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t\treturn\n\t}\n\t// deserialise the JSON into a struct\n\tdecoder := json.NewDecoder(r.Body)\n\terr = decoder.Decode(&oj)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\t// add order to table\n\taddOrderToTable(newOrderFromJSON(&oj), ot)\n}", "func (s *Store) CreateOrder(o *models.Order) (po *models.Order, err error) {\n\tc := s.orders // Basic Validation\n\n\to.Time = primitive.NewDateTimeFromTime(time.Now())\n\n\tif err = o.Validate(); err != nil {\n\t\treturn po, err\n\t}\n\n\tif err = s.processOrder(o); err != nil {\n\t\treturn po, err\n\t}\n\n\tif id, err := s.w.Add(c, o); err != nil {\n\t\treturn nil, err\n\t} else if o.ID, err = primitive.ObjectIDFromHex(id); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn o, err\n}", "func UpdateGlobalOrders(receivedOrder Order) {\n\n\tif receivedOrder.OrderHandledAtFloor {\n\t\n\t\tglobalOrders[receivedOrder.Floor] = false\n\t\tglobalOrders[N_FLOORS-2 + receivedOrder.Floor] = false\n\t\t\n\t\tif (receivedOrder.Floor 
< N_FLOORS-1) {\n\t\t\tElev_set_button_lamp(BUTTON_CALL_UP, receivedOrder.Floor, 0)\n\t\t}\n\t\tif (receivedOrder.Floor > 0) {\n\t\t\tElev_set_button_lamp(BUTTON_CALL_DOWN, receivedOrder.Floor, 0)\n\t\t}\n\t\t\n\t} else {\n\t\n\t\tif receivedOrder.Direction == 1 {\n\t\t\tglobalOrders[receivedOrder.Floor] = true\n\t\t\tElev_set_button_lamp(BUTTON_CALL_UP, receivedOrder.Floor, 1)\n\t\t} else if receivedOrder.Direction == 0 {\n\t\t\tglobalOrders[N_FLOORS-2 + receivedOrder.Floor] = true\n\t\t\tElev_set_button_lamp(BUTTON_CALL_DOWN, receivedOrder.Floor, 1)\n\t\t} else {\n\t\t\tprintln(\"Not valid direction, or invalid floor\")\n\t\t}\n\t\t\n\t}\n}", "func (rrq *ReserveRoomQuery) Order(o ...OrderFunc) *ReserveRoomQuery {\n\trrq.order = append(rrq.order, o...)\n\treturn rrq\n}", "func (trading *TradingProvider) Orders(symbols []schemas.Symbol) (orders []schemas.Order, err error) {\n\tif len(symbols) > 0 {\n\t\tfor _, symb := range symbols {\n\t\t\tordrs, err := trading.ordersBySymbol(symb.OriginalName)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\torders = append(orders, ordrs...)\n\t\t}\n\t\treturn\n\t}\n\n\treturn trading.allOrders()\n}", "func updateTaskOrders(c *gin.Context) {\r\n\treq, serr := getUpdateTaskOrdersRequest(c)\r\n\tif serr != nil {\r\n\t\tapi.SetErrorStatus(c, serr)\r\n\t\treturn\r\n\t}\r\n\ttx := orm.GetDB().Begin()\r\n\tsrvc := service.NewTaskService(tx)\r\n\tserr = srvc.UpdateTaskOrders(\r\n\t\treq.TaskID, req.FromBoardID, req.FromDispOrder, req.ToBoardID, req.ToDispOrder,\r\n\t)\r\n\tif serr != nil {\r\n\t\tapi.Rollback(tx)\r\n\t\tapi.SetErrorStatus(c, serr)\r\n\t\treturn\r\n\t}\r\n\tserr = api.Commit(tx)\r\n\tif serr != nil {\r\n\t\tapi.SetErrorStatus(c, serr)\r\n\t\treturn\r\n\t}\r\n\tc.Status(http.StatusOK)\r\n}", "func (c *OrderClient) Use(hooks ...Hook) {\n\tc.hooks.Order = append(c.hooks.Order, hooks...)\n}", "func (puo *PharmacistUpdateOne) AddOrderpharmacist(o ...*Order) *PharmacistUpdateOne {\n\tids := make([]int, len(o))\n\tfor i := range o {\n\t\tids[i] = o[i].ID\n\t}\n\treturn puo.AddOrderpharmacistIDs(ids...)\n}", "func (gc grpcClient) createOrder(order order.Order) error {\n\tconn, err := grpc.Dial(grpcUri, grpc.WithInsecure())\n\tif err != nil {\n\t\tlog.Fatalf(\"Unable to connect: %v\", err)\n\t}\n\tdefer conn.Close()\n\tclient := eventstore.NewEventStoreClient(conn)\n\torderJSON, _ := json.Marshal(order)\n\n\teventid, _ := uuid.NewUUID()\n\tevent := &eventstore.Event{\n\t\tEventId: eventid.String(),\n\t\tEventType: event,\n\t\tAggregateId: order.ID,\n\t\tAggregateType: aggregate,\n\t\tEventData: string(orderJSON),\n\t\tStream: \"ORDERS\",\n\t}\n\n\tcreateEventRequest := &eventstore.CreateEventRequest{Event: event}\n\tresp, err := client.CreateEvent(context.Background(), createEventRequest)\n\tif err != nil {\n\t\tif st, ok := status.FromError(err); ok {\n\t\t\treturn fmt.Errorf(\"error from RPC server with: status code:%s message:%s\", st.Code().String(), st.Message())\n\t\t}\n\t\treturn fmt.Errorf(\"error from RPC server: %w\", err)\n\t}\n\tif resp.IsSuccess {\n\t\treturn nil\n\t}\n\treturn errors.New(\"error from RPC server\")\n}", "func NewListOrdersRequestBody(p *restapi.ListOrdersPayload) *ListOrdersRequestBody {\n\tbody := &ListOrdersRequestBody{\n\t\tStatus: p.Status,\n\t\tCollection: p.Collection,\n\t\tTradePairHash: p.TradePairHash,\n\t\tSortByVdf: p.SortByVdf,\n\t\tLimit: p.Limit,\n\t}\n\treturn body\n}", "func (rq *ReceiptQuery) Order(o ...OrderFunc) *ReceiptQuery {\n\trq.order = append(rq.order, o...)\n\treturn rq\n}", "func (a 
*App) CreateOrder(o *model.Order, shipAddr *model.Address, billAddr *model.Address) (*model.Order, *model.AppErr) {\n\to.PreSave()\n\treturn a.Srv().Store.Order().Save(o, shipAddr, billAddr)\n}", "func GetOrders(c *gin.Context) {\n\tid := c.Params.ByName(\"id\")\n\n\tif id == \"\" {\n\t\terrors.ErrRequiredParam(c.Writer, http.StatusBadRequest, \"order id is required\")\n\t\treturn\n\t}\n\n\torder, err := s.client.GetOrder(id)\n\tif err != nil {\n\t\ts.l.Printf(\"failed to request order information: %s\\n\", err)\n\t\treturn\n\t}\n\n\tmodels.Respond(c.Writer, order)\n\treturn\n}", "func (ouo *OrderUpdateOne) AddCustomerIDs(ids ...int) *OrderUpdateOne {\n\touo.mutation.AddCustomerIDs(ids...)\n\treturn ouo\n}", "func OrdersHandler(c buffalo.Context) error {\n\ttx := c.Value(\"tx\").(*pop.Connection)\n\n\tfilters := ordersFilters{\n\t\tStartDate: time.Now().AddDate(0, -1, 0),\n\t\tEndDate: time.Now(),\n\t}\n\n\tif err := c.Bind(&filters); err != nil {\n\t\treturn errors.Wrap(err, \"failed to bind arguments\")\n\t}\n\n\tfmt.Printf(\"params: %+v\\n\", c.Params())\n\tfmt.Printf(\"filters(after bind): %+v\\n\", filters)\n\n\t// Reset times so we do not get results based on current time\n\tfilters.StartDate = time.Date(\n\t\tfilters.StartDate.Year(),\n\t\tfilters.StartDate.Month(),\n\t\tfilters.StartDate.Day(),\n\t\t0,\n\t\t0,\n\t\t0,\n\t\t0,\n\t\tfilters.StartDate.Location(),\n\t)\n\tfilters.EndDate = time.Date(\n\t\tfilters.EndDate.Year(),\n\t\tfilters.EndDate.Month(),\n\t\tfilters.EndDate.Day(),\n\t\t0,\n\t\t0,\n\t\t0,\n\t\t0,\n\t\tfilters.EndDate.Location(),\n\t)\n\n\torders := models.Orders{}\n\n\tquery := tx.Where(\"date >= ?\", filters.StartDate)\n\tquery = query.Where(\"date < ?\", filters.EndDate.AddDate(0, 0, 1)) // We want to include end of the day, meaning starting of next day\n\terr := query.All(&orders)\n\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to fetch all the orders\")\n\t}\n\n\tc.Set(\"orders\", orders)\n\tc.Set(\"filters\", filters)\n\n\ttotalWithoutVAT := 0.0\n\ttotalWithVAT := 0.0\n\ttotalVAT := 0.0\n\tfor i := range orders {\n\n\t\tfmt.Printf(\"wut %+v\\n\", orders[i])\n\t\tif err := tx.Load(&orders[i], \"Rows\"); err != nil {\n\t\t\treturn errors.Wrap(err, \"could not load order rows\")\n\t\t}\n\t\ttotalWithoutVAT += orders[i].TotalWithoutVAT()\n\t\ttotalWithVAT += orders[i].TotalWithVAT()\n\t\ttotalVAT += orders[i].TotalVAT()\n\t}\n\n\tc.Set(\"totalWithoutVAT\", totalWithoutVAT)\n\tc.Set(\"totalWithVAT\", totalWithVAT)\n\tc.Set(\"totalVAT\", totalVAT)\n\n\treturn c.Render(200, r.HTML(\"orders.html\"))\n}", "func TestAddOrder(t *testing.T) {\n\tt.Parallel()\n\targs := AddOrderOptions{OrderFlags: \"fcib\"}\n\tcp, err := currency.NewPairFromString(\"XXBTZUSD\")\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\t_, err = k.AddOrder(context.Background(),\n\t\tcp,\n\t\torder.Sell.Lower(), order.Limit.Lower(),\n\t\t0.00000001, 0, 0, 0, &args)\n\tif err == nil {\n\t\tt.Error(\"AddOrder() Expected error\")\n\t}\n}", "func GetOrders(db *sqlx.DB) gin.HandlerFunc {\n\n\treturn func(c *gin.Context) {\n\n\t\tvar user1 User\n\t\tuserName, exists := c.Get(\"user\")\n\t\tif !exists {\n\t\t\tc.AbortWithError(503, errors.NewAPIError(503, \"failed to get user\", \"internal server error\", c))\n\t\t\treturn\n\t\t}\n\n\t\tdbErr := db.Get(&user1, \"SELECT * FROM gaea.user WHERE user_name=$1\", userName)\n\t\tif dbErr != nil {\n\t\t\tc.AbortWithError(503, errors.NewAPIError(503, \"failed to get user\", \"internal server error\", c))\n\t\t\treturn\n\t\t}\n\n\t\tvar memberStatus 
bool\n\t\tswitch {\n\t\tcase user1.Role == \"nonmember\":\n\t\t\tmemberStatus = false\n\t\tdefault:\n\t\t\tmemberStatus = true\n\t\t}\n\n\t\tvar ords []Order\n\t\tvar retOrds []Order\n\t\tvar qtyOrd int\n\n\t\terr1 := db.Get(&qtyOrd, `SELECT COUNT(*) FROM gaea.order WHERE user_name=$1`,\n\t\t\tuserName)\n\t\tif err1 != nil {\n\t\t\tfmt.Println(err1)\n\t\t\tc.AbortWithError(503, errors.NewAPIError(503, \"failed to get orders\", \"internal server error\", c))\n\t\t\treturn\n\t\t}\n\t\tif qtyOrd > 0 {\n\t\t\terr2 := db.Select(&ords, `SELECT * FROM gaea.order WHERE user_name=$1`,\n\t\t\t\tuserName)\n\t\t\tif err2 != nil {\n\t\t\t\tfmt.Println(err2)\n\t\t\t\tc.AbortWithError(503, errors.NewAPIError(503, \"failed to get orders\", \"internal server error\", c))\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tvar amtErr error\n\n\t\t\tfor _, order := range ords {\n\t\t\t\torder.ItemQty, order.AmountTotal, amtErr = CalcOrderTotals(order.OrderId, memberStatus, db)\n\t\t\t\tif amtErr != nil {\n\t\t\t\t\tfmt.Printf(\"%s\", amtErr)\n\t\t\t\t}\n\t\t\t\tretOrds = append(retOrds, order)\n\t\t\t}\n\t\t}\n\n\t\tc.JSON(200, gin.H{\"qty\": qtyOrd, \"orders\": retOrds})\n\t}\n}", "func GetOrders() (orders []Orders, err error) {\r\n\tvar rows *sql.Rows\r\n\tif rows, err = Get(`select * from orders where deleted_at is null order by created_at desc;`); err != nil {\r\n\t\tCheckError(\"Error getting Orders.\", err, false)\r\n\t\treturn nil, err\r\n\t}\r\n\r\n\tdefer rows.Close()\r\n\tfor rows.Next() {\r\n\t\torder := Orders{}\r\n\t\tif err = rows.Scan(&order.ID, &order.DocEntry, &order.DocNum, &order.Canceled, &order.CardCode, &order.CardName, &order.VatSum, &order.DocTotal, &order.Synced, &order.CreatedBy, &order.CreatedAt, &order.UpdatedAt, &order.DeletedAt, &order.Comment, &order.Returned, &order.DiscountApprovedBy); err != nil {\r\n\t\t\tCheckError(\"Error Scanning Orders.\", err, false)\r\n\t\t} else {\r\n\t\t\torders = append(orders, order)\r\n\t\t}\r\n\t}\r\n\r\n\treturn\r\n}", "func ParseOrders(sort string) []*Order {\n\tif sort == \"\" {\n\t\treturn []*Order{}\n\t}\n\n\torders := make([]*Order, 0)\n\to := sort\n\tfor _i := strings.IndexAny(o, \"+- \"); _i == 0; {\n\t\tcol := \"\"\n\t\t_o := \"\"\n\t\t// find the position where the next + or - appears\n\t\tif i := strings.IndexAny(o[1:], \"+- \"); i == -1 {\n\t\t\tcol = o[1:]\n\t\t} else {\n\t\t\tcol = o[1 : i+1]\n\t\t\t_o = o[i+1:]\n\t\t}\n\n\t\t// break out of the loop if the column name is empty\n\t\tif col == \"\" {\n\t\t\tbreak\n\t\t}\n\n\t\t// set the sort direction\n\t\tvar d Direction\n\t\tif o[0] == '+' || o[0] == ' ' {\n\t\t\td = DirectionAsc\n\t\t} else if o[0] == '-' {\n\t\t\td = DirectionDesc\n\t\t}\n\n\t\torders = append(orders, &Order{d, col})\n\n\t\tif _o == \"\" {\n\t\t\tbreak\n\t\t}\n\t\to = _o\n\t}\n\treturn orders\n}", "func (sq *ServerQuery) Order(o ...OrderFunc) *ServerQuery {\n\tsq.order = append(sq.order, o...)\n\treturn sq\n}", "func (h *Hbdm) HistoryOrders(symbol string, tradeType, orderType, status, create int, pageIndex, pageSize *int) (orders *OrdersResponse, err error) {\n\tpayload := make(map[string]interface{}, 7)\n\tpayload[\"symbol\"] = symbol\n\tpayload[\"trade_type\"] = tradeType\n\tpayload[\"type\"] = orderType\n\tpayload[\"status\"] = status\n\tpayload[\"create_date\"] = create\n\n\tif pageIndex != nil {\n\t\tpayload[\"page_index\"] = *pageIndex\n\t}\n\tif pageSize != nil {\n\t\tpayload[\"page_size\"] = *pageSize\n\t}\n\n\tr, err := h.client.do(\"POST\", \"contract_hisorders\", payload, true)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tvar response interface{}\n\tif err = json.Unmarshal(r, &response); 
err != nil {\n\t\treturn\n\t}\n\n\tif err = handleErr(response); err != nil {\n\t\treturn\n\t}\n\n\terr = json.Unmarshal(r, &orders)\n\treturn\n}", "func (c *Client) BuildOrdersRequest(ctx context.Context, v interface{}) (*http.Request, error) {\n\tu := &url.URL{Scheme: c.scheme, Host: c.host, Path: OrdersRelayerAPIPath()}\n\treq, err := http.NewRequest(\"GET\", u.String(), nil)\n\tif err != nil {\n\t\treturn nil, goahttp.ErrInvalidURL(\"RelayerAPI\", \"orders\", u.String(), err)\n\t}\n\tif ctx != nil {\n\t\treq = req.WithContext(ctx)\n\t}\n\n\treturn req, nil\n}", "func (svc *svc) ListOrders(ctx context.Context, query model.OrderQuery) ([]model.Order, int64, error) {\n\torders, err := svc.repo.ListOrders(ctx, query)\n\tif err != nil {\n\t\treturn nil, 0, err\n\t}\n\n\ttotal, err := svc.repo.CountOrders(ctx, query)\n\tif err != nil {\n\t\treturn nil, 0, err\n\t}\n\n\treturn orders, total, nil\n}", "func (db *queueDatabase) addToOrderIndex(m *persistence.QueueMessage) {\n\t// Find the index in the ordered queue where the message belongs. It's the\n\t// index of the first message that has a NextAttemptAt greater than\n\t// that of the new message.\n\tindex := sort.Search(\n\t\tlen(db.order),\n\t\tfunc(i int) bool {\n\t\t\treturn m.NextAttemptAt.Before(\n\t\t\t\tdb.order[i].NextAttemptAt,\n\t\t\t)\n\t\t},\n\t)\n\n\t// Expand the size of the queue.\n\tdb.order = append(db.order, nil)\n\n\t// Shift lower-priority messages further back to make space for the new one.\n\tcopy(db.order[index+1:], db.order[index:])\n\n\t// Insert the new message at its sorted index.\n\tdb.order[index] = m\n}", "func (s *Server) UpdateOrder(ctx context.Context, in *orderPb.UpdateOrderRequest) (*orderPb.Null, error) {\n\tt := time.Now()\n\trpcRequestCount.With(prometheus.Labels{\"method\": \"UpdateOrderTotal\"}).Inc()\n\n\t// Check input params.\n\tfileHash := in.GetFileHash()\n\tsessionId := in.GetSessionId()\n\tnodeIp := in.GetNodeIp()\n\torderId := in.GetOrderId()\n\tif fileHash == \"\" || sessionId == \"\" || nodeIp == \"\" || orderId == 0 {\n\t\trpcRequestCount.With(prometheus.Labels{\"method\": \"UpdateOrderFailed\"}).Inc()\n\t\treturn nil, errorm.RequestParamEmpty\n\t}\n\n\tdefer func(t time.Time) {\n\t\tdefer rpcRequestDuration.With(prometheus.Labels{\"method\": \"UpdateOrder\"}).Observe(float64(time.Since(t).Microseconds()) / 1000)\n\t}(t)\n\n\t// Update order by fileHash, sessionId, nodeIp and orderId.\n\terr := s.UpdateOrderController(fileHash, sessionId, nodeIp, orderId)\n\tif err != nil {\n\t\trpcRequestCount.With(prometheus.Labels{\"method\": \"UpdateOrderError\"}).Inc()\n\t\treturn nil, err\n\t}\n\n\trpcRequestCount.With(prometheus.Labels{\"method\": \"UpdateOrderSuccess\"}).Inc()\n\treturn &orderPb.Null{}, nil\n}" ]
[ "0.8132826", "0.7341831", "0.6482184", "0.64495546", "0.6072182", "0.60590833", "0.6049601", "0.6042386", "0.5945999", "0.5942374", "0.5904764", "0.5832294", "0.58172303", "0.57699704", "0.5737549", "0.57374513", "0.5730064", "0.5726372", "0.5662563", "0.56367064", "0.56074196", "0.5596845", "0.5575592", "0.5547106", "0.55414325", "0.54952633", "0.5423503", "0.5401132", "0.53992605", "0.53917754", "0.53834456", "0.53820443", "0.5310311", "0.52806044", "0.5278413", "0.5272337", "0.52604574", "0.52274245", "0.5218854", "0.5206314", "0.5205867", "0.5197996", "0.51702607", "0.5161369", "0.51567686", "0.5155367", "0.51320314", "0.5127299", "0.5126089", "0.51204795", "0.5115652", "0.51122034", "0.5072472", "0.5065326", "0.5055864", "0.5030511", "0.50258327", "0.5006663", "0.4976488", "0.49625587", "0.49489784", "0.4929671", "0.49258882", "0.49250355", "0.49138355", "0.49103957", "0.4910106", "0.49088594", "0.49051225", "0.48923957", "0.48917332", "0.48481536", "0.48418725", "0.4838625", "0.48366904", "0.48284402", "0.48281682", "0.48242566", "0.48242426", "0.48226628", "0.4821663", "0.48216605", "0.48186252", "0.48124072", "0.48105955", "0.4787828", "0.4779204", "0.47755122", "0.47734475", "0.47704363", "0.4764705", "0.47641551", "0.47613007", "0.4759636", "0.47420812", "0.47321975", "0.47173935", "0.4711652", "0.47010675", "0.4688823" ]
0.7472578
1
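Several of the negatives above (Queue.Add, addToOrderIndex, updateOrders) implement the same core pattern: inserting a new order into a slice while keeping the slice sorted. A minimal, self-contained sketch of that pattern follows; all names here are hypothetical, not taken from any snippet above.

package main

import (
	"fmt"
	"sort"
)

// Order is a hypothetical stand-in for the order types used above.
type Order struct {
	ID       string
	Priority int
}

// insertSorted inserts o into orders, keeping the slice sorted by
// descending Priority. It mirrors the sort.Search + copy idiom from
// the addToOrderIndex snippet above.
func insertSorted(orders []*Order, o *Order) []*Order {
	// Find the first index whose priority is lower than the new order's.
	i := sort.Search(len(orders), func(i int) bool {
		return orders[i].Priority < o.Priority
	})
	orders = append(orders, nil)   // grow the slice by one
	copy(orders[i+1:], orders[i:]) // shift lower-priority orders back
	orders[i] = o                  // place the new order at its slot
	return orders
}

func main() {
	var q []*Order
	for _, o := range []*Order{{"a", 1}, {"b", 5}, {"c", 3}} {
		q = insertSorted(q, o)
	}
	for _, o := range q {
		fmt.Println(o.ID, o.Priority) // prints b 5, c 3, a 1
	}
}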
AddPeer can be used to manually connect to a new peer.
func (app *App) AddPeer(peerInfo peerstore.PeerInfo) error { ctx, cancel := context.WithTimeout(context.Background(), peerConnectTimeout) defer cancel() return app.node.Connect(ctx, peerInfo) }
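A hedged usage sketch for the document above: how a caller might build the peerstore.PeerInfo that AddPeer expects from a human-readable multiaddress, mirroring the parsing steps in the AddPeerToPeerStore negative below. The peerAdder interface stands in for the App type, and the exact import paths depend on the libp2p version in use.

package main

import (
	"fmt"
	"log"

	peerstore "github.com/libp2p/go-libp2p-peerstore"
	ma "github.com/multiformats/go-multiaddr"
)

// peerAdder stands in for the App type above, keeping the sketch self-contained.
type peerAdder interface {
	AddPeer(peerstore.PeerInfo) error
}

// connectToPeer parses a multiaddress such as
// /ip4/127.0.0.1/tcp/4001/ipfs/QmbtKadk9x6s56Wh226Wu84ZUc7xEe7AFgvm9bYUbrENDM
// into the PeerInfo that AddPeer expects, then dials it.
func connectToPeer(a peerAdder, addr string) error {
	maddr, err := ma.NewMultiaddr(addr)
	if err != nil {
		return err
	}
	// InfoFromP2pAddr splits the multiaddress into a peer ID plus dial addresses.
	pi, err := peerstore.InfoFromP2pAddr(maddr)
	if err != nil {
		return err
	}
	return a.AddPeer(*pi) // the manual connection the query describes
}

func main() {
	// Parsing-only demo; no network I/O happens here.
	maddr, err := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/4001/ipfs/QmbtKadk9x6s56Wh226Wu84ZUc7xEe7AFgvm9bYUbrENDM")
	if err != nil {
		log.Fatal(err)
	}
	pi, err := peerstore.InfoFromP2pAddr(maddr)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(pi.ID, pi.Addrs)
}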
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func AddPeer(w http.ResponseWriter, r *http.Request) {\n\t// Connect to the peer\n\tvar newPeers peerStr\n\n\terr := json.NewDecoder(r.Body).Decode(&newPeers)\n\tif err != nil {\n\t\tlog.Println(\"AddPeer: could not decode peer\")\n\t}\n\tlog.Println(newPeers)\n\tlog.Printf(\"AddPeer: adding=%s\", newPeers.Peer)\n\n\tConnectToPeers(newPeers.Peer)\n}", "func (g *Gossiper) AddPeer(peerAddr *(net.UDPAddr)) {\n\tpeerConn, err := net.DialUDP(\"udp4\", nil, peerAddr)\n\tif err != nil {\n\t\tfmt.Printf(\"Error: could not connect to given peer: %v\", err)\n\t\tos.Exit(-1)\n\t}\n\tg.peers.AddPeer(peerAddr.String(), peerConn)\n\n\tg.newNodes = append(g.newNodes, peerAddr.String())\n}", "func (c *Core) AddPeer(addr network.Address) error {\n\treturn c.server.AddPeer(addr)\n}", "func (s *KVRaftService) AddPeer(peer string) {\n\trpcCmd := RpcCmd{\n\t\tOp: CmdJoin,\n\t\tValue: []byte(peer),\n\t}\n\trpcAddr := fmt.Sprintf(\"%s:%s\", ShCache.LeaderRpcAddr, ShCache.LeaderRpcPort)\n\ts.log.Println(\"[Join Raft RPC] rpcAddr:\", rpcAddr)\n\tdoJoin(rpcCmd, rpcAddr)\n\n}", "func (gossiper *Gossiper) AddPeer(address string) {\n\tgossiper.peerMutex.Lock()\n\tgossiper.Peers = append(gossiper.Peers, address)\n\tgossiper.peerMutex.Unlock()\n\tgossiper.statusWaiting.Store(address, make(chan *messages.StatusPacket))\n\tgossiper.expected.Store(address, make(chan bool))\n}", "func (s *server) AddPeer(name string, connectiongString string) error {\n\ts.debugln(\"server.peer.add: \", name, len(s.peers))\n\n\t// Do not allow peers to be added twice.\n\tif s.peers[name] != nil {\n\t\treturn nil\n\t}\n\n\t// Skip the Peer if it has the same name as the Server\n\tif s.name != name {\n\t\tpeer := newPeer(s, name, connectiongString, s.heartbeatInterval)\n\n\t\tif s.State() == Leader {\n\t\t\tpeer.startHeartbeat()\n\t\t}\n\n\t\ts.peers[peer.Name] = peer\n\n\t\t//s.DispatchEvent(newEvent(AddPeerEventType, name, nil))\n\t}\n\n\t// Write the configuration to file.\n\t//s.writeConf()\n\n\treturn nil\n}", "func (pr *PlumtreeRouter) AddPeer(pid peer.ID, ptoid protocol.ID) {\n\tpr.log.Debugf(\"PEERUP: Add new peer %s using %s\", pid, ptoid)\n\tpr.peers[pid] = ptoid\n}", "func (m *Manager) AddPeer(peer *peer.Peer) error {\n\tm.mutex.Lock()\n\tdefer m.mutex.Unlock()\n\tif !peer.IsAllowed() {\n\t\terrr := errors.New(\"denied\")\n\t\tlog.Println(errr)\n\t\treturn errr\n\t}\n\tif !peer.IsAllowed() || !m.peers.Add(peer) {\n\t\treturn nil\n\t}\n\treturn m.self.WritePeers(m.peers)\n}", "func (rs *ReactorShim) AddPeer(peer Peer) {\n\tpeerID, err := PeerIDFromString(string(peer.ID()))\n\tif err != nil {\n\t\trs.Logger.Error(\"failed to add peer\", \"peer\", peer.ID(), \"err\", err)\n\t\treturn\n\t}\n\n\tselect {\n\tcase rs.PeerUpdates.updatesCh <- PeerUpdate{PeerID: peerID, Status: PeerStatusUp}:\n\t\trs.Logger.Debug(\"sent peer update\", \"reactor\", rs.Name, \"peer\", peerID.String(), \"status\", PeerStatusUp)\n\n\tcase <-rs.PeerUpdates.Done():\n\t\t// NOTE: We explicitly DO NOT close the PeerUpdatesCh's updateCh go channel.\n\t\t// This is because there may be numerous spawned goroutines that are\n\t\t// attempting to send on the updateCh go channel and when the reactor stops\n\t\t// we do not want to preemptively close the channel as that could result in\n\t\t// panics sending on a closed channel. 
This also means that reactors MUST\n\t\t// be certain there are NO listeners on the updateCh channel when closing or\n\t\t// stopping.\n\t}\n}", "func (bcR *BlockchainReactor) AddPeer(peer p2p.Peer) {\n\tp2p.SendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck\n\t\tChannelID: BlockchainChannel,\n\t\tMessage: &bcproto.StatusResponse{\n\t\t\tBase: bcR.store.Base(),\n\t\t\tHeight: bcR.store.Height(),\n\t\t},\n\t}, bcR.Logger)\n\t// it's OK if send fails. will try later in poolRoutine\n\n\t// peer is added to the pool once we receive the first\n\t// bcStatusResponseMessage from the peer and call pool.updatePeer()\n}", "func (g *Gateway) addPeer(p *peer) {\n\tg.peers[p.addr] = p\n\tg.addNode(p.addr)\n\tgo g.listenPeer(p)\n}", "func (c *Cluster) AddPeer(regionID, storeID, peerID uint64) {\n\tc.Lock()\n\tdefer c.Unlock()\n\n\tc.regions[regionID].addPeer(peerID, storeID)\n}", "func (epR *EventpoolReactor) AddPeer(peer p2p.Peer) {\n\tepR.ids.ReserveForPeer(peer)\n\tgo epR.broadcastEventsRoutine(peer)\n}", "func (s *server) addPeer(p *peer) {\n\tif p == nil {\n\t\treturn\n\t}\n\n\t// Ignore new peers if we're shutting down.\n\tif atomic.LoadInt32(&s.shutdown) != 0 {\n\t\tp.Disconnect()\n\t\treturn\n\t}\n\n\t// Track the new peer in our indexes so we can quickly look it up either\n\t// according to its public key, or it's peer ID.\n\t// TODO(roasbeef): pipe all requests through to the\n\t// queryHandler/peerManager\n\ts.peersMtx.Lock()\n\n\tpubStr := string(p.addr.IdentityKey.SerializeCompressed())\n\n\ts.peersByID[p.id] = p\n\ts.peersByPub[pubStr] = p\n\n\tif p.inbound {\n\t\ts.inboundPeers[pubStr] = p\n\t} else {\n\t\ts.outboundPeers[pubStr] = p\n\t}\n\n\ts.peersMtx.Unlock()\n\n\t// Launch a goroutine to watch for the termination of this peer so we\n\t// can ensure all resources are properly cleaned up and if need be\n\t// connections are re-established.\n\tgo s.peerTerminationWatcher(p)\n\n\t// Once the peer has been added to our indexes, send a message to the\n\t// channel router so we can synchronize our view of the channel graph\n\t// with this new peer.\n\tgo s.discoverSrv.SynchronizeNode(p.addr.IdentityKey)\n}", "func addPeer(rid proto.RID, peer *peer) {\n\tlog.Infof(\"AddPeer rid=%s uid=%s\", rid, peer.uid)\n\troom := getRoom(rid)\n\tif room == nil {\n\t\troom = newRoom(rid)\n\t}\n\troom.addPeer(peer)\n}", "func (handler *rpcHandler) AddPeer(peerInfo peerstore.PeerInfo) error {\n\tlog.Debug(\"received AddPeer request via RPC\")\n\tif err := handler.app.AddPeer(peerInfo); err != nil {\n\t\tlog.WithField(\"error\", err.Error()).Error(\"internal error in AddPeer RPC call\")\n\t\treturn errInternal\n\t}\n\treturn nil\n}", "func (r *room) addPeer(p *peer) {\n\tr.Lock()\n\tdefer r.Unlock()\n\tr.peers[p.uid] = p\n}", "func (r *room) addPeer(p *Peer) {\n\tr.Lock()\n\tdefer r.Unlock()\n\tr.peers[p.uid] = p\n}", "func addPeer(peer Peer) {\n\t// iterate to find empty slot\n\tpeerListLock.Lock()\n\tfor i := 0; i < MaxPeers; i++ {\n\t\t// if peer IS dead\n\t\tif peerList[i].expirationTimer == 0 {\n\t\t\t// ensure proper port by generating again\n\t\t\tpeer.addr.Port = listeningPort\n\n\t\t\t// ensure expiration timer is reset\n\t\t\tpeer.expirationTimer = expirationDefault\n\n\t\t\t// put new peer in that slot and return\n\t\t\tpeerList[i] = peer\n\t\t\tpeerListLock.Unlock()\n\t\t\treturn\n\t\t}\n\t}\n\tpeerListLock.Unlock()\n}", "func (pm *ConfigManager) AddPeer(multiAddress multiaddr.Multiaddr, alias string) error {\n\tpm.peersLock.Lock()\n\tdefer pm.peersLock.Unlock()\n\n\tnewPeerAddrInfo, err 
:= peer.AddrInfoFromP2pAddr(multiAddress)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, p := range pm.peers {\n\t\tmultiAddr, err := multiaddr.NewMultiaddr(p.MultiAddress)\n\t\tif err != nil {\n\t\t\t// ignore wrong values in the config file\n\t\t\tcontinue\n\t\t}\n\n\t\taddrInfo, err := peer.AddrInfoFromP2pAddr(multiAddr)\n\t\tif err != nil {\n\t\t\t// ignore wrong values in the config file\n\t\t\tcontinue\n\t\t}\n\n\t\tif addrInfo.ID == newPeerAddrInfo.ID {\n\t\t\treturn errors.New(\"peer already exists\")\n\t\t}\n\t}\n\n\t// no peer with the same ID found, add the new one\n\tpm.peers = append(pm.peers, &PeerConfig{\n\t\tMultiAddress: multiAddress.String(),\n\t\tAlias: alias,\n\t})\n\n\treturn pm.Store()\n}", "func (s *schedule) AddPeer(id raft.ServerID) {\n\ts.peers = append(s.peers, id)\n\ts.occurred = append(s.occurred, false)\n}", "func (r *remoteRaft) addPeer(addr string) error {\n\treturn fmt.Errorf(\"cannot add peer using remote raft\")\n}", "func (c *Client) AddPeerToPeerStore(peerAddr string) (peer.ID, error) {\n\t// generate a multiformat address to connect to\n\t// /ip4/192.168.1.101/tcp/9999/ipfs/QmbtKadk9x6s56Wh226Wu84ZUc7xEe7AFgvm9bYUbrENDM\n\tipfsaddr, err := ma.NewMultiaddr(peerAddr)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\t// extract the ipfs peer id for the node\n\t// QmbtKadk9x6s56Wh226Wu84ZUc7xEe7AFgvm9bYUbrENDM\n\tpid, err := ipfsaddr.ValueForProtocol(ma.P_IPFS)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\t// decode the peerid\n\t// <peer.ID Qm*brENDM>\n\tpeerid, err := peer.IDB58Decode(pid)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\t// generate an ipfs based peer address address that we connect to\n\t// /ipfs/QmbtKadk9x6s56Wh226Wu84ZUc7xEe7AFgvm9bYUbrENDM\n\ttargetPeerAddr, err := ma.NewMultiaddr(\n\t\tfmt.Sprintf(\"/ipfs/%s\", pid),\n\t)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\t// generate a basic multiformat ip address to connect to\n\t// /ip4/192.168.1.101/tcp/9999\n\ttargetAddr := ipfsaddr.Decapsulate(targetPeerAddr)\n\t// add a properly formatted libp2p address to connect to\n\tc.Host.Peerstore().AddAddr(\n\t\tpeerid, targetAddr, pstore.PermanentAddrTTL,\n\t)\n\treturn peerid, nil\n}", "func (state *State) AddPeer(address string) bool {\n\tstate.lock_peers.Lock()\n\tdefer state.lock_peers.Unlock()\n\tif _, ok := state.known_peers[address]; ok || address == \"\" {\n\t\treturn false\n\t} else {\n\t\tpeer, err := NewPeer(address)\n\t\tif err == nil {\n\t\t\tstate.known_peers[address] = peer\n\t\t\tstate.list_peers = append(state.list_peers, address)\n\t\t\tfor _, c := range state.addPeerChannels {\n\t\t\t\tc <- address\n\t\t\t}\n\t\t}\n\t\treturn true\n\t}\n}", "func TestAddPeer(t *testing.T) {\n\tif testing.Short() {\n\t\tt.SkipNow()\n\t}\n\tt.Parallel()\n\tg := newTestingGateway(t)\n\tdefer g.Close()\n\n\tg.mu.Lock()\n\tdefer g.mu.Unlock()\n\tg.addPeer(&peer{\n\t\tPeer: modules.Peer{\n\t\t\tNetAddress: \"foo.com:123\",\n\t\t},\n\t\tsess: newClientStream(new(dummyConn), build.Version),\n\t})\n\tif len(g.peers) != 1 {\n\t\tt.Fatal(\"gateway did not add peer\")\n\t}\n}", "func (s *PendingConnections) Add(peer pex.Peer) {\n\ts.Lock()\n\tdefer s.Unlock()\n\ts.value[peer.Addr] = peer\n}", "func (r *Raft) AddPeer(peer net.Addr) ApplyDefer {\n\tdeferLog := &DeferLog{\n\t\tlog: Log{\n\t\t\tType: LogAddPeer,\n\t\t\tData: r.trans.EncodePeer(peer),\n\t\t},\n\t\tDeferError: DeferError{errCh: make(chan error, 1)},\n\t}\n\tselect{\n\tcase r.applyCh <- deferLog:\n\t\treturn deferLog\n\tcase <-r.shutdownCh:\n\t\treturn &DeferError{err: 
ErrRaftShutdown}\n\t}\n\n}", "func (*GenericFramework) PeerAdded(ctx *PeerContext) {}", "func (y *YggdrasilAdminAPI) AddPeer(uri string) ([]string, error) {\n\treq := fmt.Sprintf(`{\"keepalive\":true, \"request\":\"addpeer\", \"uri\":\"%s\"}`, uri)\n\tresp, err := y.execReq(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfmt.Println(string(resp.Response))\n\tadded := struct {\n\t\tAdded []string `json:\"added\"`\n\t}{}\n\tif err := json.Unmarshal(resp.Response, &added); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn added.Added, nil\n}", "func (ps *PeerStore) Add(p *Peer) {\n\tps.lock.Lock()\n\tdefer ps.lock.Unlock()\n\tps.peers[p.ListenAddr] = p\n}", "func (r *localRaft) addPeer(addr string) error {\n\tpeers, err := r.peerStore.Peers()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(peers) >= 3 {\n\t\treturn nil\n\t}\n\n\tif fut := r.raft.AddPeer(addr); fut.Error() != nil {\n\t\treturn fut.Error()\n\t}\n\treturn nil\n}", "func (peer *Peer) Add(db *sql.DB) {\n\ttx, err := db.Begin()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tstmt, err := tx.Prepare(\"insert or replace into peer(ip, port, lastSeen) values(?, ?, ?)\")\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer stmt.Close()\n\n\t_, err = stmt.Exec(peer.IP, peer.Port, peer.LastSeen)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\ttx.Commit()\n}", "func (n *bftnode) Add(addr string) error {\n\treturn n.Bn.AddPeer(addr)\n}", "func (mr *MockPoolMockRecorder) AddPeer(arg0, arg1 interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"AddPeer\", reflect.TypeOf((*MockPool)(nil).AddPeer), arg0, arg1)\n}", "func AddPeerDriver(name string, driver PeerDriver) {\n\tpeerDriversMutex.Lock()\n\tdefer peerDriversMutex.Unlock()\n\tpeerDrivers[name] = driver\n\tlog.Debugf(\"Registered peer storage driver: %s\", name)\n}", "func(peers *PeerList) Add(addr string, id int32) {\n\tpeers.mux.Lock()\n\tdefer peers.mux.Unlock()\n\tpeers.peerMap[addr] = id\n\tfmt.Println(\"After adding:\", peers.peerMap)\n}", "func (ps *PeerSet) Add(peer *Peer) error {\n\tps.mtx.Lock()\n\tdefer ps.mtx.Unlock()\n\tif ps.lookup[peer.Key] != nil {\n\t\treturn ErrSwitchDuplicatePeer\n\t}\n\n\t// ensure we havent maxed out connections for the peer's IP range yet\n\t// and update the IP range counters\n\tif !ps.incrIPRangeCounts(peer.Host) {\n\t\treturn ErrSwitchMaxPeersPerIPRange\n\t}\n\n\tindex := len(ps.list)\n\t// Appending is safe even with other goroutines\n\t// iterating over the ps.list slice.\n\tps.list = append(ps.list, peer)\n\tps.lookup[peer.Key] = &peerSetItem{peer, index}\n\treturn nil\n}", "func AddMember(client *clientv3.Client, peerAddrs []string) (*clientv3.MemberAddResponse, error) {\n\tctx, cancel := context.WithTimeout(client.Ctx(), DefaultRequestTimeout)\n\tdefer cancel()\n\treturn client.MemberAdd(ctx, peerAddrs)\n}", "func (p *peer) AddPeersPeers() {\n\tdefer p.ms.Done() // it's a goroutine\n\n\tif strings.Index(p.Url, getPeersUrl) < 0 {\n\t\tlog.Println(\"Error: you can only addpeers with a\", getPeersUrl, \"request\")\n\t\treturn\n\t}\n\n\t////\n\t// Get remote meshping server publich state. 
This may take a while!\n\t// That's why this is a goroutine...\n\trm, err := FetchRemotePeer(p.Url, p.PeerIP)\n\tif err != nil {\n\t\treturn // FetchRemotePeer reported to log(stderr) already\n\t}\n\n\tif p.ms.Verbose() > 2 {\n\t\tlog.Println(\"Got a remote server's state:\")\n\t\tenc := json.NewEncoder(os.Stdout)\n\t\tenc.SetIndent(\"\", \" \")\n\t\t// No need to take a lock on the mutex\n\t\tif err := enc.Encode(rm); err != nil {\n\t\t\tclient.LogSentry(sentry.LevelWarning, \"Error converting remote state to json: %s\", err)\n\t\t}\n\t}\n\n\tfor _, rmp := range rm.Peers {\n\t\turl := rmp.Url\n\t\tip := rmp.PeerIP\n\t\tpeer := p.ms.FindPeer(url, ip)\n\t\tif peer != nil {\n\t\t\tif p.ms.Verbose() > 2 {\n\t\t\t\tlog.Println(\"peer\", url, ip, \"-- PeerAlreadyPresent\")\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\tlog.Println(\"adding peer\", url, ip)\n\t\tpeer = p.ms.NewPeer(url, ip, p.Location)\n\t\tpeer.ms.Add() // for the Ping goroutine\n\t\tgo peer.Ping()\n\t}\n}", "func WGAddPeer(iface, pubKey, allowedIPs string) ([]byte, error) {\n\tcmd := exec.Command(\"wg\", \"set\", iface, \"peer\", pubKey, \"allowed-ips\", allowedIPs)\n\treturn cmd.CombinedOutput()\n}", "func (m *Mock) AddPeers(addr ...swarm.Address) {\n\tpanic(\"not implemented\") // TODO: Implement\n}", "func (m *Manager) AddPeers(peers peer.Peers) error {\n\tm.mutex.Lock()\n\tdefer m.mutex.Unlock()\n\tadded := false\n\tfor _, peer := range peers {\n\t\tif !peer.IsAllowed() {\n\t\t\terr := errors.New(\"denied\")\n\t\t\tlog.Println(err)\n\t\t\tcontinue\n\t\t}\n\t\tadded = added || m.peers.Add(peer)\n\t}\n\tif !added {\n\t\treturn nil\n\t}\n\treturn m.self.WritePeers(m.peers)\n}", "func AddOrUpdatePeer(p *Peer) error {\n\tjson, err := json.Marshal(p)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tidStr := p.ID.String()\n\n\tif err := context.Store.Put(peerPrefix+idStr, json, nil); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (m *Model) AddNetworkPeer(address string) bool {\n\tm.pMutex.Lock()\n\tdefer m.pMutex.Unlock()\n\n\tvalue, ok := m.peersList[address]\n\tm.peersList[address] = ok && value\n\treturn !ok\n}", "func AddPeerHook(hookPoint boil.HookPoint, peerHook PeerHook) {\n\tswitch hookPoint {\n\tcase boil.BeforeInsertHook:\n\t\tpeerBeforeInsertHooks = append(peerBeforeInsertHooks, peerHook)\n\tcase boil.BeforeUpdateHook:\n\t\tpeerBeforeUpdateHooks = append(peerBeforeUpdateHooks, peerHook)\n\tcase boil.BeforeDeleteHook:\n\t\tpeerBeforeDeleteHooks = append(peerBeforeDeleteHooks, peerHook)\n\tcase boil.BeforeUpsertHook:\n\t\tpeerBeforeUpsertHooks = append(peerBeforeUpsertHooks, peerHook)\n\tcase boil.AfterInsertHook:\n\t\tpeerAfterInsertHooks = append(peerAfterInsertHooks, peerHook)\n\tcase boil.AfterSelectHook:\n\t\tpeerAfterSelectHooks = append(peerAfterSelectHooks, peerHook)\n\tcase boil.AfterUpdateHook:\n\t\tpeerAfterUpdateHooks = append(peerAfterUpdateHooks, peerHook)\n\tcase boil.AfterDeleteHook:\n\t\tpeerAfterDeleteHooks = append(peerAfterDeleteHooks, peerHook)\n\tcase boil.AfterUpsertHook:\n\t\tpeerAfterUpsertHooks = append(peerAfterUpsertHooks, peerHook)\n\t}\n}", "func NewAddPeerCommand() *cobra.Command {\n\tc := &cobra.Command{\n\t\tUse: \"add-peer <region_id> <to_store_id>\",\n\t\tShort: \"add a region peer on specified store\",\n\t\tRun: addPeerCommandFunc,\n\t}\n\treturn c\n}", "func (hpc *HashPeersCache) Add(hash types.Hash32, peer p2p.Peer) {\n\thpc.mu.Lock()\n\tdefer hpc.mu.Unlock()\n\n\thpc.add(hash, peer)\n}", "func (a *Agent) AddPeers(ctx context.Context, p pool.Pool, num int) error {\n\tkind := \"\" 
// Any kind of node by default\n\tif !a.nodeInfo.IsFullNode {\n\t\t// Non-full-nodes need nodes of the same kind to peer with (light nodes)\n\t\tkind = a.nodeInfo.Kind.String()\n\t}\n\n\tlogger.Printf(\"Requesting more kind=%q peers from pool: %d\", kind, num)\n\tpeerResp, err := p.Peer(ctx, pool.PeerRequest{\n\t\tNum: num,\n\t\tKind: kind,\n\t})\n\tif err != nil && jsonrpc2.IsErrorCode(err, jsonrpc2.ErrCodeInternal) {\n\t\tif strings.HasPrefix(err.Error(), \"no available\") {\n\t\t\treturn ErrNoPeers\n\t\t} else {\n\t\t\t// This can happen if there are some incompatible agents on the\n\t\t\t// pool (e.g. outdated broken vipnode version)\n\t\t\tlogger.Printf(\"AddPeers RPC failed (possibly due to outdated agents on the pool): %s\", err)\n\t\t\t// We can't recover on this end. This should also yield errors on\n\t\t\t// the broken agents' side so hopefully they'll update soon.\n\t\t\treturn ErrNoPeers\n\t\t}\n\t} else if err != nil {\n\t\treturn AgentPoolError{err, \"Failed during pool peer request\"}\n\t}\n\tnodes := peerResp.Peers\n\tlogger.Printf(\"Received %d peer candidates from pool.\", len(nodes))\n\tfor _, node := range nodes {\n\t\tif err := a.EthNode.ConnectPeer(ctx, node.URI); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func (pm *peerManager) addOutboundPeer(meta PeerMeta) bool {\n\taddrString := fmt.Sprintf(\"/ip4/%s/tcp/%d\", meta.IPAddress, meta.Port)\n\tvar peerAddr, err = ma.NewMultiaddr(addrString)\n\tif err != nil {\n\t\tpm.logger.Warn().Err(err).Str(\"addr\", addrString).Msg(\"invalid NPAddPeer address\")\n\t\treturn false\n\t}\n\tvar peerID = meta.ID\n\tpm.mutex.Lock()\n\tinboundPeer, ok := pm.remotePeers[peerID]\n\tif ok {\n\t\t// peer is already exist (and maybe inbound peer)\n\t\tpm.logger.Info().Str(LogPeerID, inboundPeer.meta.ID.Pretty()).Msg(\"Peer is already managed by peermanager\")\n\t\tif meta.Designated {\n\t\t\t// If remote peer was connected first. 
designated flag is not set yet.\n\t\t\tinboundPeer.meta.Designated = true\n\t\t}\n\t\tpm.mutex.Unlock()\n\t\treturn true\n\t}\n\tpm.mutex.Unlock()\n\n\tpm.Peerstore().AddAddr(peerID, peerAddr, meta.TTL())\n\tctx := context.Background()\n\ts, err := pm.NewStream(ctx, meta.ID, aergoP2PSub)\n\tif err != nil {\n\t\tpm.logger.Info().Err(err).Str(\"addr\", addrString).Str(LogPeerID, meta.ID.Pretty()).Str(LogProtoID, string(aergoP2PSub)).Msg(\"Error while get stream\")\n\t\treturn false\n\t}\n\n\trd := metric.NewReader(s)\n\twt := metric.NewWriter(s)\n\th := newHandshaker(pm, pm.actorServ, pm.logger, peerID)\n\trw, remoteStatus, err := h.handshakeOutboundPeerTimeout(rd, wt, defaultHandshakeTTL)\n\tif err != nil {\n\t\tpm.logger.Debug().Err(err).Str(LogPeerID, meta.ID.Pretty()).Msg(\"Failed to handshake\")\n\t\t//pm.sendGoAway(rw, \"Failed to handshake\")\n\t\ts.Close()\n\t\treturn false\n\t}\n\n\tpm.mutex.Lock()\n\tinboundPeer, ok = pm.remotePeers[peerID]\n\tif ok {\n\t\tif ComparePeerID(pm.selfMeta.ID, meta.ID) <= 0 {\n\t\t\tpm.logger.Info().Str(LogPeerID, inboundPeer.meta.ID.Pretty()).Msg(\"Inbound connection was already handshaked while handshaking outbound connection, and remote peer is higher priority so closing this outbound connection.\")\n\t\t\tpm.mutex.Unlock()\n\t\t\tpm.sendGoAway(rw, \"Already handshaked\")\n\t\t\ts.Close()\n\t\t\treturn true\n\t\t} else {\n\t\t\tpm.logger.Info().Str(LogPeerID, inboundPeer.meta.ID.Pretty()).Msg(\"Inbound connection was already handshaked while handshaking outbound connection, but local peer is higher priority so closing that inbound connection\")\n\t\t\t// disconnect lower valued connection\n\t\t\tpm.deletePeer(meta.ID)\n\t\t\tinboundPeer.stop()\n\t\t}\n\t}\n\n\t// update peer info to remote sent infor\n\tmeta = FromPeerAddress(remoteStatus.Sender)\n\n\toutboundPeer := newRemotePeer(meta, pm, pm.actorServ, pm.logger, pm.mf, pm.signer, rw)\n\t// insert Handlers\n\tpm.handlerFactory.insertHandlers(outboundPeer)\n\tgo outboundPeer.runPeer()\n\tpm.insertPeer(peerID, outboundPeer)\n\tpm.logger.Info().Str(LogPeerID, peerID.Pretty()).Str(\"addr\", net.ParseIP(meta.IPAddress).String()+\":\"+strconv.Itoa(int(meta.Port))).Msg(\"Outbound peer is added to peerService\")\n\toutboundPeer.metric = pm.mm.Add(peerID, rd, wt)\n\tpm.mutex.Unlock()\n\n\taddrs := pm.Peerstore().Addrs(peerID)\n\taddrStrs := make([]string, len(addrs))\n\tfor i, addr := range addrs {\n\t\taddrStrs[i] = addr.String()\n\t}\n\tpm.logger.Debug().Strs(\"addrs\", addrStrs).Str(LogPeerID, outboundPeer.meta.ID.Pretty()).Msg(\"addresses of peer\")\n\n\t// peer is ready\n\th.doInitialSync()\n\n\t// notice to p2pmanager that handshaking is finished\n\tpm.NotifyPeerHandshake(peerID)\n\n\treturn true\n}", "func addUniquePeer(peers []net.Addr, peer net.Addr) []net.Addr {\n\tif peerExists(peers, peer) {\n\t\treturn peers\n\t} else {\n\t\treturn append(peers, peer)\n\t}\n}", "func (px *Pex) AddPeers(peers []string) {\n\tpx.lock.Lock()\n\tdefer px.lock.Unlock()\n\n\tfor _, p := range peers {\n\t\tif _, err := px.addPeer(p); err != nil {\n\t\t\tlogger.Warning(\"Failed to add peer %s, Reason: %v\", p, err)\n\t\t}\n\t}\n}", "func (consensus *Consensus) AddPeers(peers []*p2p.Peer) int {\n\tcount := 0\n\n\tfor _, peer := range peers {\n\t\t_, ok := consensus.validators.Load(utils.GetUniqueIDFromPeer(*peer))\n\t\tif !ok {\n\t\t\tif peer.ValidatorID == -1 {\n\t\t\t\tpeer.ValidatorID = int(consensus.uniqueIDInstance.GetUniqueID())\n\t\t\t}\n\t\t\tconsensus.validators.Store(utils.GetUniqueIDFromPeer(*peer), 
*peer)\n\t\t\tconsensus.pubKeyLock.Lock()\n\t\t\tconsensus.PublicKeys = append(consensus.PublicKeys, peer.PubKey)\n\t\t\tconsensus.pubKeyLock.Unlock()\n\t\t\tutils.GetLogInstance().Debug(\"[SYNC] new peer added\", \"pubKey\", peer.PubKey, \"ip\", peer.IP, \"port\", peer.Port)\n\t\t}\n\t\tcount++\n\t}\n\treturn count\n}", "func (*server) RegisterPeer(ctx context.Context, req *pb.RegisterPeerRequest) (*pb.RegisterPeerResponse, error) {\n\tlog.Println(\"Received peer request: \", req)\n\treturn &pb.RegisterPeerResponse{Status: \"OK\"}, nil\n}", "func (p *Peer) ConnectPeer(port string, id int) {\n\trequest := ConnectRequest{}\n\treply := ConnectReply{}\n\trequest.PeerID = p.PeerID\n\trequest.Port = p.Port\n\tcall(\"Peer.AcceptConnect\", &request, &reply, port)\n\tif reply.Accepted == false {\n\t\tfmt.Printf(\"Peer %v: Connection refused from Peer %v\\n\", p.PeerID, id)\n\t\treturn\n\t}\n\tp.peers[p.numPeers] = id\n\tp.numPeers = p.numPeers + 1\n\tfmt.Printf(\"Peer %v: Connected to Peer %v\\n\", p.PeerID, id)\n}", "func (pc *PeerConnection) AddIceCandidate(c *ice.Candidate) {\n\tif c == nil {\n\t\t// nil means end-of-candidates.\n\t\tclose(pc.remoteCandidates)\n\t\tpc.remoteCandidates = nil\n\t} else {\n\t\tselect {\n\t\tcase pc.remoteCandidates <- *c:\n\t\tcase <-pc.ctx.Done():\n\t\t}\n\t}\n}", "func (c *Client) AddPssPeer(pubkeyid string, addr []byte, spec *protocols.Spec) error {\n\ttopic := pss.ProtocolTopic(spec)\n\tif c.peerPool[topic] == nil {\n\t\treturn errors.New(\"addpeer on unset topic\")\n\t}\n\tif c.peerPool[topic][pubkeyid] == nil {\n\t\trw, err := c.newpssRPCRW(pubkeyid, addr, topic)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = rw.handshake(handshakeRetryCount, true, true)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tc.poolMu.Lock()\n\t\tc.peerPool[topic][pubkeyid] = rw\n\t\tc.poolMu.Unlock()\n\t\tp := p2p.NewPeer(enode.ID{}, fmt.Sprintf(\"%v\", addr), []p2p.Cap{})\n\t\tgo c.protos[topic].Run(p, c.peerPool[topic][pubkeyid])\n\t}\n\treturn nil\n}", "func (pds *peerDistanceSorter) appendPeer(p peer.ID, pDhtId ID) {\n\tpds.peers = append(pds.peers, peerDistance{\n\t\tp: p,\n\t\tdistance: xor(pds.target, pDhtId),\n\t})\n}", "func (peer *peerImp) AddUse() {\n\tpeer.useCounter++\n}", "func (pm *peerManager) insertPeer(ID peer.ID, peer *remotePeerImpl) {\n\tpm.remotePeers[ID] = peer\n\tpm.updatePeerCache()\n}", "func Peer(target string) {\n\tserver.listen()\n\tserver.seenPeers[target] = struct{}{}\n\tserver.dial(target)\n\tserver.managePeers()\n}", "func (m *mempool) AddTxFromPeer(tx *Tx) {\n\tm.m.Lock()\n\tdefer m.m.Unlock()\n\tm.Txs[tx.Id] = tx\n}", "func (s *Switch) addIncomingPeer(n p2pcrypto.PublicKey) error {\n\ts.inpeersMutex.RLock()\n\tamnt := len(s.inpeers)\n\t_, exist := s.inpeers[n]\n\ts.inpeersMutex.RUnlock()\n\n\tif amnt >= s.config.MaxInboundPeers {\n\t\t// todo: close connection with CPOOL\n\t\treturn errors.New(\"reached max connections\")\n\t}\n\n\ts.inpeersMutex.Lock()\n\ts.inpeers[n] = struct{}{}\n\ts.inpeersMutex.Unlock()\n\tif !exist {\n\t\ts.publishNewPeer(n)\n\t\ts.discover.Attempt(n) // or good?\n\t\tmetrics.InboundPeers.Add(1)\n\t}\n\treturn nil\n}", "func (p *BoxPeer) AddToPeerstore(maddr multiaddr.Multiaddr) error {\n\thaddr, pid, err := DecapsulatePeerMultiAddr(maddr)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tptype, _ := p.Type(pid)\n\tp.table.peerStore.Put(pid, pstore.PTypeSuf, uint8(ptype))\n\t// TODO, we must consider how long the peer should be in the peerstore,\n\t// PermanentAddrTTL should only be for peer configured by 
user.\n\t// Peer that is connected or observed from other peers should have different TTL.\n\tp.host.Peerstore().AddAddr(pid, haddr, peerstore.PermanentAddrTTL)\n\tp.table.routeTable.Update(pid)\n\treturn nil\n}", "func (n *QriNode) AddQriPeer(pinfo pstore.PeerInfo) error {\n\t// add this peer to our store\n\tn.QriPeers.AddAddrs(pinfo.ID, pinfo.Addrs, pstore.TempAddrTTL)\n\n\t// if profile, _ := n.Repo.Peers().GetPeer(pinfo.ID); profile != nil {\n\t// \t// we've already seen this peer\n\t// \treturn nil\n\t// }\n\n\tif err := n.RequestProfileInfo(pinfo); err != nil {\n\t\treturn err\n\t}\n\n\t// some time later ask for a list of their peers, you know, \"for a friend\"\n\tgo func() {\n\t\t// time.Sleep(time.Second * 2)\n\t\tn.RequestPeersList(pinfo.ID)\n\t}()\n\n\treturn nil\n}", "func (n *NetImpl) usePeer(netID string) *peer {\n\tif netID == n.myNetID {\n\t\t// nil for itself\n\t\treturn nil // TODO: [KP] return self\n\t}\n\tn.peersMutex.Lock()\n\tdefer n.peersMutex.Unlock()\n\n\tif peer, ok := n.peers[n.peeringID(netID)]; ok {\n\t\t// existing peer\n\t\tpeer.numUsers++\n\t\treturn peer\n\t}\n\t// new peer\n\tret := newPeer(netID, n)\n\tn.peers[ret.peeringID()] = ret\n\tn.log.Debugf(\"added new peer id %s inbound = %v\", ret.peeringID(), ret.IsInbound())\n\treturn ret\n}", "func (sm *SyncManager) NewPeer(peer *peer.Peer) {\n\t// Ignore if we are shutting down.\n\tif atomic.LoadInt32(&sm.shutdown) != 0 {\n\t\treturn\n\t}\n\tsm.msgChan <- &newPeerMsg{peer: peer}\n}", "func (c *Client) NewPeer(payload string) bool {\n\tvar result bool\n\tif err := c.ClientRPC.Call(\"Server.NewPeer\", payload, &result); err != nil {\n\t\tlog.Println(err)\n\t}\n\treturn result\n}", "func (sm *SyncManager) NewPeer(peer *peer.Peer) {\n\t// Ignore if we are shutting down.\n\tif atomic.LoadInt32(&sm.shutdown) != 0 {\n\t\treturn\n\t}\n\n\tsm.msgChan <- &newPeerMsg{peer: peer}\n}", "func AddPeers(\n\tqueried map[string]struct{},\n\tunqueried ClosestPeers,\n\tdoc comm.Doctor,\n\tpeers []*api.PeerAddress,\n\tfromer peer.Fromer,\n) {\n\tfor _, pa := range peers {\n\t\tnewID := id.FromBytes(pa.PeerId)\n\t\tinUnqueried := unqueried.In(newID)\n\t\t_, inQueried := queried[newID.String()]\n\t\tif !inUnqueried && !inQueried && doc.Healthy(newID) {\n\t\t\t// only add discovered peers that we haven't already seen and are healthy\n\t\t\tnewPeer := fromer.FromAPI(pa)\n\t\t\tunqueried.SafePush(newPeer)\n\t\t}\n\t}\n}", "func (this *Client) connectToPeer(msg *Message, conn net.Conn) error {\n\tthis.connections[msg.Author] = conn\n\tresponse := CreateMessage(MESSAGE_CONNECT, \"\", this.nick)\n\tresponse.SendTo(conn)\n\treturn nil\n}", "func Connect(peer *Peer) {\n conn, or := net.Dial(\"tcp\", peer.Addr)\n m := maddr // TODO: Set a proper message here\n\n if !e.Rr(or, false) {\n handshake(conn, peer.PublicKey, m)\n fmt.Println(\"Successfully connected to peer: \" + conn.RemoteAddr().String())\n // This is how we distinguish between if they have contacted us or not\n // Once we have connected to them, we can send them messages (they might ignore us though)\n if peer.Status == \"authrec\" {\n peer.Status = \"authenticated\"\n } else {\n peer.Status = \"authsent\"\n }\n peer.Conn = conn\n }\n}", "func (ab *AddressBook) AddLocalPeerRelay(relayPeers ...string) error {\n\tfor _, relayPeer := range relayPeers {\n\t\trelayPeer = strings.Replace(relayPeer, \"relay:\", \"\", 1)\n\t\trelayPeer = \"relay:\" + relayPeer\n\t\tab.localRelays.Store(relayPeer, true)\n\t}\n\treturn nil\n}", "func (m *MockPool) AddPeer(arg0 context.Context, arg1 
peer.Peer) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"AddPeer\", arg0, arg1)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (matcher *JoinSession) AddParticipant(maxAmount uint64, sessID SessionID) (*SessionParticipant, error) {\n\n\treq := addParticipantReq{\n\t\tmaxAmount: maxAmount,\n\t\tsessID: sessID,\n\t\tresp: make(chan addParticipantRes),\n\t}\n\tmatcher.addParticipantReq <- req\n\n\tresp := <-req.resp\n\treturn resp.participant, resp.err\n}", "func (d *Dialer) Add(from string, fromPort int, to net.IP, toPort int) {\n\tif d.resolve == nil {\n\t\td.resolve = make(map[string]string)\n\t}\n\td.resolve[fmt.Sprintf(\"%s:%d\", from, fromPort)] = fmt.Sprintf(\"%s:%d\", to, toPort)\n}", "func (s *Server) AddNode(meta serverpb.NodeMeta, force bool) error {\n\tlocalMeta, err := s.NodeMeta()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif localMeta.Id == meta.Id {\n\t\treturn nil\n\t}\n\n\tif err := validateNodeMeta(meta); err != nil {\n\t\treturn err\n\t}\n\n\ts.log.Printf(\"AddNode %s\", color.RedString(meta.Id))\n\n\tnew := s.addNodeMeta(meta)\n\tif err := s.persistNodeMeta(meta); err != nil {\n\t\treturn err\n\t}\n\n\ts.mu.Lock()\n\t_, alreadyPeer := s.mu.peers[meta.Id]\n\ts.mu.Unlock()\n\n\tif (!new || s.NumConnections() >= int(s.config.MaxPeers)) && !force || alreadyPeer {\n\t\treturn nil\n\t}\n\n\t// avoid duplicate connections\n\ts.mu.Lock()\n\t_, connecting := s.mu.connecting[meta.Id]\n\ts.mu.connecting[meta.Id] = struct{}{}\n\ts.mu.Unlock()\n\n\tif connecting {\n\t\treturn nil\n\t}\n\n\tdefer func() {\n\t\ts.mu.Lock()\n\t\tdefer s.mu.Unlock()\n\n\t\tdelete(s.mu.connecting, meta.Id)\n\t}()\n\n\tctx, cancel := context.WithCancel(s.ctx)\n\tconn, err := s.connectNode(ctx, meta)\n\tif err != nil {\n\t\treturn err\n\t}\n\tclient := serverpb.NewNodeClient(conn)\n\tresp, err := client.Hello(ctx, &serverpb.HelloRequest{\n\t\tMeta: &localMeta,\n\t})\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"Hello\")\n\t}\n\tif resp.Meta.Id != meta.Id {\n\t\treturn errors.Errorf(\"expected node with ID %+v; got %+v\", meta, resp.Meta)\n\t}\n\n\tpeer := &peer{\n\t\tctx: ctx,\n\t\tcancel: cancel,\n\t\tclient: client,\n\t\tconn: conn,\n\t\ts: s,\n\t\tmeta: meta,\n\t}\n\n\ts.mu.Lock()\n\t// make sure there isn't a duplicate connection\n\t_, ok := s.mu.peers[meta.Id]\n\tif !ok {\n\t\ts.mu.peers[meta.Id] = peer\n\t}\n\ts.mu.Unlock()\n\n\t// race condition to add connections, don't overwrite and close this\n\t// connection.\n\tif ok {\n\t\ts.log.Printf(\"found duplicate connection to: %s\", color.RedString(meta.Id))\n\t\treturn conn.Close()\n\t}\n\n\tgo peer.heartbeat()\n\n\tif err := s.AddNodes(resp.ConnectedPeers, resp.KnownPeers); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func addAddrToPeerstore(h host.Host, addr string) *peer.AddrInfo {\n\tpeerInfo := parseAddress(addr)\n\t// We have a peer ID and a targetAddr so we add\n\t// it to the peerstore so LibP2P knows how to contact it\n\th.Peerstore().AddAddrs(peerInfo.ID, peerInfo.Addrs, peerstore.PermanentAddrTTL)\n\treturn peerInfo\n}", "func (peer *PeerConnection) addConnection(sock *NamedWebSocket) {\n\t// Add this websocket instance to Named WebSocket broadcast list\n\tsock.peers = append(sock.peers, peer)\n\n\t// Inform all control connections that we now own this peer connection\n\tfor _, control := range sock.controllers {\n\t\t// don't notify controller if its id matches the peer's id\n\t\tif control.id != peer.id {\n\t\t\tcontrol.send(\"connect\", control.id, peer.id, \"\")\n\t\t}\n\t}\n\n\t// Inform all proxy 
connections that we now own this peer connection\n\tfor _, proxy := range sock.proxies {\n\t\tif proxy.writeable {\n\t\t\tproxy.send(\"connect\", proxy.id, peer.id, \"\")\n\t\t}\n\t}\n\n\t// Start connection read/write pumps\n\tgo peer.writeConnectionPump(sock)\n\tgo peer.readConnectionPump(sock)\n}", "func (n *Node) AddBuiltInPeers(hosts ...string) {\n\tn.builtin.Lock()\n\tdefer n.builtin.Unlock()\n\n\tn.builtin.hosts = append(n.builtin.hosts, hosts...)\n\n\tn.Debugf(\"built in peer hosts: %v\", n.builtin.hosts)\n}", "func (o *ResourcepoolPoolMember) SetPeer(v ResourcepoolLeaseRelationship) {\n\to.Peer = &v\n}", "func TestAdd(t *testing.T) {\n\t// Create a mocked peers cache connected to a mock directory\n\tcache, mgds, err := makePeersCache()\n\trequire.NoError(t, err, \"could not create mocked peers cache\")\n\tdefer mgds.Shutdown()\n\n\t// Common name is required to add a peer to the cache\n\terr = cache.Add(&peers.PeerInfo{})\n\trequire.EqualError(t, err, \"common name is required for all peers\")\n\n\t// Generate a random key for some of our fixtures.\n\tprivateKey, err := rsa.GenerateKey(rand.Reader, 2048)\n\trequire.NoError(t, err)\n\n\t// Test adding peers concurrently; the leonardo peer should be updated with\n\t// consecutive updates to\n\tt.Run(\"addTests\", func(t *testing.T) {\n\t\ttests := []struct {\n\t\t\tname string\n\t\t\tinfo *peers.PeerInfo\n\t\t}{\n\t\t\t{\"add-id-only\", &peers.PeerInfo{\n\t\t\t\tCommonName: \"leonardo.trisatest.net\",\n\t\t\t\tID: \"19d84515-007a-48cc-9efd-b153a263e77c\",\n\t\t\t}},\n\t\t\t{\"add-registered-directory-only\", &peers.PeerInfo{\n\t\t\t\tCommonName: \"leonardo.trisatest.net\",\n\t\t\t\tRegisteredDirectory: \"testdirectory.org\",\n\t\t\t}},\n\t\t\t{\"add-endpoint-only\", &peers.PeerInfo{\n\t\t\t\tCommonName: \"leonardo.trisatest.net\",\n\t\t\t\tEndpoint: \"leonardo.trisatest.net:443\",\n\t\t\t}},\n\t\t\t{\"add-signing-key-only\", &peers.PeerInfo{\n\t\t\t\tCommonName: \"leonardo.trisatest.net\",\n\t\t\t\tSigningKey: &privateKey.PublicKey,\n\t\t\t}},\n\t\t\t{\"add-new-peer\", &peers.PeerInfo{\n\t\t\t\tCommonName: \"donatello.trisatest.net\",\n\t\t\t\tID: \"b19c9ebd-82f5-4bda-91ef-226e3ecee4b8\",\n\t\t\t\tRegisteredDirectory: \"testdirectory.org\",\n\t\t\t\tEndpoint: \"donatello.trisatest.net:443\",\n\t\t\t}},\n\t\t}\n\t\tfor _, tt := range tests {\n\t\t\ttt := tt\n\t\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\t\tt.Parallel()\n\t\t\t\trequire.NoError(t, cache.Add(tt.info))\n\t\t\t})\n\t\t}\n\t})\n\n\t// Verify the final state of the cache\n\tleonardo, err := cache.Get(\"leonardo.trisatest.net\")\n\trequire.NoError(t, err)\n\trequire.Equal(t, \"leonardo.trisatest.net\", leonardo.Info().CommonName)\n\trequire.Equal(t, \"19d84515-007a-48cc-9efd-b153a263e77c\", leonardo.Info().ID)\n\trequire.Equal(t, \"testdirectory.org\", leonardo.Info().RegisteredDirectory)\n\trequire.Equal(t, \"leonardo.trisatest.net:443\", leonardo.Info().Endpoint)\n\trequire.Equal(t, &privateKey.PublicKey, leonardo.Info().SigningKey)\n\n\tdonatello, err := cache.Get(\"donatello.trisatest.net\")\n\trequire.NoError(t, err)\n\trequire.Equal(t, \"donatello.trisatest.net\", donatello.Info().CommonName)\n\trequire.Equal(t, \"b19c9ebd-82f5-4bda-91ef-226e3ecee4b8\", donatello.Info().ID)\n\trequire.Equal(t, \"testdirectory.org\", donatello.Info().RegisteredDirectory)\n\trequire.Equal(t, \"donatello.trisatest.net:443\", donatello.Info().Endpoint)\n}", "func (ab *AddressBook) AddLocalPeerAddress(addresses ...string) error {\n\tfor _, address := range addresses 
{\n\t\tab.localAddresses.Store(address, true)\n\t}\n\treturn nil\n}", "func (c *connection) addHost(conn *net.TCPConn) {\n\tmsg := read(conn)\n\tvar buf bytes.Buffer\n\tid := int(msg[1])\n\tport := int(msg[2])*256 + int(msg[3])\n\tif id == 0 {\n\t\tid = len(c.peers)\n\t\tbuf.Write([]byte{byte(id), byte(c.myId)})\n\t\tfor i := range c.peers {\n\t\t\tif i != c.myId {\n\t\t\t\tbuf.WriteByte(byte(i))\n\t\t\t\tbuf.Write(addrToBytes(c.peers[i].ip, c.peers[i].port))\n\t\t\t}\n\t\t}\n\t\twrite(conn, buf.Bytes())\n\n\t}\n\tc.addPeer(id, conn, port)\n\tgo c.receive(c.peers[id])\n}", "func (s *Service) RunPeer(orgName string, peer []config.Peers, projectPath string, i int,\n\terrChanPeer chan error, wgPeerDone chan bool) {\n\tctx := context.TODO()\n\tcfg, hostConfig := configPeer(peer, projectPath, orgName, i)\n\n\tresp, err := s.MyClient.ContainerCreate(ctx, cfg, hostConfig, nil,\n\t\tpeer[i].Name)\n\tif err != nil {\n\t\terrChanPeer <- errors.Wrap(err, \"ContainerCreate failed with error\")\n\t\treturn\n\t}\n\n\tlog.Println(\"ContainerCreate for peer response: \", resp)\n\n\terr = s.MyClient.ContainerStart(ctx, resp.ID, types.ContainerStartOptions{})\n\tif err != nil {\n\t\terrChanPeer <- errors.Wrap(err, \"ContainerStart failed with error\")\n\t\treturn\n\t}\n\n\tlog.Println(\"ContainerStart for peer succeed.\")\n\n\twgPeerDone <- true\n}", "func (p *BoxPeer) AddAddrToPeerstore(addr string) error {\n\tmaddr, err := multiaddr.NewMultiaddr(addr)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn p.AddToPeerstore(maddr)\n}", "func (a PeerAddress) AddTo(m *stun.Message) error {\n\treturn stun.XORMappedAddress(a).AddToAs(m, stun.AttrXORPeerAddress)\n}", "func (c *Client) AddMember(name string, peerAddrs string) ([]Member, error) {\n\treturn c.addMember(name, peerAddrs, false)\n}", "func (c *FakeEtcdClient) AddMember(memberID uint64, peerURLs []string) error {\n\t_, ok := c.members[memberID]\n\tif ok {\n\t\treturn fmt.Errorf(\"member with ID %d already exists\", memberID)\n\t}\n\tc.members[memberID] = &etcd.Member{\n\t\tID: memberID,\n\t\tPeerURLs: peerURLs,\n\t}\n\tc.healthy[memberID] = false\n\treturn nil\n}", "func (g *Gateway) Connect(addr modules.NetAddress) error {\n\tif addr == g.Address() {\n\t\treturn errors.New(\"can't connect to our own address\")\n\t}\n\n\tid := g.mu.RLock()\n\t_, exists := g.peers[addr]\n\tg.mu.RUnlock(id)\n\tif exists {\n\t\treturn errors.New(\"peer already added\")\n\t}\n\n\tconn, err := net.DialTimeout(\"tcp\", string(addr), dialTimeout)\n\tif err != nil {\n\t\treturn err\n\t}\n\t// send our address\n\tif err := encoding.WriteObject(conn, g.Address()); err != nil {\n\t\treturn err\n\t}\n\t// TODO: exchange version messages\n\n\tg.log.Println(\"INFO: connected to new peer\", addr)\n\n\tid = g.mu.Lock()\n\tg.addPeer(&peer{addr: addr, sess: muxado.Client(conn)})\n\tg.mu.Unlock(id)\n\n\t// request nodes\n\tvar nodes []modules.NetAddress\n\terr = g.RPC(addr, \"ShareNodes\", func(conn modules.PeerConn) error {\n\t\treturn encoding.ReadObject(conn, &nodes, maxSharedNodes*maxAddrLength)\n\t})\n\tif err != nil {\n\t\t// log this error, but don't return it\n\t\tg.log.Printf(\"WARN: request for node list of %v failed: %v\", addr, err)\n\t\treturn nil\n\t}\n\tg.log.Printf(\"INFO: %v sent us %v peers\", addr, len(nodes))\n\tid = g.mu.Lock()\n\tfor _, node := range nodes {\n\t\tg.addNode(node)\n\t}\n\tg.save()\n\tg.mu.Unlock(id)\n\n\treturn nil\n}", "func (_ERC20Pausable *ERC20PausableTransactor) AddPauser(opts *bind.TransactOpts, account common.Address) (*types.Transaction, error) 
{\n\treturn _ERC20Pausable.contract.Transact(opts, \"addPauser\", account)\n}", "func (o *IppoolPoolMember) SetPeer(v IppoolIpLeaseRelationship) {\n\to.Peer = &v\n}", "func (m *Model) AddNetworkPeers(addresses []string) {\n\tfor _, address := range addresses {\n\t\tm.AddNetworkPeer(address)\n\t}\n}", "func (s *server) ConnectToPeer(addr *lnwire.NetAddress,\n\tperm bool) error {\n\n\terrChan := make(chan error, 1)\n\n\ts.queries <- &connectPeerMsg{\n\t\taddr: addr,\n\t\tpersistent: perm,\n\t\terr: errChan,\n\t}\n\n\treturn <-errChan\n}", "func (hpc *HashPeersCache) add(hash types.Hash32, peer p2p.Peer) {\n\tpeers, exists := hpc.get(hash)\n\tif !exists {\n\t\thpc.Cache.Add(hash, HashPeers{peer: {}})\n\t\treturn\n\t}\n\n\tpeers[peer] = struct{}{}\n\thpc.Cache.Add(hash, peers)\n}", "func (_Pausable *PausableTransactor) AddPauser(opts *bind.TransactOpts, account common.Address) (*types.Transaction, error) {\n\treturn _Pausable.contract.Transact(opts, \"addPauser\", account)\n}", "func NewPeer(config Config) (*Peer, error) {\n\tvar (\n\t\td = newDelegate(config.Logger)\n\t\tc = memberlist.DefaultLANConfig()\n\t)\n\t{\n\t\tc.Name = uuid.New()\n\t\tc.BindAddr = config.ClusterHost\n\t\tc.BindPort = config.ClusterPort\n\t\tc.AdvertiseAddr = config.AdvertiseHost\n\t\tc.AdvertisePort = config.AdvertisePort\n\t\tc.LogOutput = ioutil.Discard\n\t\tc.Delegate = d\n\t\tc.Events = d\n\t}\n\n\tml, err := memberlist.Create(c)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\td.initialize(c.Name, config.Type, config.APIHost, config.APIPort, ml.NumMembers)\n\tn, _ := ml.Join(config.InitialPeers)\n\tlevel.Debug(config.Logger).Log(\"Join\", n)\n\n\treturn &Peer{\n\t\tml: ml,\n\t\td: d,\n\t}, nil\n}", "func (self *Node) AddRoute(id RouteId, toPeer cipher.PubKey) error {\n\t_, routeExists := self.safelyGetRoute(id)\n\tif routeExists {\n\t\treturn errors.New(fmt.Sprintf(\"Route %v already exists\\n\", id))\n\t}\n\n\ttransport := self.safelyGetTransportToPeer(toPeer, true)\n\tif transport == nil {\n\t\treturn errors.New(fmt.Sprintf(\"No transport to peer %v\\n\", toPeer))\n\t}\n\n\t// Add locally to routesById for backward termination\n\tself.lock.Lock()\n\tdefer self.lock.Unlock()\n\tself.routesById[id] =\n\t\tRoute{\n\t\t\ttoPeer,\n\t\t\tid,\n\t\t\tcipher.PubKey{},\n\t\t\tNilRouteId,\n\t\t\t// Route lifetime: never dies until route is removed\n\t\t\ttime.Unix(0, 0),\n\t\t}\n\n\tself.localRoutesByTerminatingPeer[toPeer] = id\n\tself.localRoutesById[id] = LocalRoute{self.config.PubKey, toPeer, NilRouteId, time.Unix(0, 0)}\n\treturn nil\n}", "func newPeer(host topology.Host, dialer client.PeerConnDialer) *peer {\n\treturn &peer{\n\t\thost: host,\n\t\tdialer: dialer,\n\t}\n}", "func NewPeer() *Peer {\n\treturn &Peer{\n\t\tlogT: log.NewLogTrace(0, 0, 0),\n\t}\n}", "func (c *Libp2pPubSub) CreatePeer(nodeId int, port int) *core.Host {\n\t// Creating a node\n\th, err := createHost(port)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tfmt.Printf(\"Node %v is %s\\n\", nodeId, GetLocalhostAddress(h))\n\n\treturn &h\n}" ]
[ "0.8037111", "0.7986729", "0.7840991", "0.77524346", "0.7725641", "0.7709996", "0.76176715", "0.7593478", "0.73779005", "0.73482287", "0.7329669", "0.7313278", "0.7286379", "0.7278531", "0.7213862", "0.71712655", "0.71476495", "0.7099289", "0.7071857", "0.70617026", "0.7058194", "0.70499593", "0.69746196", "0.6974418", "0.69306487", "0.68674195", "0.68412465", "0.68364805", "0.67834026", "0.6779513", "0.67647004", "0.6746011", "0.66581607", "0.66138756", "0.65194416", "0.64812076", "0.64771056", "0.6380528", "0.63751316", "0.63365465", "0.6324841", "0.63084954", "0.6308106", "0.6307735", "0.6213808", "0.61860627", "0.6180997", "0.6156136", "0.6143394", "0.61411285", "0.60646886", "0.60645413", "0.6032575", "0.6025872", "0.6020381", "0.60050124", "0.5988077", "0.5943085", "0.5942164", "0.5869472", "0.5846577", "0.582466", "0.5789555", "0.5788977", "0.57861704", "0.57772946", "0.5767478", "0.57642597", "0.5763187", "0.5706353", "0.5706145", "0.56603956", "0.565583", "0.56536895", "0.56529516", "0.56436664", "0.56388736", "0.56204456", "0.5617659", "0.559989", "0.55941737", "0.5569925", "0.5540985", "0.5528928", "0.5522655", "0.5520445", "0.5518505", "0.5516797", "0.55073863", "0.5491486", "0.54885167", "0.54735696", "0.5472882", "0.5461193", "0.54608417", "0.5437459", "0.54213727", "0.54109144", "0.5407514", "0.5405063" ]
0.81139016
0
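A minimal usage sketch for the AddPeer document above. The App type and peerConnectTimeout come from that snippet; the wrapper function connectExample, the exact import paths, and the sample multiaddress (reused from one of the negatives) are assumptions for illustration. peerstore.InfoFromP2pAddr is the libp2p helper that splits a full p2p multiaddress into a PeerInfo.

package main

import (
	"log"

	peerstore "github.com/libp2p/go-libp2p-peerstore" // assumed import path for peerstore.PeerInfo
	ma "github.com/multiformats/go-multiaddr"
)

// connectExample is a hypothetical caller; only app.AddPeer comes from the document above.
func connectExample(app *App) {
	// Full p2p multiaddress: transport address plus the peer's ID.
	maddr, err := ma.NewMultiaddr("/ip4/192.168.1.101/tcp/9999/ipfs/QmbtKadk9x6s56Wh226Wu84ZUc7xEe7AFgvm9bYUbrENDM")
	if err != nil {
		log.Fatal(err)
	}
	// Split the multiaddress into a PeerInfo{ID, Addrs}.
	pinfo, err := peerstore.InfoFromP2pAddr(maddr)
	if err != nil {
		log.Fatal(err)
	}
	// AddPeer dials the peer, bounded by the package-level peerConnectTimeout.
	if err := app.AddPeer(*pinfo); err != nil {
		log.Printf("could not connect to peer: %v", err)
	}
}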
SubscribeToOrderEvents lets one subscribe to order events emitted by the OrderWatcher
func (app *App) SubscribeToOrderEvents(sink chan<- []*zeroex.OrderEvent) event.Subscription {
	subscription := app.orderWatcher.Subscribe(sink)
	return subscription
}
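A minimal consumer sketch for the subscription above. The wrapper function watchOrders, the channel buffer size, and the logging are assumptions; Unsubscribe and the Err channel belong to the event.Subscription interface the method returns, and the pattern mirrors the SetupOrderStream negative below.

// watchOrders is a hypothetical consumer of order events.
func watchOrders(app *App) {
	events := make(chan []*zeroex.OrderEvent, 100) // buffered so the watcher is not blocked
	sub := app.SubscribeToOrderEvents(events)
	defer sub.Unsubscribe()

	for {
		select {
		case orderEvents := <-events:
			for _, e := range orderEvents {
				log.Printf("order event: %+v", e)
			}
		case err := <-sub.Err():
			log.Printf("order event subscription error: %v", err)
			return
		}
	}
}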
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (handler *rpcHandler) SubscribeToOrders(ctx context.Context) (*ethRpc.Subscription, error) {\n\tlog.Debug(\"received order event subscription request via RPC\")\n\tsubscription, err := SetupOrderStream(ctx, handler.app)\n\tif err != nil {\n\t\tlog.WithField(\"error\", err.Error()).Error(\"internal error in `mesh_subscribe` to `orders` RPC call\")\n\t\treturn nil, errInternal\n\t}\n\treturn subscription, nil\n}", "func (_EtherDelta *EtherDeltaFilterer) WatchOrder(opts *bind.WatchOpts, sink chan<- *EtherDeltaOrder) (event.Subscription, error) {\n\n\tlogs, sub, err := _EtherDelta.contract.WatchLogs(opts, \"Order\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn event.NewSubscription(func(quit <-chan struct{}) error {\n\t\tdefer sub.Unsubscribe()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase log := <-logs:\n\t\t\t\t// New log arrived, parse the event and forward to the user\n\t\t\t\tevent := new(EtherDeltaOrder)\n\t\t\t\tif err := _EtherDelta.contract.UnpackLog(event, \"Order\", log); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tevent.Raw = log\n\n\t\t\t\tselect {\n\t\t\t\tcase sink <- event:\n\t\t\t\tcase err := <-sub.Err():\n\t\t\t\t\treturn err\n\t\t\t\tcase <-quit:\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\tcase err := <-sub.Err():\n\t\t\t\treturn err\n\t\t\tcase <-quit:\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}), nil\n}", "func (w *Watcher) Subscribe(sink chan<- []*zeroex.OrderEvent) event.Subscription {\n\treturn w.orderScope.Track(w.orderFeed.Subscribe(sink))\n}", "func (s *OrderService) PubSubOrderReceiver(event *pubsub.CloudEvent) error {\n\t// This JSON nonsense is an \"easy\" way to convert\n\t// The event.Data which is a map back into a real Order\n\tjsonData, err := json.Marshal(event.Data)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar order spec.Order\n\tif err := json.Unmarshal(jsonData, &order); err != nil {\n\t\treturn err\n\t}\n\n\t// Now we have a real order, we can process it\n\tif err := s.ProcessOrder(order); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (_m *OrderBookService) SubscribeOrderBook(c *ws.Client, bt string, qt string) {\n\t_m.Called(c, bt, qt)\n}", "func SubscribeToExchangeOrderbooks(exchange string) (dispatch.Pipe, error) {\n\tservice.mu.Lock()\n\tdefer service.mu.Unlock()\n\texch, ok := service.books[strings.ToLower(exchange)]\n\tif !ok {\n\t\treturn dispatch.Pipe{}, fmt.Errorf(\"%w for %s exchange\",\n\t\t\terrCannotFindOrderbook, exchange)\n\t}\n\treturn service.Mux.Subscribe(exch.ID)\n}", "func (bf *WebSocketClient) SubscribeChildOrder() {\n\tbf.subscribe(channelChildOrder)\n}", "func SetupOrderStream(ctx context.Context, app *core.App) (*ethRpc.Subscription, error) {\n\tnotifier, supported := ethRpc.NotifierFromContext(ctx)\n\tif !supported {\n\t\treturn &ethRpc.Subscription{}, ethRpc.ErrNotificationsUnsupported\n\t}\n\n\trpcSub := notifier.CreateSubscription()\n\n\tgo func() {\n\t\torderEventsChan := make(chan []*zeroex.OrderEvent)\n\t\torderWatcherSub := app.SubscribeToOrderEvents(orderEventsChan)\n\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase orderEvents := <-orderEventsChan:\n\t\t\t\terr := notifier.Notify(rpcSub.ID, orderEvents)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.WithField(\"error\", err.Error()).Error(\"error while calling notifier.Notify\")\n\t\t\t\t}\n\t\t\tcase <-rpcSub.Err():\n\t\t\t\torderWatcherSub.Unsubscribe()\n\t\t\t\treturn\n\t\t\tcase <-notifier.Closed():\n\t\t\t\torderWatcherSub.Unsubscribe()\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn rpcSub, nil\n}", "func (_WyvernExchange 
*WyvernExchangeFilterer) WatchOrdersMatched(opts *bind.WatchOpts, sink chan<- *WyvernExchangeOrdersMatched, maker []common.Address, taker []common.Address, metadata [][32]byte) (event.Subscription, error) {\n\n\tvar makerRule []interface{}\n\tfor _, makerItem := range maker {\n\t\tmakerRule = append(makerRule, makerItem)\n\t}\n\tvar takerRule []interface{}\n\tfor _, takerItem := range taker {\n\t\ttakerRule = append(takerRule, takerItem)\n\t}\n\n\tvar metadataRule []interface{}\n\tfor _, metadataItem := range metadata {\n\t\tmetadataRule = append(metadataRule, metadataItem)\n\t}\n\n\tlogs, sub, err := _WyvernExchange.contract.WatchLogs(opts, \"OrdersMatched\", makerRule, takerRule, metadataRule)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn event.NewSubscription(func(quit <-chan struct{}) error {\n\t\tdefer sub.Unsubscribe()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase log := <-logs:\n\t\t\t\t// New log arrived, parse the event and forward to the user\n\t\t\t\tevent := new(WyvernExchangeOrdersMatched)\n\t\t\t\tif err := _WyvernExchange.contract.UnpackLog(event, \"OrdersMatched\", log); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tevent.Raw = log\n\n\t\t\t\tselect {\n\t\t\t\tcase sink <- event:\n\t\t\t\tcase err := <-sub.Err():\n\t\t\t\t\treturn err\n\t\t\t\tcase <-quit:\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\tcase err := <-sub.Err():\n\t\t\t\treturn err\n\t\t\tcase <-quit:\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}), nil\n}", "func onOrderUpdate(order kiteconnect.Order) {\n\tfmt.Printf(\"Order: %+v\", order.OrderID)\n}", "func init() {\n\tRegistry.register(EventOrderApproved, EventInfo{\n\t\tReqChan: \"ordersvc.EventOrderApproved\",\n\t\tisValidPayload: func(i interface{}) bool {\n\t\t\t_, ok := i.(EventOrderApprovedPayload)\n\t\t\treturn ok\n\t\t},\n\t})\n}", "func ControlOrders(ch c.Channels) {\n\tnewOrders := make(chan msgs.OrderMsg, 1000)\n\tgo handleNewOrder(newOrders, ch)\n\tgo listenForNewOrders(newOrders, ch)\n\tgo checkForAcceptedOrders(newOrders, ch)\n\tfor {\n\t\tselect {\n\t\tcase newOrder := <-ch.DelegateOrder:\n\t\t\torderMsg := msgs.OrderMsg{Order: newOrder}\n\t\t\torderMsg.Id = (<-ch.MetaData).Id\n\t\t\tdelegateOrder(orderMsg, ch)\n\t\t\tnewOrders <- orderMsg\n\t\tcase orderCompleted := <-ch.CompletedOrder: // the external order has been taken\n\t\t\torderTensorDiffMsg := msgs.OrderTensorDiffMsg{\n\t\t\t\tOrder: orderCompleted,\n\t\t\t\tDiff: msgs.DIFF_REMOVE,\n\t\t\t\tId: (<-ch.MetaData).Id}\n\t\t\tfor i := 0; i < 5; i++ {\n\t\t\t\torderTensorDiffMsg.Send()\n\t\t\t\ttime.Sleep(1 * time.Millisecond)\n\t\t\t}\n\t\t}\n\t}\n}", "func (_WyvernExchange *WyvernExchangeFilterer) WatchOrderCancelled(opts *bind.WatchOpts, sink chan<- *WyvernExchangeOrderCancelled, hash [][32]byte) (event.Subscription, error) {\n\n\tvar hashRule []interface{}\n\tfor _, hashItem := range hash {\n\t\thashRule = append(hashRule, hashItem)\n\t}\n\n\tlogs, sub, err := _WyvernExchange.contract.WatchLogs(opts, \"OrderCancelled\", hashRule)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn event.NewSubscription(func(quit <-chan struct{}) error {\n\t\tdefer sub.Unsubscribe()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase log := <-logs:\n\t\t\t\t// New log arrived, parse the event and forward to the user\n\t\t\t\tevent := new(WyvernExchangeOrderCancelled)\n\t\t\t\tif err := _WyvernExchange.contract.UnpackLog(event, \"OrderCancelled\", log); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tevent.Raw = log\n\n\t\t\t\tselect {\n\t\t\t\tcase sink <- event:\n\t\t\t\tcase err := 
<-sub.Err():\n\t\t\t\t\treturn err\n\t\t\t\tcase <-quit:\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\tcase err := <-sub.Err():\n\t\t\t\treturn err\n\t\t\tcase <-quit:\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}), nil\n}", "func (room *RoomRecorder) eventsSubscribe(e EventsI) {\n\tobserver := synced.NewObserver(\n\t\tsynced.NewPairNoArgs(room_.StatusFinished, room.finish))\n\te.Observe(observer.AddPublisherCode(room_.UpdateStatus))\n}", "func (*FakeReconcilerClient) SubscribeToEvents(since, until time.Time, ef filters.Args) ([]events.Message, chan interface{}) {\n\treturn nil, nil\n}", "func (bf *WebSocketClient) SubscribeParentOrder() {\n\tbf.subscribe(channelParentOrder)\n}", "func subscribeToEvents(bot *tgbotapi.BotAPI, redisClient *redis.Client, channel string) {\n\tpubsub := redisClient.Subscribe(channel)\n\tgo listen(bot, pubsub)\n}", "func (_m *OrderBookService) SubscribeRawOrderBook(c *ws.Client, bt string, qt string) {\n\t_m.Called(c, bt, qt)\n}", "func SubscribeOperatorFactoryEvents(\n\tt *testing.T,\n\tauthorizedForwarderCreated chan *operator_factory.OperatorFactoryAuthorizedForwarderCreated,\n\toperatorCreated chan *operator_factory.OperatorFactoryOperatorCreated,\n\tchainClient blockchain.EVMClient,\n\toperatorFactoryInstance contracts.OperatorFactory,\n) {\n\tl := utils.GetTestLogger(t)\n\tcontractABI, err := operator_factory.OperatorFactoryMetaData.GetAbi()\n\trequire.NoError(t, err, \"Getting contract abi for OperatorFactory shouldn't fail\")\n\tlatestBlockNum, err := chainClient.LatestBlockNumber(context.Background())\n\trequire.NoError(t, err, \"Subscribing to contract event log for OperatorFactory instance shouldn't fail\")\n\tquery := geth.FilterQuery{\n\t\tFromBlock: big.NewInt(0).SetUint64(latestBlockNum),\n\t\tAddresses: []common.Address{common.HexToAddress(operatorFactoryInstance.Address())},\n\t}\n\n\teventLogs := make(chan types.Log)\n\tsub, err := chainClient.SubscribeFilterLogs(context.Background(), query, eventLogs)\n\trequire.NoError(t, err, \"Subscribing to contract event log for OperatorFactory instance shouldn't fail\")\n\tgo func() {\n\t\tdefer sub.Unsubscribe()\n\t\tremainingExpectedEvents := 2\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase err := <-sub.Err():\n\t\t\t\tl.Error().Err(err).Msg(\"Error while watching for new contract events. 
Retrying Subscription\")\n\t\t\t\tsub.Unsubscribe()\n\n\t\t\t\tsub, err = chainClient.SubscribeFilterLogs(context.Background(), query, eventLogs)\n\t\t\t\trequire.NoError(t, err, \"Subscribing to contract event log for OperatorFactory instance shouldn't fail\")\n\t\t\tcase vLog := <-eventLogs:\n\t\t\t\teventDetails, err := contractABI.EventByID(vLog.Topics[0])\n\t\t\t\trequire.NoError(t, err, \"Getting event details for OperatorFactory instance shouldn't fail\")\n\t\t\t\tgo ProcessNewEvent(\n\t\t\t\t\tt, sub, operatorCreated, authorizedForwarderCreated, &vLog,\n\t\t\t\t\teventDetails, operatorFactoryInstance, contractABI, chainClient,\n\t\t\t\t)\n\t\t\t\tif eventDetails.Name == \"AuthorizedForwarderCreated\" || eventDetails.Name == \"OperatorCreated\" {\n\t\t\t\t\tremainingExpectedEvents--\n\t\t\t\t\tif remainingExpectedEvents <= 0 {\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n}", "func (e *orderEndpoint) handleNewOrder(ev *types.WebsocketEvent, c *ws.Client) {\n\to := &types.Order{}\n\n\tbytes, err := json.Marshal(ev.Payload)\n\tif err != nil {\n\t\tlogger.Error(err)\n\t\tc.SendMessage(ws.OrderChannel, \"ERROR\", err.Error())\n\t\treturn\n\t}\n\n\terr = json.Unmarshal(bytes, &o)\n\tif err != nil {\n\t\tlogger.Error(err)\n\t\tc.SendOrderErrorMessage(err, o.Hash)\n\t\treturn\n\t}\n\n\to.Hash = o.ComputeHash()\n\tws.RegisterOrderConnection(o.UserAddress, c)\n\n\tacc, err := e.accountService.FindOrCreate(o.UserAddress)\n\tif err != nil {\n\t\tlogger.Error(err)\n\t\tc.SendOrderErrorMessage(err, o.Hash)\n\t}\n\n\tif acc.IsBlocked {\n\t\tc.SendMessage(ws.OrderChannel, \"ERROR\", errors.New(\"Account is blocked\"))\n\t}\n\n\terr = e.orderService.NewOrder(o)\n\tif err != nil {\n\t\tlogger.Error(err)\n\t\tc.SendOrderErrorMessage(err, o.Hash)\n\t\treturn\n\t}\n}", "func (s *OrderService) WatchChanges() {\n\tgo func() {\n\t\tfor {\n\t\t\t<-time.After(500 * time.Millisecond)\n\t\t\ts.processBulkOrders()\n\t\t}\n\t}()\n\ts.watchChanges()\n}", "func (tr *Transport) SubscribeEvents(ctx context.Context, mux transport.MessageDispatcher) error {\n\treturn ErrNotImplemented\n}", "func (o *Orderbook) trade_listener() {\n\t\n\tfor {\n\t\tselect {\n\t\tcase message := <- o.trading_channel:\n\t\t\tswitch message.GetSort() {\n\t\t\tcase \"Ask\":\n\t\t\t\to.asks = append(o.asks, message)\n\t\t\tcase \"Bid\":\n\t\t\t\to.bids = append(o.bids, message)\n\t\t\tdefault:\n\t\t\t\tlog.Panic(\"message arrived that wasn't a Bid or Ask\")\n\t\t\t}\t\n\t\tdefault:\n\t\t\to.housekeep()\n\t\t}\n\n\t\n\t}\n}", "func SubscribeToEvents() {\n\tnotesService := notes.NewNotesService(os.Getenv(\"MICRO_API_TOKEN\"))\n\trsp, err := notesService.Events(&notes.EventsRequest{\n\t\tId: \"63c0cdf8-2121-11ec-a881-0242e36f037a\",\n\t})\n\tfmt.Println(rsp, err)\n}", "func (channel Channel) subscribe(observers ...Observer) {\n\tchannel.checkChannelMap()\n\tfor _, observer := range observers {\n\t\tchannel.observers[observer.id] = observer\n\t\tfmt.Printf(\"New observer %s subscribed in channel %s \\n\", observer.id, channel.id)\n\t}\n}", "func (e *defaultEventBus) Subscribe(handlers ...EventHandler) {\n\te.lock.Lock()\n\te.log.Info().Int(\"count\", len(handlers)).Msg(\"adding listeners\")\n\tfor _, handler := range handlers {\n\t\tsub := newSubscription(e.log, handler, e.errorHandler)\n\t\te.handlers = append(e.handlers, sub)\n\t\tsub.Listen()\n\t}\n\te.lock.Unlock()\n}", "func (b *Broker) EventSubscribe(subscriptions []string) (events gp.MsgQueue) {\n\tcommands := make(chan gp.QueueCommand)\n\tmessages := make(chan 
[]byte)\n\tevents = gp.MsgQueue{Commands: commands, Messages: messages}\n\tconn := b.pool.Get()\n\tpsc := redis.PubSubConn{Conn: conn}\n\tfor _, s := range subscriptions {\n\t\tpsc.Subscribe(s)\n\t}\n\tgo controller(&psc, events.Commands)\n\tgo messageReceiver(&psc, events.Messages)\n\tlog.Println(\"New websocket connection created.\")\n\treturn events\n}", "func (b *BTCC) OnGroupOrder(message []byte, output chan socketio.Message) {\n\ttype Response struct {\n\t\tGroupOrder WebsocketGroupOrder `json:\"grouporder\"`\n\t}\n\tvar resp Response\n\terr := common.JSONDecode(message, &resp)\n\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn\n\t}\n}", "func pickOrders() {\n\tfor order := range ReceivedOrdersChan {\n\t\t// Log that the order is received\n\t\ttimeNow := time.Now().Format(time.Stamp)\n\t\tlogStr := fmt.Sprintf(\"%v Order %v received\", timeNow, order.ID)\n\t\tfmt.Println(logStr)\n\t\tOrdersLogChan <- logStr\n\n\t\t// Create a waiting for food channel\n\t\tOrdersChan[order.ID] = make(chan Food, 100)\n\t\tgo waitForOrder(order)\n\n\t\t// Send the food from the order to the FoodChan\n\t\tfor _, id := range order.Items {\n\t\t\tfood := Foods[id-1]\n\t\t\tfood.orderID = order.ID\n\t\t\tFoodChan <- food\n\t\t}\n\t}\n}", "func (l *ObserverList) Subscribe(obs Observer) {\n\tl.Lock()\n\tl.Observers = append(l.Observers, obs)\n\tl.Unlock()\n}", "func OrdersHandler(c buffalo.Context) error {\n\ttx := c.Value(\"tx\").(*pop.Connection)\n\n\tfilters := ordersFilters{\n\t\tStartDate: time.Now().AddDate(0, -1, 0),\n\t\tEndDate: time.Now(),\n\t}\n\n\tif err := c.Bind(&filters); err != nil {\n\t\treturn errors.Wrap(err, \"failed to bind arguments\")\n\t}\n\n\tfmt.Printf(\"params: %+v\\n\", c.Params())\n\tfmt.Printf(\"filters(after bind): %+v\\n\", filters)\n\n\t// Reset times so we do not get results based on current time\n\tfilters.StartDate = time.Date(\n\t\tfilters.StartDate.Year(),\n\t\tfilters.StartDate.Month(),\n\t\tfilters.StartDate.Day(),\n\t\t0,\n\t\t0,\n\t\t0,\n\t\t0,\n\t\tfilters.StartDate.Location(),\n\t)\n\tfilters.EndDate = time.Date(\n\t\tfilters.EndDate.Year(),\n\t\tfilters.EndDate.Month(),\n\t\tfilters.EndDate.Day(),\n\t\t0,\n\t\t0,\n\t\t0,\n\t\t0,\n\t\tfilters.EndDate.Location(),\n\t)\n\n\torders := models.Orders{}\n\n\tquery := tx.Where(\"date >= ?\", filters.StartDate)\n\tquery = query.Where(\"date < ?\", filters.EndDate.AddDate(0, 0, 1)) // We want to include end of the day, meaning starting of next day\n\terr := query.All(&orders)\n\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to fetch all the orders\")\n\t}\n\n\tc.Set(\"orders\", orders)\n\tc.Set(\"filters\", filters)\n\n\ttotalWithoutVAT := 0.0\n\ttotalWithVAT := 0.0\n\ttotalVAT := 0.0\n\tfor i := range orders {\n\n\t\tfmt.Printf(\"wut %+v\\n\", orders[i])\n\t\tif err := tx.Load(&orders[i], \"Rows\"); err != nil {\n\t\t\treturn errors.Wrap(err, \"could not load order rows\")\n\t\t}\n\t\ttotalWithoutVAT += orders[i].TotalWithoutVAT()\n\t\ttotalWithVAT += orders[i].TotalWithVAT()\n\t\ttotalVAT += orders[i].TotalVAT()\n\t}\n\n\tc.Set(\"totalWithoutVAT\", totalWithoutVAT)\n\tc.Set(\"totalWithVAT\", totalWithVAT)\n\tc.Set(\"totalVAT\", totalVAT)\n\n\treturn c.Render(200, r.HTML(\"orders.html\"))\n}", "func (impl *dagserviceImpl) SubscribeToEvents(subscriber Subscriber) {\n\timpl.subscriber = subscriber\n}", "func (_WyvernExchange *WyvernExchangeFilterer) WatchOrderApprovedPartTwo(opts *bind.WatchOpts, sink chan<- *WyvernExchangeOrderApprovedPartTwo, hash [][32]byte) (event.Subscription, error) {\n\n\tvar hashRule 
[]interface{}\n\tfor _, hashItem := range hash {\n\t\thashRule = append(hashRule, hashItem)\n\t}\n\n\tlogs, sub, err := _WyvernExchange.contract.WatchLogs(opts, \"OrderApprovedPartTwo\", hashRule)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn event.NewSubscription(func(quit <-chan struct{}) error {\n\t\tdefer sub.Unsubscribe()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase log := <-logs:\n\t\t\t\t// New log arrived, parse the event and forward to the user\n\t\t\t\tevent := new(WyvernExchangeOrderApprovedPartTwo)\n\t\t\t\tif err := _WyvernExchange.contract.UnpackLog(event, \"OrderApprovedPartTwo\", log); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tevent.Raw = log\n\n\t\t\t\tselect {\n\t\t\t\tcase sink <- event:\n\t\t\t\tcase err := <-sub.Err():\n\t\t\t\t\treturn err\n\t\t\t\tcase <-quit:\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\tcase err := <-sub.Err():\n\t\t\t\treturn err\n\t\t\tcase <-quit:\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}), nil\n}", "func EventSubscribeH(w http.ResponseWriter, r *http.Request) {\n\n\tlog.V(logLevel).Debugf(\"%s:subscribe:> subscribe on subscribe\", logPrefix)\n\n\tif r.Method != \"GET\" {\n\t\thttp.Error(w, \"Method not allowed\", http.StatusMethodNotAllowed)\n\t\treturn\n\t}\n\n\tlog.V(logLevel).Debugf(\"%s:subscribe:> watch all events\", logPrefix)\n\n\tvar (\n\t\tsm = distribution.NewServiceModel(r.Context(), envs.Get().GetStorage())\n\t\tnm = distribution.NewNamespaceModel(r.Context(), envs.Get().GetStorage())\n\t\tcm = distribution.NewClusterModel(r.Context(), envs.Get().GetStorage())\n\t\tdone = make(chan bool, 1)\n\t)\n\n\tconn, err := upgrader.Upgrade(w, r, nil)\n\tif err != nil {\n\t\tlog.V(logLevel).Debugf(\"%s:subscribe:> set websocket upgrade err: %s\", logPrefix, err.Error())\n\t\treturn\n\t}\n\n\tticker := time.NewTicker(time.Second)\n\tdefer ticker.Stop()\n\n\tvar serviceEvents = make(chan types.ServiceEvent)\n\tvar namespaceEvents = make(chan types.NamespaceEvent)\n\tvar clusterEvents = make(chan types.ClusterEvent)\n\n\tnotify := w.(http.CloseNotifier).CloseNotify()\n\n\tgo func() {\n\t\t<-notify\n\t\tlog.V(logLevel).Debugf(\"%s:subscribe:> HTTP connection just closed.\", logPrefix)\n\t\tdone <- true\n\t}()\n\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-done:\n\t\t\t\tclose(serviceEvents)\n\t\t\t\tclose(namespaceEvents)\n\t\t\t\tclose(clusterEvents)\n\t\t\t\treturn\n\t\t\tcase e := <-clusterEvents:\n\n\t\t\t\tvar data interface{}\n\t\t\t\tif e.Data == nil {\n\t\t\t\t\tdata = nil\n\t\t\t\t} else {\n\t\t\t\t\tdata = v1.View().Cluster().New(e.Data)\n\t\t\t\t}\n\n\t\t\t\tevent := Event{\n\t\t\t\t\tEntity: \"cluster\",\n\t\t\t\t\tAction: e.Action,\n\t\t\t\t\tName: e.Name,\n\t\t\t\t\tData: data,\n\t\t\t\t}\n\n\t\t\t\tif err = conn.WriteJSON(event); err != nil {\n\t\t\t\t\tlog.Errorf(\"%s:subscribe:> write cluster event to socket error.\", logPrefix)\n\t\t\t\t}\n\t\t\tcase e := <-serviceEvents:\n\n\t\t\t\tvar data interface{}\n\t\t\t\tif e.Data == nil {\n\t\t\t\t\tdata = nil\n\t\t\t\t} else {\n\t\t\t\t\tdata = v1.View().Service().New(e.Data)\n\t\t\t\t}\n\n\t\t\t\tevent := Event{\n\t\t\t\t\tEntity: \"service\",\n\t\t\t\t\tAction: e.Action,\n\t\t\t\t\tName: e.Name,\n\t\t\t\t\tData: data,\n\t\t\t\t}\n\n\t\t\t\tif err = conn.WriteJSON(event); err != nil {\n\t\t\t\t\tlog.Errorf(\"%s:subscribe:> write service event to socket error.\", logPrefix)\n\t\t\t\t}\n\t\t\tcase e := <-namespaceEvents:\n\n\t\t\t\tvar data interface{}\n\t\t\t\tif e.Data == nil {\n\t\t\t\t\tdata = nil\n\t\t\t\t} else {\n\t\t\t\t\tdata = 
v1.View().Namespace().New(e.Data)\n\t\t\t\t}\n\n\t\t\t\tevent := Event{\n\t\t\t\t\tEntity: \"namespace\",\n\t\t\t\t\tAction: e.Action,\n\t\t\t\t\tName: e.Name,\n\t\t\t\t\tData: data,\n\t\t\t\t}\n\n\t\t\t\tif err = conn.WriteJSON(event); err != nil {\n\t\t\t\t\tlog.Errorf(\"%s:subscribe:> write namespace event to socket error.\", logPrefix)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n\n\tgo cm.Watch(clusterEvents)\n\tgo sm.Watch(serviceEvents, nil)\n\tgo nm.Watch(namespaceEvents)\n\n\tgo func() {\n\t\tfor range ticker.C {\n\t\t\tif err := conn.WriteMessage(websocket.TextMessage, []byte{}); err != nil {\n\t\t\t\tlog.Errorf(\"%s:subscribe:> writing to the client websocket err: %s\", logPrefix, err.Error())\n\t\t\t\tdone <- true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}()\n\n\t<-done\n}", "func (s *Socket) placeOrder(p Payload) {\n\tpayload := p.(map[string]interface{})[\"order\"].(map[string]interface{})\n\to := &Order{}\n\to.Decode(payload)\n\n\to.events = s.events\n\tif err := s.server.engine.AddOrder(o); err != nil {\n\t\tlog.Printf(\"Error: Failed processing order: %v\", err)\n\t}\n}", "func (e *Engine) processEvent(in *InEvent) {\n\tif in.Id != nil {\n\t\tnewId := exutil.UUIDtoA(in.Id)\n\t\t_, ok := e.eventMapper[newId]\n\t\tif !ok {\n\t\t\tque := e.eventQue\n\t\t\tif len(e.eventQue) > 300 {\n\t\t\t\tque = e.eventQue[1:]\n\t\t\t\tdelete(e.eventMapper, e.eventQue[0])\n\t\t\t}\n\t\t\tque = append(que, newId)\n\t\t\te.eventQue = que\n\t\t\te.eventMapper[newId] = true\n\t\t} else {\n\t\t\tlog.Errorf(\"in-event %v come again, ignored\", newId)\n\t\t\treturn\n\t\t}\n\t}\n\tlog.Info(\" get order event: \", formatInEventToString(in))\n\n\tvar out *OutEvent\n\tswitch in.GetEvent().(type) {\n\tcase *pb.EngineEventIn_NewOrder:\n\t\tout = e.processNewOrderEvent(in.EventCoord, in.GetNewOrder())\n\tcase *pb.EngineEventIn_UpdateOrder:\n\t\tout = e.processUpdateOrderEvent(in.EventCoord, in.GetUpdateOrder())\n\tcase *pb.EngineEventIn_CancelOrder:\n\t\tout = e.processCancelOrderEvent(in.EventCoord, in.GetCancelOrder())\n\tdefault:\n\t\tvar msg string\n\t\tmsg = fmt.Sprintf(\"Engine event format is unknown, please check module version\")\n\t\tout = newFailEvent(e.Orderbook.Code, in.EventCoord, pb.EngineEventResult_ENG_ARG_INVALID, msg)\n\t\tlog.Errorln(msg)\n\t}\n\te.outEvents <- out\n}", "func (op *Operator) HandleEvents() error {\n\terrorEvents, err := op.Exchange.ListenToErrors()\n\tif err != nil {\n\t\tlogger.Error(err)\n\t\treturn err\n\t}\n\n\tfor {\n\t\tselect {\n\t\tcase event := <-errorEvents:\n\t\t\tlogger.Error(\"Receiving error event\", utils.JSON(event))\n\t\t\tmakerOrderHash := event.MakerOrderHash\n\t\t\ttakerOrderHash := event.TakerOrderHash\n\t\t\terrID := int(event.ErrorId)\n\n\t\t\ttrades, err := op.TradeService.GetByTakerOrderHash(takerOrderHash)\n\t\t\tif err != nil {\n\t\t\t\tlogger.Error(err)\n\t\t\t}\n\n\t\t\tto, err := op.OrderService.GetByHash(takerOrderHash)\n\t\t\tif err != nil {\n\t\t\t\tlogger.Error(err)\n\t\t\t}\n\n\t\t\tmakerOrders, err := op.OrderService.GetByHash(makerOrderHash)\n\t\t\tif err != nil {\n\t\t\t\tlogger.Error(err)\n\t\t\t}\n\n\t\t\tmatches := &types.Matches{\n\t\t\t\tMakerOrders: []*types.Order{makerOrders},\n\t\t\t\tTakerOrder: to,\n\t\t\t\tTrades: trades,\n\t\t\t}\n\n\t\t\tgo op.HandleTxError(matches, errID)\n\t\t}\n\t}\n}", "func orderList(context *router.Context) {\n\tgo func() {\n\n\t\tdata := []shared.SaleOrder{}\n\t\terr := restServer.ReadAll(&data)\n\t\tif err != nil {\n\t\t\tprint(\"REST error\", err.Error())\n\t\t\treturn\n\t\t}\n\n\t\tform := 
formulate.ListForm{}\n\t\tform.New(\"fa-vcard\", \"Order List\")\n\n\t\t// Define the layout\n\n\t\tform.DateColumn(\"Date\", \"Date\")\n\n\t\t// Add event handlers\n\t\tform.CancelEvent(func(evt dom.Event) {\n\t\t\tevt.PreventDefault()\n\t\t\tSession.Navigate(\"/\")\n\t\t})\n\n\t\tform.NewRowEvent(func(evt dom.Event) {\n\t\t\tevt.PreventDefault()\n\t\t\tSession.Navigate(\"/order/add\")\n\t\t})\n\n\t\tform.RowEvent(func(key string) {\n\t\t\tSession.Navigate(\"/order/\" + key)\n\t\t})\n\n\t\tform.Render(\"order-list\", \".jass-main\", &data)\n\t}()\n}", "func (s *OrderService) handleEngineOrderAdded(res *types.EngineResponse) {\n\to := res.Order\n\n\t// Save notification\n\tnotifications, err := s.notificationDao.Create(&types.Notification{\n\t\tRecipient: o.UserAddress,\n\t\tMessage: types.Message{\n\t\t\tMessageType: \"ORDER_ADDED\",\n\t\t\tDescription: o.Hash.Hex(),\n\t\t},\n\t\tType: types.TypeLog,\n\t\tStatus: types.StatusUnread,\n\t})\n\n\tif err != nil {\n\t\tlogger.Error(err)\n\t}\n\n\tws.SendOrderMessage(\"ORDER_ADDED\", o.UserAddress, o)\n\tws.SendNotificationMessage(\"ORDER_ADDED\", o.UserAddress, notifications)\n\ts.updateOrderPricepoint(o)\n}", "func (w *Worker) processOrder(order orders.Order) {\n\tlog.Println(\n\t\t\"level\", \"INFO\",\n\t\t\"object\", \"workers.worker\",\n\t\t\"method\", \"processOrder\",\n\t\t\"msg\", \"new order to proces\",\n\t\t\"order\", order,\n\t)\n\tw.resultStream <- order.ToRecord()\n}", "func (_EtherDelta *EtherDeltaFilterer) FilterOrder(opts *bind.FilterOpts) (*EtherDeltaOrderIterator, error) {\n\n\tlogs, sub, err := _EtherDelta.contract.FilterLogs(opts, \"Order\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &EtherDeltaOrderIterator{contract: _EtherDelta.contract, event: \"Order\", logs: logs, sub: sub}, nil\n}", "func Events(payloadCh <-chan []byte, registryCh chan<- client.RegistryFunc) {\n\tpackets := make(map[uint64]client.RegistryFunc)\n\n\tgo func(payloadCh <-chan []byte, registryCh chan<- client.RegistryFunc) {\n\t\tvar index uint64 = startingIndex\n\n\t\tdefer close(registryCh)\n\n\t\tfor payload := range payloadCh {\n\t\t\tpkt, err := event.Parse(payload)\n\t\t\tif err != nil {\n\t\t\t\t// TODO(tmrts): might try to read the packet sequence no and skip that packet\n\t\t\t\t// to make sure the flow continues.\n\t\t\t\tlog.Debug(fmt.Sprintf(\"event.Parse(%#q) got error %#q\", string(payload), err))\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tseq := pkt.Sequence()\n\t\t\t// Ignores packets with same sequence numbers or\n\t\t\t// lower than current index numbers.\n\t\t\tif _, ok := packets[seq]; !ok && seq >= index {\n\t\t\t\tpackets[seq] = notify.FuncFor(pkt)\n\t\t\t}\n\n\t\t\tfor {\n\t\t\t\tpkt, ok := packets[index]\n\t\t\t\tif !ok {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\n\t\t\t\tregistryCh <- pkt\n\n\t\t\t\t// Evicts used event packets\n\t\t\t\t// NOTE: Bulk delete might increase performance\n\t\t\t\tdelete(packets, index)\n\n\t\t\t\tindex++\n\t\t\t}\n\t\t}\n\n\t\t// Send the remaning events\n\t\tfor {\n\t\t\tpkt, ok := packets[index]\n\t\t\tif !ok {\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tregistryCh <- pkt\n\t\t\tindex++\n\t\t}\n\t}(payloadCh, registryCh)\n}", "func (_WyvernExchange *WyvernExchangeFilterer) WatchOrderApprovedPartOne(opts *bind.WatchOpts, sink chan<- *WyvernExchangeOrderApprovedPartOne, hash [][32]byte, maker []common.Address, feeRecipient []common.Address) (event.Subscription, error) {\n\n\tvar hashRule []interface{}\n\tfor _, hashItem := range hash {\n\t\thashRule = append(hashRule, hashItem)\n\t}\n\n\tvar makerRule 
[]interface{}\n\tfor _, makerItem := range maker {\n\t\tmakerRule = append(makerRule, makerItem)\n\t}\n\n\tvar feeRecipientRule []interface{}\n\tfor _, feeRecipientItem := range feeRecipient {\n\t\tfeeRecipientRule = append(feeRecipientRule, feeRecipientItem)\n\t}\n\n\tlogs, sub, err := _WyvernExchange.contract.WatchLogs(opts, \"OrderApprovedPartOne\", hashRule, makerRule, feeRecipientRule)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn event.NewSubscription(func(quit <-chan struct{}) error {\n\t\tdefer sub.Unsubscribe()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase log := <-logs:\n\t\t\t\t// New log arrived, parse the event and forward to the user\n\t\t\t\tevent := new(WyvernExchangeOrderApprovedPartOne)\n\t\t\t\tif err := _WyvernExchange.contract.UnpackLog(event, \"OrderApprovedPartOne\", log); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tevent.Raw = log\n\n\t\t\t\tselect {\n\t\t\t\tcase sink <- event:\n\t\t\t\tcase err := <-sub.Err():\n\t\t\t\t\treturn err\n\t\t\t\tcase <-quit:\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\tcase err := <-sub.Err():\n\t\t\t\treturn err\n\t\t\tcase <-quit:\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}), nil\n}", "func (ch *chain) Order(env *cb.Envelope, configSeq uint64) error {\n\t// e, _ := protoutil.Marshal(env)\n\t// var tmp cb.Envelope\n\t// protoutil.UnmarshalEnvelopeOfType(e,_, tmp)\n\t// go func() error {\n\t// \tselect {\n\t// \tcase ch.sendChan <- &message{\n\t// \t\tconfigSeq: configSeq,\n\t// \t\tnormalMsg: env,\n\t// \t}:\n\t// \t\treturn nil\n\t// \tcase <-ch.exitChan:\n\t// \t\treturn fmt.Errorf(\"Exiting\")\n\t// \t}\n\t// }()\n\t// return nil\n\tselect {\n\tcase ch.sendChan <- &message{\n\t\tconfigSeq: configSeq,\n\t\tnormalMsg: env,\n\t}:\n\t\treturn nil\n\tcase <-ch.exitChan:\n\t\treturn fmt.Errorf(\"Exiting\")\n\t}\n\n}", "func SendOrderNotification(svc interface{}, from, notifyEmail string, order *store.Order) error {\n\t// generate receipt and email info\n\tsubject := fmt.Sprintf(\"New Order! (#%s)\", order.OrderID)\n\ttext := fmt.Sprintf(\"Order #%s received! 
Price: %0.2f\", order.OrderID, order.OrderTotal)\n\ttmpl, err := s3ops.GetOrderNotificationHtmlTemplate(s3ops.InitSesh())\n\tif err != nil {\n\t\tlog.Printf(\"SendCustomerReceipt failed: %v\", err)\n\t\treturn err\n\t}\n\n\titems := []htmlops.ItemSummary{}\n\tfor _, item := range order.Items {\n\t\tis := htmlops.ItemSummary{\n\t\t\tName: item.Name,\n\t\t\tQuantity: item.Quantity,\n\t\t}\n\t\titems = append(items, is)\n\t}\n\thtmlInput := htmlops.ReceiptTemplateData{\n\t\tOrderID: order.OrderID,\n\t\tSubtotal: order.SalesSubtotal,\n\t\tShipping: order.ShippingCost,\n\t\tSalesTax: order.SalesTax,\n\t\tOrderTotal: order.OrderTotal,\n\t\tFirstName: order.ShippingAddress.FirstName,\n\t\tLastName: order.ShippingAddress.LastName,\n\t\tAddress1: order.ShippingAddress.AddressLine1,\n\t\tAddress2: order.ShippingAddress.AddressLine2,\n\t\tCity: order.ShippingAddress.City,\n\t\tState: order.ShippingAddress.State,\n\t\tZip: order.ShippingAddress.Zip,\n\t\tPhone: order.ShippingAddress.PhoneNumber,\n\t\tItems: items,\n\t}\n\thtml, err := htmlops.CreateHtmlTemplate(tmpl, htmlInput)\n\tif err != nil {\n\t\tlog.Printf(\"SendCustomerReceipt failed: %v\", err)\n\t\treturn err\n\t}\n\n\t// poll for messages with exponential backoff for errors & empty responses\n\tretries := 0\n\tmaxRetries := 4\n\tbackoff := 1000.0\n\tfor {\n\t\t// receive messages from queue\n\t\terr := goses.SendEmail(svc, []string{notifyEmail}, []string{}, from, subject, text, html)\n\t\tif err != nil {\n\t\t\t// retry with backoff if error\n\t\t\tif retries > maxRetries {\n\t\t\t\tlog.Printf(\"SendCustomerReceipt failed: %v -- max retries exceeded\", err)\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tlog.Printf(\"SendCustomerReceipt failed: %v -- retrying...\", err)\n\t\t\ttime.Sleep(time.Duration(backoff) * time.Millisecond)\n\t\t\tbackoff = backoff * 2\n\t\t\tretries++\n\t\t\tcontinue\n\t\t}\n\n\t\treturn nil\n\t}\n}", "func SubscribeToExchangeTickers(exchange string) (dispatch.Pipe, error) {\n\texchange = strings.ToLower(exchange)\n\tservice.RLock()\n\tdefer service.RUnlock()\n\tid, ok := service.Exchange[exchange]\n\tif !ok {\n\t\treturn dispatch.Pipe{}, fmt.Errorf(\"%s exchange tickers not found\",\n\t\t\texchange)\n\t}\n\n\treturn service.mux.Subscribe(id)\n}", "func (wewq *WorkflowEventsWaitQuery) Order(o ...OrderFunc) *WorkflowEventsWaitQuery {\n\twewq.order = append(wewq.order, o...)\n\treturn wewq\n}", "func RegisterOrder(cdc *wire.Codec) {\n\tcdc.RegisterInterface((*types.Order)(nil), nil)\n\tcdc.RegisterConcrete(&types.BaseOrder{}, \"commit-blockchain/Order\", nil)\n}", "func (e *orderEndpoint) handleCancelOrder(ev *types.WebsocketEvent, c *ws.Client) {\n\tbytes, err := json.Marshal(ev.Payload)\n\toc := &types.OrderCancel{}\n\n\terr = oc.UnmarshalJSON(bytes)\n\tif err != nil {\n\t\tlogger.Error(err)\n\t\tc.SendOrderErrorMessage(err, oc.OrderHash)\n\t}\n\n\taddr, err := oc.GetSenderAddress()\n\tif err != nil {\n\t\tlogger.Error(err)\n\t\tc.SendOrderErrorMessage(err, oc.OrderHash)\n\t}\n\n\tws.RegisterOrderConnection(addr, c)\n\n\torderErr := e.orderService.CancelOrder(oc)\n\tif orderErr != nil {\n\t\tlogger.Error(err)\n\t\tc.SendOrderErrorMessage(orderErr, oc.OrderHash)\n\t\treturn\n\t}\n}", "func EventSubscriber(event interface{}) {\n\tbyteData, _ := json.Marshal(&event)\n\tvar message common.Events\n\n\terr := json.Unmarshal(byteData, &message)\n\tif err != nil {\n\t\tl.Log.Error(\"error while unmarshal the event\" + err.Error())\n\t\treturn\n\t}\n\twriteEventToJobQueue(message)\n}", "func (app *Application) Subscribe(store *todo.Store) 
{\n\tstore.Register(app.subscriber)\n}", "func (app *application) vendorOrders(w http.ResponseWriter, r *http.Request) {\n\tsession, err := app.sessionStore.Get(r, \"session-name\")\n\tif err != nil {\n\t\tapp.serverError(w, err)\n\t\treturn\n\t}\n\tvendorID := session.Values[\"vendorID\"].(int)\n\n\tdeliveries, err := app.deliveries.GetAllByVendorID(vendorID)\n\tif err != nil {\n\t\tapp.serverError(w, err)\n\t\treturn\n\t}\n\n\tapp.render(w, r, \"vendororders.page.tmpl\", &templateData{Deliveries: deliveries})\n}", "func (iwc *IdaxWsConn) SendSubMyOrder() error {\n\treq := WsMyReq{Timestamp: time.Now().UnixNano()}\n\treturn iwc.SendMessage(fmt.Sprintf(WS_MY_ORDER, AddSignToJsonStr(req, iwc.Key, iwc.Secret)))\n}", "func (_EtherDelta *EtherDeltaSession) Orders(arg0 common.Address, arg1 [32]byte) (bool, error) {\n\treturn _EtherDelta.Contract.Orders(&_EtherDelta.CallOpts, arg0, arg1)\n}", "func (w *Watcher) Add(orderInfo *ordervalidator.AcceptedOrderInfo, pinned bool) error {\n\tif err := w.decreaseMaxExpirationTimeIfNeeded(); err != nil {\n\t\treturn err\n\t}\n\n\t// TODO(albrow): technically we should count the current number of orders,\n\t// remove some if needed, and then insert the order in a single transaction to\n\t// ensure that we don't accidentally exceed the maximum. In practice, and\n\t// because of the way OrderWatcher works, the distinction shouldn't matter.\n\ttxn := w.meshDB.Orders.OpenTransaction()\n\tdefer func() {\n\t\t_ = txn.Discard()\n\t}()\n\n\t// Final expiration time check before inserting the order. We might have just\n\t// changed max expiration time above.\n\tif !pinned && orderInfo.SignedOrder.ExpirationTimeSeconds.Cmp(w.maxExpirationTime) == 1 {\n\t\t// HACK(albrow): This is technically not the ideal way to respond to this\n\t\t// situation, but it is a lot easier to implement for the time being. In the\n\t\t// future, we should return an error and then react to that error\n\t\t// differently depending on whether the order was received via RPC or from a\n\t\t// peer. In the former case, we should return an RPC error response\n\t\t// indicating that the order was not in fact added. In the latter case, we\n\t\t// should effectively no-op, neither penalizing the peer or emitting any\n\t\t// order events. For now, we respond by emitting an ADDED event immediately\n\t\t// followed by a STOPPED_WATCHING event. If this order was submitted via\n\t\t// RPC, the RPC client will see a response that indicates the order was\n\t\t// successfully added, and then it will look like we immediately stopped\n\t\t// watching it. 
This is not too far off from what really happened but is\n\t\t// slightly inefficient.\n\t\taddedEvent := &zeroex.OrderEvent{\n\t\t\tOrderHash: orderInfo.OrderHash,\n\t\t\tSignedOrder: orderInfo.SignedOrder,\n\t\t\tFillableTakerAssetAmount: orderInfo.FillableTakerAssetAmount,\n\t\t\tEndState: zeroex.ESOrderAdded,\n\t\t}\n\t\tstoppedWatchingEvent := &zeroex.OrderEvent{\n\t\t\tOrderHash: orderInfo.OrderHash,\n\t\t\tSignedOrder: orderInfo.SignedOrder,\n\t\t\tFillableTakerAssetAmount: orderInfo.FillableTakerAssetAmount,\n\t\t\tEndState: zeroex.ESStoppedWatching,\n\t\t}\n\t\tw.orderFeed.Send([]*zeroex.OrderEvent{addedEvent, stoppedWatchingEvent})\n\t\treturn nil\n\t}\n\n\torder := &meshdb.Order{\n\t\tHash: orderInfo.OrderHash,\n\t\tSignedOrder: orderInfo.SignedOrder,\n\t\tLastUpdated: time.Now().UTC(),\n\t\tFillableTakerAssetAmount: orderInfo.FillableTakerAssetAmount,\n\t\tIsRemoved: false,\n\t\tIsPinned: pinned,\n\t}\n\terr := txn.Insert(order)\n\tif err != nil {\n\t\tif _, ok := err.(db.AlreadyExistsError); ok {\n\t\t\t// If we're already watching the order, that's fine in this case. Don't\n\t\t\t// return an error.\n\t\t\treturn nil\n\t\t}\n\t\treturn err\n\t}\n\tif err := txn.Commit(); err != nil {\n\t\treturn err\n\t}\n\n\terr = w.setupInMemoryOrderState(orderInfo.SignedOrder)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\torderEvent := &zeroex.OrderEvent{\n\t\tOrderHash: orderInfo.OrderHash,\n\t\tSignedOrder: orderInfo.SignedOrder,\n\t\tFillableTakerAssetAmount: orderInfo.FillableTakerAssetAmount,\n\t\tEndState: zeroex.ESOrderAdded,\n\t}\n\tw.orderFeed.Send([]*zeroex.OrderEvent{orderEvent})\n\n\treturn nil\n}", "func (dc *dexConnection) subscribe(base, quote uint32) (*msgjson.OrderBook, error) {\n\tmkt := marketName(base, quote)\n\t// Subscribe via the 'orderbook' request.\n\tdc.log.Debugf(\"Subscribing to the %v order book for %v\", mkt, dc.acct.host)\n\treq, err := msgjson.NewRequest(dc.NextID(), msgjson.OrderBookRoute, &msgjson.OrderBookSubscription{\n\t\tBase: base,\n\t\tQuote: quote,\n\t})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error encoding 'orderbook' request: %w\", err)\n\t}\n\terrChan := make(chan error, 1)\n\tresult := new(msgjson.OrderBook)\n\terr = dc.RequestWithTimeout(req, func(msg *msgjson.Message) {\n\t\terrChan <- msg.UnmarshalResult(result)\n\t}, DefaultResponseTimeout, func() {\n\t\terrChan <- fmt.Errorf(\"timed out waiting for '%s' response\", msgjson.OrderBookRoute)\n\t})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error subscribing to %s orderbook: %w\", mkt, err)\n\t}\n\terr = <-errChan\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result, nil\n}", "func (s *Streamer) eventsRouter() {\n\tdefer s.threads.Done()\n\nrouteEvents:\n\tfor {\n\t\tselect {\n\t\tcase listener := <-s.subscribe:\n\t\t\ts.subscribeListener(listener)\n\t\tcase listener := <-s.unsubscribe:\n\t\t\ts.unsubscribeListener(listener)\n\t\tcase filename, isOpen := <-s.changedFileNames:\n\t\t\tif !isOpen {\n\t\t\t\tbreak routeEvents\n\t\t\t}\n\n\t\t\tif _, exists := s.subscriptions[filename]; !exists || len(s.subscriptions[filename]) == 0 {\n\t\t\t\ts.logger.Printf(\"No listeners subscribed for '%s' file events\", filename)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tfor toNotify := range s.subscriptions[filename] {\n\t\t\t\tif len(toNotify) < cap(toNotify) {\n\t\t\t\t\ttoNotify <- newDataEvent{}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t// Wait for all subscriptions to be finished\n\tfor len(s.subscriptions) != 0 {\n\t\tselect {\n\t\tcase listener := 
<-s.subscribe:\n\t\t\ts.subscribeListener(listener)\n\t\tcase listener := <-s.unsubscribe:\n\t\t\ts.unsubscribeListener(listener)\n\t\t}\n\t}\n}", "func (rq *ReceiptQuery) Order(o ...OrderFunc) *ReceiptQuery {\n\trq.order = append(rq.order, o...)\n\treturn rq\n}", "func TestSubscribeConsecutively(t *testing.T) {\n\tmaybeSkipIntegrationTest(t)\n\n\ts, err := NewSession()\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to create session: %v\", err)\n\t}\n\tdefer s.Close()\n\n\tif err := s.Subscribe(\"ike-updown\", \"child-updown\"); err != nil {\n\t\tt.Fatalf(\"Unexpected error subscribing for events: %v\", err)\n\t}\n\n\tif !reflect.DeepEqual(s.el.events, []string{\"ike-updown\", \"child-updown\"}) {\n\t\tt.Fatalf(\"Expected to find ike-updown and child-updown registered, got: %v\", s.el.events)\n\t}\n\n\tif err := s.Subscribe(\"child-updown\", \"log\", \"ike-updown\"); err != nil {\n\t\tt.Fatalf(\"Unexpected error subscribing for additional events: %v\", err)\n\t}\n\n\t// Only the 'log' event should have been added.\n\tif !reflect.DeepEqual(s.el.events, []string{\"ike-updown\", \"child-updown\", \"log\"}) {\n\t\tt.Fatalf(\"Expected to find ike-updown and child-updown registered, got: %v\", s.el.events)\n\t}\n}", "func (ds *Order) notifyWillUpdate(old, new int) {\n\tfor _, obs := range ds.observers {\n\t\tobs.WillUpdate(old, new)\n\t}\n}", "func (cg *CandlesGroup) subscribe() {\n\tfor _, symb := range cg.symbols {\n\t\tmessage := candlesSubsMessage{\n\t\t\tEvent: eventSubscribe,\n\t\t\tChannel: \"candles\",\n\t\t\tKey: \"trade:1m:t\" + strings.ToUpper(symb.OriginalName),\n\t\t}\n\n\t\tif err := cg.wsClient.Write(message); err != nil {\n\t\t\tlog.Printf(\"[BITFINEX] Error subsciring to %v candles\", symb.Name)\n\t\t\tcg.restart()\n\t\t\treturn\n\t\t}\n\t}\n\tlog.Println(\"[BITFINEX] Subscription ok\")\n}", "func TestObserverPattern(t *testing.T) {\n\tobserver1 := Observer{\"1000\"}\n\tobserver2 := Observer{\"2000\"}\n\tobserver3 := Observer{\"3000\"}\n\n\tchannel1 := Channel{\"1981\", make(map[string]Observer)}\n\tchannel2 := Channel{\"2981\", make(map[string]Observer)}\n\n\tchannel1.subscribe(observer1, observer2, observer3)\n\tchannel1.unsubscribe(observer2)\n\tchannel1.sendEvent(Event{channel1.id, \"Event1: Hello Observer pattern world\"})\n\n\tchannel2.subscribe(observer2)\n\tchannel2.sendEvent(\n\t\tEvent{channel2.id, \"Event2: Hello Observer pattern world\"},\n\t\tEvent{channel2.id, \"Event3: Hello again buddy!\"},\n\t\tEvent{channel2.id, \"Event4: Last message promise\"})\n\n}", "func (gc grpcClient) createOrder(order order.Order) error {\n\tconn, err := grpc.Dial(grpcUri, grpc.WithInsecure())\n\tif err != nil {\n\t\tlog.Fatalf(\"Unable to connect: %v\", err)\n\t}\n\tdefer conn.Close()\n\tclient := eventstore.NewEventStoreClient(conn)\n\torderJSON, _ := json.Marshal(order)\n\n\teventid, _ := uuid.NewUUID()\n\tevent := &eventstore.Event{\n\t\tEventId: eventid.String(),\n\t\tEventType: event,\n\t\tAggregateId: order.ID,\n\t\tAggregateType: aggregate,\n\t\tEventData: string(orderJSON),\n\t\tStream: \"ORDERS\",\n\t}\n\n\tcreateEventRequest := &eventstore.CreateEventRequest{Event: event}\n\tresp, err := client.CreateEvent(context.Background(), createEventRequest)\n\tif err != nil {\n\t\tif st, ok := status.FromError(err); ok {\n\t\t\treturn fmt.Errorf(\"error from RPC server with: status code:%s message:%s\", st.Code().String(), st.Message())\n\t\t}\n\t\treturn fmt.Errorf(\"error from RPC server: %w\", err)\n\t}\n\tif resp.IsSuccess {\n\t\treturn nil\n\t}\n\treturn errors.New(\"error from RPC 
server\")\n}", "func RegisterEventFilterListOrder(svc *graphql.Service) {\n\tsvc.RegisterEnum(_EnumTypeEventFilterListOrderDesc)\n}", "func (r *Repository) Subscribe(_ context.Context, agent *adagio.Agent, events chan<- *adagio.Event, types ...adagio.Event_Type) error {\n\tr.mu.Lock()\n\tdefer r.mu.Unlock()\n\n\tr.agents[agent.Id] = agent\n\n\tfor _, typ := range types {\n\t\tif typ == adagio.Event_NODE_READY {\n\t\t\tfor _, state := range r.runs {\n\t\t\t\tfor _, node := range state.lookup {\n\t\t\t\t\tif node.Status == adagio.Node_READY {\n\t\t\t\t\t\tevents <- &adagio.Event{\n\t\t\t\t\t\t\tRunID: state.run.Id,\n\t\t\t\t\t\t\tNodeSpec: node.Spec,\n\t\t\t\t\t\t\tType: adagio.Event_NODE_READY,\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tr.listeners[typ] = append(r.listeners[typ], events)\n\t}\n\n\treturn nil\n}", "func (b *EtcdBackend) watchEvents(ctx context.Context) error {\n\n\t// etcd watch client relies on context cancellation for cleanup,\n\t// so create a new subscope for this function.\n\tctx, cancel := context.WithCancel(ctx)\n\tdefer cancel()\n\n\t// wrap fromEvent in a closure compatible with the concurrent queue\n\tworkfn := func(original clientv3.Event) eventResult {\n\t\tvar event backend.Event\n\t\te, err := b.fromEvent(ctx, original)\n\t\tif e != nil {\n\t\t\tevent = *e\n\t\t}\n\t\treturn eventResult{\n\t\t\toriginal: original,\n\t\t\tevent: event,\n\t\t\terr: err,\n\t\t}\n\t}\n\n\t// constants here are a bit arbitrary. the goal is to set up the queue s.t.\n\t// it could handle >100 events per second assuming an avg of .2 seconds of processing\n\t// time per event (as seen in tests of under-provisioned etcd instances).\n\tq := cq.New(\n\t\tworkfn,\n\t\tcq.Workers(24),\n\t\tcq.Capacity(240),\n\t\tcq.InputBuf(120),\n\t\tcq.OutputBuf(48),\n\t)\n\n\t// emitDone signals that the background goroutine used for emitting the processed\n\t// events to the buffer has halted.\n\temitDone := make(chan struct{})\n\n\t// watcher must be registered before we initialize the buffer\n\teventsC := b.clients.Next().Watch(ctx, b.cfg.Key, clientv3.WithPrefix())\n\n\t// set buffer to initialized state.\n\tb.buf.SetInit()\n\n\t// ensure correct cleanup ordering (buffer must not be reset until event emission has halted).\n\tdefer func() {\n\t\tq.Close()\n\t\t<-emitDone\n\t\tb.buf.Reset()\n\t}()\n\n\t// launch background process responsible for event emission.\n\tgo func() {\n\t\tdefer close(emitDone)\n\tEmitEvents:\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase r := <-q.Pop():\n\t\t\t\tif r.err != nil {\n\t\t\t\t\tb.WithError(r.err).Errorf(\"Failed to unmarshal event: %v.\", r.original)\n\t\t\t\t\tcontinue EmitEvents\n\t\t\t\t}\n\t\t\t\tb.buf.Emit(r.event)\n\t\t\tcase <-q.Done():\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\n\tvar lastBacklogWarning time.Time\n\tfor {\n\t\tselect {\n\t\tcase e, ok := <-eventsC:\n\t\t\tif e.Canceled || !ok {\n\t\t\t\treturn trace.ConnectionProblem(nil, \"etcd watch channel closed\")\n\t\t\t}\n\n\t\tPushToQueue:\n\t\t\tfor i := range e.Events {\n\t\t\t\teventCount.Inc()\n\n\t\t\t\tevent := *e.Events[i]\n\t\t\t\t// attempt non-blocking push. 
We allocate a large input buffer for the queue, so this\n\t\t\t\t// aught to succeed reliably.\n\t\t\t\tselect {\n\t\t\t\tcase q.Push() <- event:\n\t\t\t\t\tcontinue PushToQueue\n\t\t\t\tdefault:\n\t\t\t\t}\n\n\t\t\t\teventBackpressure.Inc()\n\n\t\t\t\t// limit backlog warnings to once per minute to prevent log spam.\n\t\t\t\tif now := time.Now(); now.After(lastBacklogWarning.Add(time.Minute)) {\n\t\t\t\t\tb.Warnf(\"Etcd event processing backlog; may result in excess memory usage and stale cluster state.\")\n\t\t\t\t\tlastBacklogWarning = now\n\t\t\t\t}\n\n\t\t\t\t// fallback to blocking push\n\t\t\t\tselect {\n\t\t\t\tcase q.Push() <- event:\n\t\t\t\tcase <-ctx.Done():\n\t\t\t\t\treturn trace.ConnectionProblem(ctx.Err(), \"context is closing\")\n\t\t\t\t}\n\t\t\t}\n\t\tcase <-ctx.Done():\n\t\t\treturn trace.ConnectionProblem(ctx.Err(), \"context is closing\")\n\t\t}\n\t}\n}", "func (s *service) Subscribe(eventName, handler string) error {\n\tdata := struct {\n\t\tToken string `json:\"token\"`\n\t\tEvent string `json:\"event\"`\n\t\tURL string `json:\"url\"`\n\t}{\n\t\tToken: s.token,\n\t\tEvent: eventName,\n\t\tURL: handler,\n\t}\n\tif err := post(s.getServerURL(\"/subscribe\"), &data, nil); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (wq *WifiQuery) Order(o ...OrderFunc) *WifiQuery {\n\twq.order = append(wq.order, o...)\n\treturn wq\n}", "func (_EtherDelta *EtherDeltaCallerSession) Orders(arg0 common.Address, arg1 [32]byte) (bool, error) {\n\treturn _EtherDelta.Contract.Orders(&_EtherDelta.CallOpts, arg0, arg1)\n}", "func (e *EventService) Subscribe(topic string, action string, eventHandler func(map[string]interface{}, string, map[string]string)) {\n\tgo func() {\n\t\tfor event := range e.EventChannel {\n\t\t\tif event.Topic == topic && event.Action == action {\n\t\t\t\teventHandler(event.Payload, event.Hash, event.Metadata)\n\t\t\t} else {\n\t\t\t\t// If it could not be processed, put it back in the back of the queue\n\t\t\t\te.EventChannel <- event\n\t\t\t}\n\t\t}\n\t}()\n}", "func updateOrders(orders *def.Orders, externalButtonPress def.Order, elevatorState def.ElevatorState) {\n\tif externalButtonPress.Direction == def.DIR_STOP {\n\t\t/*Detected internal button press*/\n\t\tdistributeInternalOrderToOrderList(externalButtonPress, orders, elevatorState)\n\t}\n\tif CheckForDuplicateOrder(orders, externalButtonPress.Floor) { // TODO: DO NOT REMOVE ORDERS ALONG THE SAME DIRECTION\n\t\tfindAndReplaceOrderIfSameDirection(orders, externalButtonPress, elevatorState.Direction) //TODO\n\t\treturn\n\t}\n\n\tif len(orders.Orders) > 0 { // For safety\n\t\t// Check to see if order should be placed first based on current elevator state\n\t\tif elevatorState.Direction == externalButtonPress.Direction && FloorIsInbetween(orders.Orders[0].Floor, externalButtonPress.Floor, elevatorState.LastFloor, elevatorState.Direction) {\n\t\t\t// Insert Order in first position\n\n\t\t\torders.Orders = append(orders.Orders, def.Order{})\n\t\t\tcopy(orders.Orders[1:], orders.Orders[:])\n\t\t\torders.Orders[0] = externalButtonPress\n\t\t\treturn\n\t\t}\n\n\t}\n\n\tfor i := 1; i < len(orders.Orders); i++ {\n\t\tdirection := orders.Orders[i].Direction\n\t\tif externalButtonPress.Direction == direction { // Elevator is moving in the right direction\n\t\t\tswitch direction {\n\t\t\tcase def.DIR_UP:\n\t\t\t\tif externalButtonPress.Floor < orders.Orders[i].Floor {\n\t\t\t\t\t// Insert Order in position (i)\n\t\t\t\t\torders.Orders = append(orders.Orders, def.Order{})\n\t\t\t\t\tcopy(orders.Orders[i+1:], 
orders.Orders[i:])\n\t\t\t\t\torders.Orders[i] = externalButtonPress\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\tcase def.DIR_DOWN:\n\t\t\t\tif externalButtonPress.Floor > orders.Orders[i].Floor {\n\t\t\t\t\t// Insert Order in position (i+1)\n\n\t\t\t\t\torders.Orders = append(orders.Orders, def.Order{})\n\t\t\t\t\tcopy(orders.Orders[i+1:], orders.Orders[i:])\n\t\t\t\t\torders.Orders[i] = externalButtonPress\n\t\t\t\t\treturn\n\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\tfmt.Println(\"Something weird is up, buddy\")\n\t\t\t}\n\t\t}\n\t}\n\t// Place order at back of orderList\n\torders.Orders = append(orders.Orders, externalButtonPress)\n}", "func (wtq *WorkerTypeQuery) Order(o ...OrderFunc) *WorkerTypeQuery {\n\twtq.order = append(wtq.order, o...)\n\treturn wtq\n}", "func (d *Demo) Subscribe(recv backend.Receiver) {\n\td.Lock()\n\tdefer d.Unlock()\n\n\td.subscriber = recv\n\n\t// Release the lock before running an update.\n\tgo d.updateAll()\n}", "func (siq *SubItemQuery) Order(o ...OrderFunc) *SubItemQuery {\n\tsiq.order = append(siq.order, o...)\n\treturn siq\n}", "func (o *PluginDnsClient) OnRxEvent(event transport.SocketEventType) {}", "func (q *QuotesGroup) subscribe(ch chan schemas.ResultChannel, d time.Duration) {\n\tfor {\n\t\tquotes, err := q.Get()\n\t\tif err != nil {\n\t\t\tch <- schemas.ResultChannel{\n\t\t\t\tData: quotes,\n\t\t\t\tError: err,\n\t\t\t\tDataType: \"s\",\n\t\t\t}\n\t\t}\n\t\tfor _, b := range quotes {\n\t\t\tch <- schemas.ResultChannel{\n\t\t\t\tData: b,\n\t\t\t\tError: err,\n\t\t\t\tDataType: \"s\",\n\t\t\t}\n\t\t}\n\t\ttime.Sleep(d)\n\t}\n}", "func (rrq *ReserveRoomQuery) Order(o ...OrderFunc) *ReserveRoomQuery {\n\trrq.order = append(rrq.order, o...)\n\treturn rrq\n}", "func (bus *Bus) SubscribeToClock(subscriber BusSubscriber) {\n\tbus.clockSubscribers = append(bus.clockSubscribers, subscriber)\n}", "func Example_subscribeOn() {\n\tconcurrent := GoroutineScheduler()\n\n\tsource := FromInt(1, 2, 3, 4, 5).SubscribeOn(concurrent)\n\n\tobserver := func(next int, err error, done bool) {\n\t\tswitch {\n\t\tcase !done:\n\t\t\tfmt.Println(next)\n\t\tcase err != nil:\n\t\t\tfmt.Println(err)\n\t\tdefault:\n\t\t\tfmt.Println(\"complete\")\n\t\t}\n\t}\n\n\tsource.Subscribe(observer).Wait()\n\t//Output:\n\t// 1\n\t// 2\n\t// 3\n\t// 4\n\t// 5\n\t// complete\n}", "func (esq *EventSeverityQuery) Order(o ...OrderFunc) *EventSeverityQuery {\n\tesq.order = append(esq.order, o...)\n\treturn esq\n}", "func (vq *VehicleQuery) Order(o ...OrderFunc) *VehicleQuery {\n\tvq.order = append(vq.order, o...)\n\treturn vq\n}", "func (r *Registrar) onEvents(states []models.State) {\n\t \n\n\tlogp.Debug(\"registrar\", \"Registrar state updates processed. 
Count: %v\", len(states))\n\n\t// new set of events received -> mark state registry ready for next\n\t// cleanup phase in case gc'able events are stored in the registry.\n\t//r.gcRequired = r.gcEnabled\n}", "func Order(next http.Handler) http.Handler {\r\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\r\n\t\tvar order *types.Order\r\n\r\n\t\tif id := chi.URLParam(r, \"id\"); id != \"\" {\r\n\t\t\tintID, err := strconv.Atoi(id)\r\n\t\t\tif err != nil {\r\n\t\t\t\t_ = render.Render(w, r, types.ErrInvalidRequest(err))\r\n\t\t\t\treturn\r\n\t\t\t}\r\n\t\t\torder = DBClient.GetOrderByID(intID)\r\n\t\t} else {\r\n\t\t\t_ = render.Render(w, r, types.ErrNotFound())\r\n\t\t\treturn\r\n\t\t}\r\n\t\tif order == nil {\r\n\t\t\t_ = render.Render(w, r, types.ErrNotFound())\r\n\t\t\treturn\r\n\t\t}\r\n\r\n\t\tctx := context.WithValue(r.Context(), OrderCtxKey, order)\r\n\t\tnext.ServeHTTP(w, r.WithContext(ctx))\r\n\t})\r\n}", "func (me *TOrderReferences) Walk() (err error) {\n\tif fn := WalkHandlers.TOrderReferences; me != nil {\n\t\tif fn != nil {\n\t\t\tif err = fn(me, true); xsdt.OnWalkError(&err, &WalkErrors, WalkContinueOnError, WalkOnError) {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tif err = me.XsdGoPkgHasElem_OriginatingON.Walk(); xsdt.OnWalkError(&err, &WalkErrors, WalkContinueOnError, WalkOnError) {\n\t\t\treturn\n\t\t}\n\t\tif err = me.XsdGoPkgHasElem_OrderDate.Walk(); xsdt.OnWalkError(&err, &WalkErrors, WalkContinueOnError, WalkOnError) {\n\t\t\treturn\n\t\t}\n\t\tif fn != nil {\n\t\t\tif err = fn(me, false); xsdt.OnWalkError(&err, &WalkErrors, WalkContinueOnError, WalkOnError) {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\treturn\n}", "func (es *EventStore) Subscribe(event string, ec EventCallback) {\n\tes.mu.Lock()\n\tdefer es.mu.Unlock()\n\tif es.IsEventExists(event) {\n\t\tes.subscribers[event] = append(es.subscribers[event], ec)\n\t\treturn\n\t}\n\n\tes.subscribers[event] = EventCallbacks{}\n\tes.subscribers[event] = append(es.subscribers[event], ec)\n}", "func (gatewayContext *GatewayContext) updateSubscriberClients() {\n\tif gatewayContext.gateway.Spec.Subscribers == nil {\n\t\treturn\n\t}\n\n\tif gatewayContext.httpSubscribers == nil {\n\t\tgatewayContext.httpSubscribers = make(map[string]cloudevents.Client)\n\t}\n\tif gatewayContext.natsSubscribers == nil {\n\t\tgatewayContext.natsSubscribers = make(map[string]cloudevents.Client)\n\t}\n\n\t// http subscribers\n\tfor _, subscriber := range gatewayContext.gateway.Spec.Subscribers.HTTP {\n\t\tif _, ok := gatewayContext.httpSubscribers[subscriber]; !ok {\n\t\t\tt, err := cloudevents.NewHTTPTransport(\n\t\t\t\tcloudevents.WithTarget(subscriber),\n\t\t\t\tcloudevents.WithEncoding(cloudevents.HTTPBinaryV03),\n\t\t\t)\n\t\t\tif err != nil {\n\t\t\t\tgatewayContext.logger.WithError(err).WithField(\"subscriber\", subscriber).Warnln(\"failed to create a transport\")\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tclient, err := cloudevents.NewClient(t)\n\t\t\tif err != nil {\n\t\t\t\tgatewayContext.logger.WithError(err).WithField(\"subscriber\", subscriber).Warnln(\"failed to create a client\")\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tgatewayContext.logger.WithField(\"subscriber\", subscriber).Infoln(\"added a client for the subscriber\")\n\t\t\tgatewayContext.httpSubscribers[subscriber] = client\n\t\t}\n\t}\n\n\t// nats subscribers\n\tfor _, subscriber := range gatewayContext.gateway.Spec.Subscribers.NATS {\n\t\tif _, ok := gatewayContext.natsSubscribers[subscriber.Name]; !ok {\n\t\t\tt, err := cloudeventsnats.New(subscriber.ServerURL, 
subscriber.Subject)\n\t\t\tif err != nil {\n\t\t\t\tgatewayContext.logger.WithError(err).WithField(\"subscriber\", subscriber).Warnln(\"failed to create a transport\")\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tclient, err := cloudevents.NewClient(t)\n\t\t\tif err != nil {\n\t\t\t\tgatewayContext.logger.WithError(err).WithField(\"subscriber\", subscriber).Warnln(\"failed to create a client\")\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tgatewayContext.logger.WithField(\"subscriber\", subscriber).Infoln(\"added a client for the subscriber\")\n\t\t\tgatewayContext.natsSubscribers[subscriber.Name] = client\n\t\t}\n\t}\n}", "func (env Env) ListOrders(filter checkout.OrderFilter, page gorest.Pagination) (checkout.CMSOrderList, error) {\n\tdefer env.logger.Sync()\n\tsugar := env.logger.Sugar()\n\n\twhere := filter.SQLWhere()\n\tcountCh := make(chan int64)\n\tlistCh := make(chan checkout.CMSOrderList)\n\n\tgo func() {\n\t\tdefer close(countCh)\n\t\tn, err := env.countOrder(where)\n\t\tif err != nil {\n\t\t\tsugar.Error(err)\n\t\t}\n\n\t\tcountCh <- n\n\t}()\n\n\tgo func() {\n\t\tdefer close(listCh)\n\n\t\torders, err := env.listOrders(where, page)\n\n\t\tlistCh <- checkout.CMSOrderList{\n\t\t\tPagedList: pkg.PagedList{\n\t\t\t\tTotal: 0,\n\t\t\t\tPagination: gorest.Pagination{},\n\t\t\t\tErr: err,\n\t\t\t},\n\t\t\tData: orders,\n\t\t}\n\t}()\n\n\tcount, listResult := <-countCh, <-listCh\n\tif listResult.Err != nil {\n\t\treturn checkout.CMSOrderList{}, listResult.Err\n\t}\n\n\treturn checkout.CMSOrderList{\n\t\tPagedList: pkg.PagedList{\n\t\t\tTotal: count,\n\t\t\tPagination: page,\n\t\t\tErr: nil,\n\t\t},\n\t\tData: listResult.Data,\n\t}, nil\n}", "func (l *EventListener) ListenFlushCartEvents() {\n\tfor msg := range l.FlushCartMessageChan {\n\t\tvar cartMessage flushCartMessage\n\n\t\terr := json.Unmarshal([]byte(msg.Payload), &cartMessage)\n\n\t\tif err == nil && cartMessage.UserID != \"\" {\n\t\t\tl.Repository.FlushCart(cartMessage.UserID)\n\t\t}\n\t}\n}", "func (o *Orderbook) candle_listener() {\n\tfor {\n\t\tselect {\n\t\tcase request := <- o.Candle_in:\n\t\t\tcandle := o.generateCandle(request)\n\t\t\to.Candle_out <- candle.ToString() \t\n\t\t}\n\t}\n}", "func (cq *ConfirmationQuery) Order(o ...OrderFunc) *ConfirmationQuery {\n\tcq.order = append(cq.order, o...)\n\treturn cq\n}", "func WithOrder(order int) EventConfigurator {\n\treturn func(handler EventHandler) {\n\t\th := handler.(*eventHandler)\n\t\th.order = order\n\t}\n}", "func (s *BasePlSqlParserListener) EnterOrder_by_elements(ctx *Order_by_elementsContext) {}", "func (room *RoomRecorder) peopleSubscribe(p PeopleI) {\n\tobserver := synced.NewObserver(\n\t\tsynced.NewPair(room_.ObserverEnter, room.peopleObserverEnter),\n\t\tsynced.NewPair(room_.PlayerEnter, room.peoplePlayerEnter))\n\tp.Observe(observer.AddPublisherCode(room_.UpdatePeople))\n}", "func gumballOrderStatusHandlerAny(formatter *render.Render) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, req *http.Request) {\n\t\t\n\t\tparams := mux.Vars(req)\n\t\tvar uuid string = params[\"id\"]\n\t\tfmt.Println( \"Order Params ID: \", uuid )\n\n\t\tc1 := make(chan order)\n \tc2 := make(chan order)\n \tc3 := make(chan order)\n\n\t\tif uuid == \"\" {\n\t\t\tformatter.JSON(w, http.StatusBadRequest, \"Invalid Request. 
Order ID Missing.\")\n\t\t} else {\n\n\t\t\tgo getOrderServer1(uuid, c1) \n\t\t\tgo getOrderServer2(uuid, c2) \n\t\t\tgo getOrderServer3(uuid, c3) \n\n\t\t\tvar ord order\n\t\t \tselect {\n\t\t\t case ord = <-c1:\n\t\t\t fmt.Println(\"Received Server1: \", ord)\n\t\t\t case ord = <-c2:\n\t\t\t fmt.Println(\"Received Server2: \", ord)\n\t\t\t case ord = <-c3:\n\t\t\t fmt.Println(\"Received Server3: \", ord)\n\t\t }\n\n\t\t\tif ord == (order{}) {\n\t\t\t\tformatter.JSON(w, http.StatusBadRequest, \"\")\n\t\t\t} else {\n\t\t\t\tfmt.Println( \"Order: \", ord )\n\t\t\t\tformatter.JSON(w, http.StatusOK, ord)\n\t\t\t}\n\t\t}\n\t}\n}", "func (sq *ServerQuery) Order(o ...OrderFunc) *ServerQuery {\n\tsq.order = append(sq.order, o...)\n\treturn sq\n}", "func RegisterEventHandlers(router *message.Router, subscriber message.Subscriber, logger logur.Logger) error {\n\tcommonLogger := commonadapter.NewContextAwareLogger(logger, &correlation.ContextExtractor{})\n\ttodoEventProcessor, _ := cqrs.NewEventProcessor(\n\t\t[]cqrs.EventHandler{\n\t\t\ttododriver.NewMarkedAsDoneEventHandler(todo.NewLogEventHandler(commonLogger)),\n\t\t},\n\t\tfunc(eventName string) string { return todoTopic },\n\t\tfunc(handlerName string) (message.Subscriber, error) { return subscriber, nil },\n\t\tcqrs.JSONMarshaler{GenerateName: cqrs.StructName},\n\t\twatermilllog.New(logur.WithFields(logger, map[string]interface{}{\"component\": \"watermill\"})),\n\t)\n\n\terr := todoEventProcessor.AddHandlersToRouter(router)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (osq *OfflineSessionQuery) Order(o ...OrderFunc) *OfflineSessionQuery {\n\tosq.order = append(osq.order, o...)\n\treturn osq\n}", "func (e *orderEndpoint) ws(input interface{}, c *ws.Client) {\n\tmsg := &types.WebsocketEvent{}\n\n\tbytes, _ := json.Marshal(input)\n\tif err := json.Unmarshal(bytes, &msg); err != nil {\n\t\tlogger.Error(err)\n\t\tc.SendMessage(ws.OrderChannel, \"ERROR\", err.Error())\n\t}\n\n\tswitch msg.Type {\n\tcase \"NEW_ORDER\":\n\t\te.handleNewOrder(msg, c)\n\tcase \"CANCEL_ORDER\":\n\t\te.handleCancelOrder(msg, c)\n\tdefault:\n\t\tlog.Print(\"Response with error\")\n\t}\n}", "func (ttrq *TradeTimeRangeQuery) Order(o ...OrderFunc) *TradeTimeRangeQuery {\n\tttrq.order = append(ttrq.order, o...)\n\treturn ttrq\n}", "func (room *RoomRecorder) subscribe(builder RBuilderI) {\n\tvar (\n\t\tevents EventsI\n\t\tclient RClientI\n\t)\n\tbuilder.BuildEvents(&events)\n\tbuilder.BuildConnectionEvents(&client)\n\n\troom.eventsSubscribe(events)\n\troom.peopleSubscribe(room.p)\n\troom.connectionSubscribe(client)\n}" ]
[ "0.6924018", "0.6784168", "0.6600295", "0.6304606", "0.59060335", "0.5901376", "0.58783144", "0.58025783", "0.56605494", "0.5610966", "0.55999756", "0.55674213", "0.5540953", "0.55338764", "0.5472549", "0.5441667", "0.53214586", "0.52938116", "0.52710706", "0.5252714", "0.52517825", "0.5237412", "0.51842606", "0.5154717", "0.51445127", "0.5137577", "0.5134831", "0.51058614", "0.5094094", "0.5090225", "0.50682765", "0.506337", "0.5061916", "0.50065684", "0.4993642", "0.49654827", "0.49352875", "0.49179232", "0.4905832", "0.48791227", "0.48589396", "0.48554724", "0.48272544", "0.48177338", "0.4809611", "0.4804934", "0.4801885", "0.47786394", "0.47711766", "0.47652277", "0.4750984", "0.47504684", "0.47384077", "0.47278622", "0.47270906", "0.47249404", "0.472045", "0.4706838", "0.46911594", "0.4690724", "0.4690448", "0.46662202", "0.4664982", "0.46600455", "0.4635513", "0.4634402", "0.46320504", "0.46244845", "0.4620741", "0.46190634", "0.46068102", "0.45960295", "0.45959446", "0.45954308", "0.4582568", "0.45763627", "0.45759314", "0.45664272", "0.45663986", "0.45657936", "0.45637998", "0.45635986", "0.45635113", "0.45465392", "0.45452765", "0.45367867", "0.4534307", "0.453348", "0.45262128", "0.45238683", "0.4522474", "0.45104134", "0.45091304", "0.4508979", "0.45026922", "0.44957012", "0.4490034", "0.44878063", "0.4487026", "0.4484499" ]
0.80939853
0
Close closes the app
func (app *App) Close() { if err := app.node.Close(); err != nil { log.WithField("error", err.Error()).Error("error while closing node") } app.ethWatcher.Stop() if err := app.orderWatcher.Stop(); err != nil { log.WithField("error", err.Error()).Error("error while closing orderWatcher") } app.blockWatcherCancel() app.db.Close() }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (v *App) Close() {\n\tif !v.opened {\n\t\treturn\n\t}\n\n\tlog.Println(\"\\nClosing App...\")\n\n\tlog.Println(\"Destroying font\")\n\tv.nFont.Destroy()\n\tlog.Println(\"Destroying text(s)\")\n\tv.txtSimStatus.Destroy()\n\t// v.dynaTxt.Destroy()\n\tv.txtActiveProperty.Destroy()\n\n\tlog.Println(\"Destroying graphs\")\n\n\tv.spikeGraph.Destroy()\n\tv.expoGraph.Destroy()\n\n\tlog.Println(\"Destroying texture\")\n\terr := v.texture.Destroy()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tlog.Println(\"Destroying renderer\")\n\terr = v.renderer.Destroy()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t// log.Println(\"Shutting down App\")\n\terr = v.window.Destroy()\n\tsdl.Quit()\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n}", "func (a *App) Close() {\n\ta.gui.Close()\n}", "func (a *App) Close() {\n\ta.l.Debug().Msg(\"Closing application\")\n\ta.cancelStateUpdate()\n\ta.gui.Close()\n}", "func Quit() {\n\twindow.Close(true)\n}", "func Close() {\n\tif App.WatchEnabled && !App.CmdMode {\n\t\tchannelExchangeCommands(LevelCritical, command{Action: \"DONE\"})\n\t}\n}", "func (*View) Close() error { return nil }", "func (app *App) Close(hardClean bool) {\n\tif err := app.store.CleanDB(hardClean, db.Things{}); err != nil {\n\t\tlog.Errorf(\"failed to release resources: %s\", err.Error())\n\t}\n\tlog.Info(\"finished successfully\")\n}", "func (as *AppStorage) Close() {}", "func (m *AppManager) Close() {\n\tlog.Info(\"Closing HO App Manager\")\n}", "func (l Launcher) Close() error {\n\treturn nil\n}", "func (v *App) Quit() {\n\tv.running = false\n}", "func (app *DockApp) Quit() {\n\txevent.Quit(app.x)\n}", "func (app *Application) Quit() {\n\t/*\n\t\tif app.Mopidy.Cmd.Process != nil {\n\t\t\tapp.Mopidy.Cmd.Process.Kill()\n\t\t}\n\t*/\n\tapp.Running = false\n}", "func (g *Gui) Close() {\n\tg.screen.Fini()\n}", "func (t *qmlfrontend) Quit() (err error) {\n\t// todo: handle changed files that aren't saved.\n\tfor _, v := range t.windows {\n\t\tif v.window != nil {\n\t\t\tv.window.Hide()\n\t\t\tv.window.Destroy()\n\t\t\tv.window = nil\n\t\t}\n\t}\n\treturn\n}", "func (a *App) Close() {\n\ta.Logger.Info(\"Shutting down sidecar\")\n\n\tdefer a.Meta.Close()\n\tdefer a.Publisher.Close()\n\tdefer a.Blob.Close()\n}", "func endApp() {\n\tfmt.Println(\"Aplikasi Selesai\")\n}", "func (a *Application) Exit() {\r\n\r\n\ta.IWindow.(*window.GlfwWindow).SetShouldClose(true)\r\n}", "func OnExit() {\n\tapp.GetIndicator().Disconnect()\n}", "func (a *App) Close() error {\n\ta.Trace(\"lego.close\", \"Closing immediately!\")\n\ta.disco.Leave(a.appCtx)\n\ta.close()\n\treturn nil\n}", "func (w *MainWindow) Close() {\n\tw.glfwWindow.SetShouldClose(true)\n}", "func closeApp() {\n\tfor _, b := range mapBrush {\n\t\tw32.DeleteObject(w32.HGDIOBJ(b.pen))\n\t\tw32.DeleteObject(w32.HGDIOBJ(b.brush))\n\t}\n\tw32.PostQuitMessage(0)\n}", "func (app *App) Close() {\n\tapp.logger.Info(\"Closing App...\")\n\tif app.node == nil {\n\t\tapp.logger.Info(\"node is nil!\")\n\t} else {\n\t\tapp.node.OnStop()\n\t}\n\tapp.Context.Close()\n}", "func (app *App) Close() {\n\tapp.workerGroup.Close()\n\tapp.sizeCounter.Close()\n}", "func (a *App) Close() error {\n\treturn a.Database.Close()\n}", "func (controller *UIController) Close() {\n}", "func (s *Splasher) Close() (err error) {\n\t// Remove binary\n\tastilog.Debugf(\"Removing %s\", s.binaryPath)\n\tif err = os.Remove(s.binaryPath); err != nil {\n\t\terr = errors.Wrapf(err, \"remove of %s failed\", s.binaryPath)\n\t\treturn\n\t}\n\treturn\n}", "func (app *App) Close() error {\n\tif 
app.srv != nil {\n\t\terr := app.srv.Shutdown(context.Background())\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tapp.srv = nil\n\t}\n\treturn nil\n}", "func (c *Chrome) Close() error {\n\t// TODO: everything\n\treturn nil\n}", "func endApp() {\n\tfmt.Println(\"Aplikasi Selesai\")\n\tmessage := recover()\n\tif message != nil {\n\t\tfmt.Println(\"Ada error di:\", message)\n\t}\n}", "func QuitGame() {\n\t// Close the termbox\n\ttermbox.Close()\n\n\t// Display message to player\n\tfmt.Println(\"Thanks for playing!!\")\n\n\t// Close program without error\n\tos.Exit(0)\n}", "func (a *AppRPCClient) Close() {\n\ta.App.Client.Close()\n}", "func closeApp(ctx context.Context, tconn *chrome.TestConn, app *wmputils.ResizeApp) error {\n\t// The app \"Play Store\" isn't unified depending on different models.\n\t// Thus, the ID isn't specified in this test.\n\t// Instead, we find the ID from running apps on the shelf.\n\tif app.ID == \"\" {\n\t\t// Finds the currently running app ID with specified name on the shelf.\n\t\tshelfApps, err := ash.ShelfItems(ctx, tconn)\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"failed to obtain all shelf apps\")\n\t\t}\n\t\tfor _, shelfapp := range shelfApps {\n\t\t\tif shelfapp.Status == ash.ShelfItemRunning {\n\t\t\t\treturn apps.Close(ctx, tconn, shelfapp.AppID)\n\t\t\t}\n\t\t}\n\t\treturn errors.Errorf(\"failed to find app ID of app %s\", app.Name)\n\t}\n\n\treturn apps.Close(ctx, tconn, app.ID)\n}", "func (application *Application) Close() {\n\tlog.Info(\"Bye!\")\n\tapplication.DBSession.Db.Close()\n}", "func (vd *videoDriver) Close() {\n\tdoOnMainThread(func() {\n\t\tvd.renderer.Destroy()\n\t\tvd.window.Destroy()\n\t}, false)\n}", "func Quit() {\n\tscreen.Fini()\n}", "func (gc *GalleryContext) CloseApp() uiauto.Action {\n\treturn func(ctx context.Context) error {\n\t\treturn apps.Close(ctx, gc.tconn, apps.Gallery.ID)\n\t}\n}", "func Quit() {\n\tclose(quit)\n}", "func (vr *VoiceRecorder) Close(ctx context.Context, cr *chrome.Chrome, hasError func() bool, outDir string) error {\n\treturn vr.app.Close(ctx, cr, hasError, outDir)\n}", "func Close() {\n\t//Nothing to do\n}", "func (s *Sun) Close() {\n\ts.open = false\n}", "func (r *Window) Close() {\n\tr.renderer.Close()\n}", "func (p *Program) Quit() {\n\terr := p.frame.Close()\n\tif err != nil {\n\t\tlog.Fatalf(\"%+v\", err)\n\t}\n\tos.Exit(0)\n}", "func (c *ClickHouse) Close() error {\n\treturn nil\n}", "func (p *bytesViewer) Close() error { return nil }", "func shutdownApp() error {\n\treturn nil\n}", "func (d *Driver) Close() {\n\tappiumReq := &appiumRequest{\n\t\t\"DELETE\",\n\t\tnil,\n\t\t\"/wd/hub/session/\" + d.sessionID,\n\t}\n\n\tresp := doAppiumRequest(appiumReq, d.driverClient, \"\")\n\n\tstatusCodeErrorHandler(\n\t\tresp.StatusCode, 500,\n\t\t\"appigo: unable to close session\",\n\t)\n}", "func Close(ctx context.Context, tconn *chrome.TestConn, appID string) error {\n\treturn tconn.Call(ctx, nil, `tast.promisify(chrome.autotestPrivate.closeApp)`, appID)\n}", "func (r *Radio) Close() {}", "func (ws *WindowSurface) Close() {\n\tif !ws.opened {\n\t\treturn\n\t}\n\tvar err error\n\n\tif ws.nFont == nil {\n\t\treturn\n\t}\n\tws.nFont.Destroy()\n\n\tws.txtFPSLabel.Destroy()\n\tws.txtMousePos.Destroy()\n\tws.dynaTxt.Destroy()\n\n\tlog.Println(\"Destroying texture\")\n\terr = ws.texture.Destroy()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tlog.Println(\"Destroying renderer\")\n\tws.renderer.Destroy()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tlog.Println(\"Destroying window\")\n\terr = 
ws.window.Destroy()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tsdl.Quit()\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n}", "func (c *Calculator) Close() error {\n\treturn c.g.Close()\n}", "func (win *Window) Close() {\n\tC.sfRenderWindow_close(win.win)\n\tC.sfRenderWindow_destroy(win.win)\n}", "func endApp() {\n\tfmt.Println(\"end app\")\n}", "func (this *Window) Close() {\n\tC.sfWindow_close(this.cptr)\n}", "func (app *App) Close() error {\n\tclose(app.errors)\n\tclose(app.replies)\n\treturn app.Conn.Close()\n}", "func (ws *WindowSurface) Quit() {\n\tws.running = false\n}", "func (this *MiniCon) Close() {\n\tthis.box.Close()\n}", "func StopApp(pkgname string) {\n\texec.Command(\"adb\", \"shell\", \"am\", \"force-stop\", pkgname).Run()\n}", "func Close(uiView View) {\n\tif !uiView.UIEnabled {\n\t\treturn\n\t}\n\tui.Close()\n}", "func (w *FabricSDKWrapper) Close() {\n\tw.sdk.Close()\n}", "func appStop(glctx gl.Context) {\n\tprintln(\"Exiting\")\n\trunning = false\n\tif currentScene.OnPause != nil {\n\t\tcurrentScene.OnPause(currentScene)\n\t\tsoundsPlayer.Close()\n\t}\n\tglctx.DeleteProgram(program)\n\tglctx.DeleteBuffer(buf)\n\tfps.Release()\n\timages.Release()\n}", "func Quit(g *gocui.Gui, v *gocui.View) error {\n\treturn gocui.ErrQuit\n}", "func (c *CloseProject) Run(w *lime.Window) error {\n\tw.Project().Close()\n\treturn nil\n}", "func Close() {\n}", "func (k *Khaiii) Close() {\n\tif k.firstWord != nil {\n\t\tk.FreeAnalyzeResult()\n\t}\n\tC.khaiii_close(k.handle)\n}", "func (c *ClockHand) Close() {\n\tif c.Stepper != nil {\n\t\tc.Stepper.Close()\n\t}\n\tif c.Input != nil {\n\t\tc.Input.Close()\n\t}\n}", "func (c *connection) Close() {\n\tbaseurl := \"http://fritz.box/webservices/homeautoswitch.lua\"\n\tparameters := make(map[string]string)\n\tparameters[\"sid\"] = c.sid\n\tparameters[\"logout\"] = \"logout\"\n\tUrl := prepareRequest(baseurl, parameters)\n\tsendRequest(Url)\n}", "func (app *App) Stop() {}", "func (app *DockApp) Destroy() {\n\tapp.img.Destroy()\n\tapp.win.Destroy()\n}", "func (s *Slicer) Close() {\n\tglfw.Terminate()\n}", "func (mc *MindControl) Close() {\n\tmc.SerialDevice.Write([]byte{'\\x73'})\n\tif mc.saving {\n\t\tmc.quitSave <- true\n\t}\n\tmc.SerialDevice.Close()\n\tmc.quitDecodeStream <- true\n\tclose(mc.quitSendPackets)\n\tclose(mc.quitGenTest)\n\tclose(mc.shutdown)\n}", "func (i *instance) Close() (err error) {\n\tfor _, rel := range i.relFuncs {\n\t\trel()\n\t}\n\ti.relFuncs = nil\n\tif i.child == nil {\n\t\treturn nil\n\t}\n\tdefer func() {\n\t\ti.child = nil\n\t\terr1 := os.RemoveAll(i.appDir)\n\t\tif err == nil {\n\t\t\terr = err1\n\t\t}\n\t}()\n\n\tif p := i.child.Process; p != nil {\n\t\terrc := make(chan error, 1)\n\t\tgo func() {\n\t\t\terrc <- i.child.Wait()\n\t\t}()\n\n\t\t// Call the quit handler on the admin server.\n\t\tres, err := http.Get(i.adminURL + \"/quit\")\n\t\tif err != nil {\n\t\t\tp.Kill()\n\t\t\treturn fmt.Errorf(\"unable to call /quit handler: %v\", err)\n\t\t}\n\t\tres.Body.Close()\n\n\t\tselect {\n\t\tcase <-time.After(15 * time.Second):\n\t\t\tp.Kill()\n\t\t\treturn errors.New(\"timeout killing child process\")\n\t\tcase err = <-errc:\n\t\t\t// Do nothing.\n\t\t}\n\t}\n\treturn\n}", "func (mv *MultipleView) Close() {\n\tmv.viewManager.removeAll()\n\tgotoLifecycleStageDead(mv.root)\n}", "func (b *ApplicationBuilder) Close() error {\n\treturn b.engine.Close()\n}", "func (t *qmlfrontend) onClose(v *backend.View) {\n\tw2 := t.windows[v.Window()]\n\tfor i := range w2.views {\n\t\tif w2.views[i].bv == v 
{\n\t\t\tw2.window.ObjectByName(\"tabs\").Call(\"removeTab\", i)\n\t\t\tcopy(w2.views[i:], w2.views[i+1:])\n\t\t\tw2.views = w2.views[:len(w2.views)-1]\n\t\t\treturn\n\t\t}\n\t}\n\tlog.Error(\"Couldn't find closed view...\")\n}", "func (b *Bot) Close() (err error) {\n\t_, err = b.doGet(\"close\")\n\treturn\n}", "func (r *Replayer) Close() error {\n\treturn errors.New(\"implement me\")\n}", "func Close() {\n\tVac.close()\n}", "func Quit(args []string) bool {\n\t// Close the main view\n\tCurView().Quit()\n\treturn false\n}", "func Close() {\n\tif global != nil {\n\t\tglobal.Close()\n\t}\n}", "func CloseDB() {\n\tdb.Close()\n}", "func CloseDB() {\n\tdb.Close()\n}", "func (s *Service) Close(ctx context.Context, e *empty.Empty) (*empty.Empty, error) {\n\tcr := s.sharedObject.Chrome\n\tif cr == nil {\n\t\treturn &empty.Empty{}, errors.New(\"Chrome has not been started\")\n\t}\n\ttconn, err := cr.TestAPIConn(ctx)\n\tif err != nil {\n\t\treturn &empty.Empty{}, errors.Wrap(err, \"failed to create test API connection\")\n\t}\n\tif err := apps.Close(ctx, tconn, apps.Settings.ID); err != nil {\n\t\treturn &empty.Empty{}, errors.Wrap(err, \"failed to close OS Settings\")\n\t}\n\treturn &empty.Empty{}, nil\n}", "func (result VoiceInfo) Close() {\n\tresult.Properties.Close()\n\tC.voice_info_handle_release(result.handle)\n}", "func (m *Main) Close() error {\n\tdefer os.RemoveAll(m.DataDir)\n\treturn m.Main.Close()\n}", "func (canvas *Canvas) Close() error {\n\treturn canvas.contents.Close()\n}", "func CloseDB() {\n\tDb.Close()\n}", "func (f *Frame) Close() error {\n\treturn nil\n}", "func (self *LdapSearchApp) Close() (error){\n\treturn self.ldap.Close()\n}", "func (ps *ProxySettings) Close(ctx context.Context, kb *input.KeyboardEventWriter) {\n\tif ps.isLoggedIn {\n\t\tif err := apps.Close(ctx, ps.tconn, apps.Settings.ID); err != nil {\n\t\t\ttesting.ContextLog(ctx, \"Failed to close Settings app: \", err)\n\t\t}\n\t} else {\n\t\tif err := kb.AccelAction(\"esc\")(ctx); err != nil {\n\t\t\ttesting.ContextLog(ctx, \"Failed to close Settings window: \", err)\n\t\t}\n\t}\n}", "func Quit() {\n\tC.IMG_Quit()\n}", "func (s *Speaker) Close() error { return nil }", "func (b Banai) Close() {\n\tos.RemoveAll(b.TmpDir)\n\tb.Jse.ClearInterrupt()\n\n}", "func (b *Button) Close() error {\n\tif err := b.pin.EndWatch(); err != nil {\n\t\treturn err\n\t}\n\treturn b.pin.Close()\n}", "func (app *App) Destroy() {\n}", "func (m *MainWindow) Close() {\n\t// Save window parameters.\n\tctx.Cfg.Cfg[\"/mainwindow/width\"] = strconv.Itoa(m.window_width)\n\tctx.Cfg.Cfg[\"/mainwindow/height\"] = strconv.Itoa(m.window_height)\n\tctx.Cfg.Cfg[\"/mainwindow/position_x\"] = strconv.Itoa(m.window_pos_x)\n\tctx.Cfg.Cfg[\"/mainwindow/position_y\"] = strconv.Itoa(m.window_pos_y)\n\tctx.Cfg.Cfg[\"/mainwindow/pane_negative_position\"] = strconv.Itoa(m.pane_negative_position)\n\n\t// Saving columns sizes and positions.\n\tall_servers_columns := m.all_servers.GetColumns()\n\tfor i := range all_servers_columns {\n\t\tctx.Cfg.Cfg[\"/mainwindow/all_servers/\"+all_servers_columns[i].GetTitle()+\"_position\"] = strconv.Itoa(i)\n\t\tctx.Cfg.Cfg[\"/mainwindow/all_servers/\"+all_servers_columns[i].GetTitle()+\"_width\"] = strconv.Itoa(all_servers_columns[i].GetWidth())\n\t}\n\tfav_servers_columns := m.fav_servers.GetColumns()\n\tfor i := range fav_servers_columns {\n\t\tctx.Cfg.Cfg[\"/mainwindow/fav_servers/\"+fav_servers_columns[i].GetTitle()+\"_position\"] = 
strconv.Itoa(i)\n\t\tctx.Cfg.Cfg[\"/mainwindow/fav_servers/\"+fav_servers_columns[i].GetTitle()+\"_width\"] = strconv.Itoa(fav_servers_columns[i].GetWidth())\n\t}\n\n\t// Additional actions should be taken on Windows.\n\tif runtime.GOOS == \"windows\" {\n\t\tm.closeWin()\n\t}\n\n\tctx.Close()\n}", "func CloseCurrentPopup() {\n\timgui.CloseCurrentPopup()\n}", "func (a *API) Close() error {\n\treturn nil\n}", "func (ui *UI) Quit() {\n\tui.HasQuit = true\n}", "func (a *MockApp) Quit() {\n\ta.Called()\n}" ]
[ "0.766603", "0.7470319", "0.7057013", "0.68337286", "0.67127764", "0.6656304", "0.6617461", "0.6609202", "0.65968126", "0.6585093", "0.64652884", "0.64607316", "0.6456707", "0.6399147", "0.6339275", "0.63321006", "0.6327963", "0.62970155", "0.6246347", "0.62412775", "0.62020075", "0.61613744", "0.6137697", "0.6136877", "0.6113086", "0.60740143", "0.6037574", "0.60136706", "0.6010434", "0.6002704", "0.59849364", "0.59821147", "0.59238803", "0.5921833", "0.59100074", "0.59044313", "0.5892694", "0.5883568", "0.58688915", "0.58666044", "0.583387", "0.58316046", "0.58185476", "0.58181083", "0.58167255", "0.5813455", "0.5791058", "0.5789411", "0.57836014", "0.57574654", "0.57457316", "0.57393", "0.5737192", "0.573569", "0.5732295", "0.57085407", "0.56923544", "0.56871974", "0.5669934", "0.56671983", "0.56581885", "0.5652112", "0.5646194", "0.5643869", "0.5642593", "0.56373477", "0.5635216", "0.56263214", "0.5617134", "0.559735", "0.55954605", "0.5592647", "0.55906117", "0.5586137", "0.55824596", "0.557613", "0.5567177", "0.55642366", "0.5562786", "0.55537605", "0.55378723", "0.55378723", "0.5534528", "0.55316883", "0.5526737", "0.55193454", "0.55107677", "0.54909855", "0.5485927", "0.5480052", "0.5478381", "0.54731065", "0.54694957", "0.5467615", "0.5466852", "0.54620165", "0.54618484", "0.54563195", "0.5456317", "0.5454565" ]
0.59153163
34
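
Aside (editor's illustration, not a dataset row): the first negative in the record above shuts an HTTP server down with srv.Shutdown(context.Background()). Below is a minimal, self-contained sketch of that graceful-shutdown pattern; the app struct name and the five-second deadline are assumptions added for the example, since the row shows only the method body.

package main

import (
	"context"
	"net/http"
	"time"
)

// app mirrors the shape the snippet implies: it owns an *http.Server
// and nils the field out once the server has been shut down.
type app struct {
	srv *http.Server
}

// Close stops the embedded server gracefully, letting in-flight
// requests drain until the context deadline expires.
func (a *app) Close() error {
	if a.srv == nil {
		return nil
	}
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	if err := a.srv.Shutdown(ctx); err != nil {
		return err
	}
	a.srv = nil
	return nil
}

Unlike srv.Close(), Shutdown first stops accepting new connections and then waits for active requests to finish, which is why teardown code like the snippets above prefers it.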
New returns a wrapped go-kit logger.
func New(out io.Writer) Logger { l := log.NewJSONLogger(log.NewSyncWriter(out)) l = log.With(l, "ts", log.DefaultTimestampUTC) return &logger{l} }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func New() logr.Logger {\n\treturn klogger{\n\t\tlevel: 0,\n\t\tprefix: \"\",\n\t\tvalues: nil,\n\t}\n}", "func New(format, level string, atomic zap.AtomicLevel) (*Logger, error) {\n\tlogFormat, err := logger.MapFormat(format)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlogLevel, err := logger.MapLevel(level)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlog, err := logger.NewWithAtomicLevel(logFormat, atomic)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err = logger.InitKlog(log, logLevel); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Redirects logs those are being written using standard logging mechanism to klog\n\t// to avoid logs from controller-runtime being pushed to the standard logs.\n\tklog.CopyStandardLogTo(\"ERROR\")\n\n\treturn &Logger{Logger: log}, nil\n}", "func New(ctx ...interface{}) log15.Logger {\n\treturn log15.Root().New(ctx...)\n}", "func New(ctx ...interface{}) log15.Logger {\n\tl := log15.New(ctx...)\n\tl.SetHandler(defaultHandler)\n\treturn l\n}", "func New(lgr *stdlog.Logger) *Logger {\n\treturn &Logger{\n\t\tlogger: lgr,\n\t\tLevel: log.InfoLevel,\n\t}\n}", "func New(w io.Writer, prefix string) *Logger {\n\treturn &Logger{w: w, prefix: prefix, Level: defaultLogLevel }\n}", "func New(logger *zerolog.Logger) log.Logger {\n\tif logger == nil {\n\t\tlg := zerolog.New(os.Stdout).With().Timestamp().Logger()\n\t\tlogger = &lg\n\t}\n\n\treturn &shim{logger: logger}\n}", "func New() *Logger {\n\tdefaultLogger = &Logger{\n\t\toutput: os.Stdout,\n\t\terrOutput: os.Stderr,\n\t}\n\treturn defaultLogger\n}", "func New(pluginName string) *Logger {\n\tlg := log.New(os.Stdout, \"\", log.Ldate|log.Ltime)\n\tif len(pluginName) == 0 {\n\t\treturn &Logger{logger: lg}\n\t}\n\treturn &Logger{\n\t\tprefix: fmt.Sprintf(\"[%s] \", pluginName),\n\t\tlogger: lg,\n\t}\n}", "func New() *Logger {\n\tconf := zap.NewDevelopmentConfig()\n\tl, err := conf.Build(\n\t\tzap.AddStacktrace(zap.FatalLevel),\n\t)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tz := l.Sugar()\n\treturn &Logger{z}\n}", "func New(handler Handler, flag int) *Logger {\n\tvar l = new(Logger)\n\n\tl.level = LevelInfo\n\tl.handler = handler\n\n\tl.flag = flag\n\n\tl.quit = make(chan struct{})\n\tl.closed = false\n\n\tl.msg = make(chan []byte, 1024)\n\n\tl.bufs = make([][]byte, 0, 16)\n\n\tl.wg.Add(1)\n\tgo l.run()\n\n\treturn l\n}", "func New(config logging.Config) logging.Logger {\n\treturn rem.New(config, writeWithStd)\n}", "func New(adapter Adapter) Logger {\n\treturn &logger{adapter: adapter}\n}", "func New() *Logger {\n\tif log == nil {\n\t\tlog = new(Logger)\n\t\tlog.Logger = logrus.New()\n\n\t\tlog.Formatter = &MyFormatter{}\n\n\t\tswitch strings.ToUpper(strings.TrimSpace(configure.GetString(\"log.level\"))) {\n\t\tcase \"PANIC\":\n\t\t\tlog.Level = logrus.PanicLevel\n\t\tcase \"FATAL\":\n\t\t\tlog.Level = logrus.FatalLevel\n\t\tcase \"ERROR\":\n\t\t\tlog.Level = logrus.ErrorLevel\n\t\tcase \"WARN\", \"WARNING\":\n\t\t\tlog.Level = logrus.WarnLevel\n\t\tcase \"INFO\":\n\t\t\tlog.Level = logrus.InfoLevel\n\t\tcase \"DEBUG\":\n\t\t\tlog.Level = logrus.DebugLevel\n\t\tdefault:\n\t\t\tlog.Level = logrus.DebugLevel\n\t\t}\n\n\t\tlogFile := getLogFile(strings.TrimSpace(configure.GetString(\"log.file\")))\n\t\tlog.Out = logFile\n\n\t}\n\treturn log\n}", "func New(w io.Writer) *Logger {\n\treturn &Logger{w: w}\n}", "func New(info logger.Info) (logger.Logger, error) {\n\tlogDir := removeLogDirOption(info.Config)\n\tif logDir == \"\" {\n\t\tlogDir = defaultLogDir\n\t}\n\tinfo.LogPath = filepath.Join(logDir, 
info.ContainerID)\n\n\tif err := os.MkdirAll(filepath.Dir(info.LogPath), 0755); err != nil {\n\t\treturn nil, fmt.Errorf(\"error setting up logger dir: %v\", err)\n\t}\n\n\treturn jsonfilelog.New(info)\n}", "func New(config *Config) (Logger, error) {\n\troot = logrus.New()\n\tif err := SetOutput(config.Output); err != nil {\n\t\treturn nil, err\n\t}\n\t// Set level\n\tif err := SetLevel(config.Level); err != nil {\n\t\treturn nil, err\n\t}\n\tconsole := false\n\tswitch config.Output {\n\tcase \"stdout\":\n\t\tconsole = true\n\tcase \"stderr\":\n\t\tconsole = true\n\tcase \"split\":\n\t\tconsole = true\n\t}\n\tif console {\n\t\tSetTextFormatter(config.ConsoleFormat)\n\t} else {\n\t\tSetJSONFormatter()\n\t}\n\t// Add global fields\n\tSetFields(config.Fields)\n\tlogg = &logger{\n\t\tentry: logrus.NewEntry(root),\n\t\tconfig: config,\n\t}\n\treturn logg, nil\n}", "func New(options ...Option) *Logger {\n\n\tres := Logger{\n\t\tnow: time.Now,\n\t\tfatal: func() { os.Exit(1) },\n\t\tstdout: os.Stdout,\n\t\tstderr: os.Stderr,\n\t\tcallerDepth: 0,\n\t\tmapper: nopMapper,\n\t\treTrace: reTraceDefault,\n\t}\n\tfor _, opt := range options {\n\t\topt(&res)\n\t}\n\n\tif res.format != \"\" {\n\t\t// formatter defined\n\t\tvar err error\n\t\tres.templ, err = template.New(\"lgr\").Parse(res.format)\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"invalid template %s, error %v. switched to %s\\n\", res.format, err, Short)\n\t\t\tres.format = Short\n\t\t\tres.templ = template.Must(template.New(\"lgrDefault\").Parse(Short))\n\t\t}\n\n\t\tbuf := bytes.Buffer{}\n\t\tif err = res.templ.Execute(&buf, layout{}); err != nil {\n\t\t\tfmt.Printf(\"failed to execute template %s, error %v. switched to %s\\n\", res.format, err, Short)\n\t\t\tres.format = Short\n\t\t\tres.templ = template.Must(template.New(\"lgrDefault\").Parse(Short))\n\t\t}\n\t}\n\n\t// set *On flags once for optimization on multiple Logf calls\n\tres.callerOn = strings.Contains(res.format, \"{{.Caller\") || res.callerFile || res.callerFunc || res.callerPkg\n\tres.levelBracesOn = strings.Contains(res.format, \"[{{.Level}}]\") || res.levelBraces\n\n\tres.sameStream = isStreamsSame(res.stdout, res.stderr)\n\n\treturn &res\n}", "func New(prefix string, writers ...io.Writer) *log.Logger {\n\treturn log.New(NewWriter(prefix, writers...), \"\", 0)\n}", "func New() logr.Logger {\n\treturn NewWithOptions(Options{})\n}", "func New() logr.Logger {\n\treturn NewWithOptions(Options{})\n}", "func New(logger TestLogger) *Logger {\n\treturn &Logger{logger}\n}", "func New(w io.Writer) *Logger {\n\tout := w\n\tif out == nil {\n\t\tout = os.Stderr\n\t}\n\n\treturn &Logger{\n\t\tcalldepth: 2,\n\t\tlog: log.New(out, \"\", log.LstdFlags|log.Lmicroseconds),\n\t}\n}", "func New(i ILog) *Logger {\n\treturn &Logger{\n\t\tout: i,\n\t}\n}", "func New() Logger {\n\treturn Logger{\n\t\tout: os.Stdout,\n\t\terr: os.Stderr,\n\t}\n}", "func New(filename string) *Logger {\n\tl := lumberjack.Logger{\n\t\tFilename: filename,\n\t\tMaxSize: 500,\n\t\tMaxBackups: 3,\n\t\tMaxAge: 30,\n\t\tCompress: true,\n\t}\n\n\treturn &Logger{\n\t\tLogger: l,\n\t}\n}", "func NewLogger(env string, loggerName string, minLevel AtomicLevelName) (Logger, error) {\n\tdt := utils.DateTime{}\n\tdtNow := dt.Now()\n\n\tkitLogger, err := buildLogger(dtNow)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tkitLogger = gokitLogger.With(kitLogger,\n\t\t\"timestamp\", gokitLogger.DefaultTimestampUTC,\n\t\t\"caller\", gokitLogger.Caller(8),\n\t\t\"process\", utils.ProcessName(),\n\t\t\"loggerName\", loggerName,\n\t\t\"env\", 
env,\n\t)\n\tkitLogger = level.NewFilter(kitLogger, toLevelOption(minLevel))\n\treturn &logger{\n\t\tkitLogger: kitLogger,\n\t\tenv: env,\n\t\tloggerName: loggerName,\n\t\tminLevel: minLevel,\n\t\tdateUpdated: dtNow,\n\t}, nil\n}", "func New() Logger {\n\treturn &logger{\n\t\tconfig: LevelDisabled,\n\t\tctx: nil,\n\t\tformatter: NewStringFormatter(StringFormatterParams{}),\n\t\tlevel: LevelUnknown,\n\t\tnamespace: \"\",\n\t\twriter: os.Stderr,\n\t}\n}", "func New(f *os.File, level gol.Level) logging.Logger {\n\t// Leveled formatted file backend.\n\tbackend := gol.AddModuleLevel(\n\t\tgol.NewBackendFormatter(\n\t\t\tgol.NewLogBackend(f, \"\", 0),\n\t\t\tgol.MustStringFormatter(fmt)))\n\tbackend.SetLevel(level, \"\")\n\tlogger := gol.MustGetLogger(\"\")\n\tlogger.SetBackend(backend)\n\treturn Wrap(logger)\n}", "func New(config *Configuration) (Logger, error) {\n\tlogger, err := newZapLogger(config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tlog = logger\n\treturn log, nil\n}", "func New(dir, name string) (lp *Logger, err error) {\n\tvar l Logger\n\tl.dir = dir\n\tl.name = name\n\n\t// Set initial logger file\n\tif err = l.setFile(); err != nil {\n\t\treturn\n\t}\n\n\t// Assign lp as a pointer to our created logger\n\tlp = &l\n\treturn\n}", "func New(counter metrics.Counter, latency metrics.Histogram, logger log.Logger) Logger {\n\treturn Logger{\n\t\tcallUpdate: make(chan interface{}),\n\t\tcallError: make(chan error),\n\t\trequestCount: counter,\n\t\trequestLatency: latency,\n\t\tlogger: logger,\n\t}\n}", "func New() *Logger {\n\treturn &Logger{\n\t\tStdout: os.Stdout,\n\t\tStderr: os.Stderr,\n\t\tVerbosity: VerbosityLevelNormal,\n\t}\n}", "func New(level Level, output io.Writer, fields ...Field) *Logger {\n\tl := new(Logger)\n\tl.level = level\n\tl.output = output\n\tl.encoder = NewEncoderText(EncoderTextConfig{\n\t\tSeparator: defaultTextSeparator,\n\t})\n\tl.hooks = newLevelHooks()\n\tl.exit = os.Exit\n\n\tl.setCalldepth(calldepth)\n\tl.SetFields(fields...)\n\tl.SetFlags(LstdFlags)\n\n\treturn l\n}", "func New(name string, base logrus.Level, level []logrus.Level, dev bool) (*Logger, error) {\n\t// If Logger had been created, return nil.\n\tif Has(name) {\n\t\treturn nil, errors.New(\"Name cannot be duplicated\")\n\t}\n\n\t// Create logger.\n\tlogger := &Logger{Logger: logrus.New()}\n\n\t// Create log file in temp folder.\n\tif logFile, err := ioutil.TempFile(\"\", name+\".*.log\"); err == nil {\n\t\tlogger.Path = logFile.Name()\n\t} else {\n\t\treturn nil, errors.New(\"Cannot create log file\")\n\t}\n\n\t// Enable color logging in Windows console.\n\tlogger.Formatter = &logrus.TextFormatter{ForceColors: true}\n\tlogger.SetOutput(colorable.NewColorableStdout())\n\n\t// Update logger config.\n\tlogger.Config(base, level, dev)\n\n\t// Store logger.\n\tloggers[name] = logger\n\n\treturn logger, nil\n}", "func NewKitLogger(opts ...lvl.Option) Logger {\n\tlogger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stdout))\n\tfor _, opt := range opts {\n\t\tlogger = lvl.NewFilter(logger, opt)\n\t}\n\ttimestamp := log.TimestampFormat(time.Now, time.RFC1123)\n\tlogger = log.With(logger, \"ts\", timestamp)\n\tlogger = log.With(logger, \"call\", log.Caller(8))\n\treturn NewKitLoggerFrom(logger)\n}", "func newLogger() services.Logger {\n\treturn &services.SystemOutLogger{}\n}", "func New(fullPath string, prefix string, flag int, level int, model int) *Logger {\n\tvar file *os.File\n\tif model&Lfile != 0 {\n\t\tvar err error\n\t\tif file, err = os.OpenFile(fullPath, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 
0666); err != nil {\n\t\t\tfmt.Println(\"failed to open the log file (falling back to screen output):\", err)\n\t\t\tmodel = Lscreen\n\t\t}\n\t}\n\n\tvar out io.Writer\n\tif model&Lscreen != 0 && model&Lfile != 0 {\n\t\tout = io.MultiWriter(os.Stdout, file)\n\t} else if model&Lscreen != 0 {\n\t\tout = os.Stdout\n\t} else if model&Lfile != 0 {\n\t\tout = file\n\t}\n\n\treturn &Logger{\n\t\tprefix: prefix,\n\t\tflag: flag,\n\t\tout: out,\n\t\tlevel: level,\n\t\tmodel: model,\n\t\tfile: file,\n\t}\n}", "func New(w io.Writer, levels uint8) *Logger {\n\tif w == nil {\n\t\tw = ioutil.Discard\n\t}\n\n\treturn &Logger{\n\t\tw: w,\n\t\tlevels: levels,\n\t\tExitFn: os.Exit,\n\t}\n}", "func NewLogger(level int) Logger {\n\tvar opt lvl.Option\n\tswitch level {\n\tcase LogNone:\n\t\topt = lvl.AllowNone()\n\tcase LogInfo:\n\t\topt = lvl.AllowInfo()\n\tcase LogDebug:\n\t\topt = lvl.AllowDebug()\n\tdefault:\n\t\tpanic(\"unknown log level\")\n\t}\n\treturn NewKitLogger(opt)\n}", "func New(config Config) Logger {\n\tlg := &logger{\n\t\tencoder: config.Encoder,\n\t\tfilters: config.Filters,\n\t\tthreshold: config.Threshold,\n\t}\n\tif len(lg.filters) == 0 {\n\t\tlg.filters = []Filter{DefaultFilter}\n\t}\n\tif config.Encoder == nil {\n\t\tlg.encoder = DefaultEncoder\n\t}\n\treturn lg\n}", "func New(l logrus.FieldLogger) ctxlog.Logger {\n\tif l == nil {\n\t\tl = logrus.New()\n\t}\n\n\treturn logrusAdapter{l}\n}", "func New() Logger {\n\n\tzerolog.TimeFieldFormat = zerolog.TimeFormatUnix\n\n\tlogLevel := zerolog.InfoLevel\n\n\tzerolog.SetGlobalLevel(logLevel)\n\n\tlogger := zerolog.New(os.Stdout).With().Timestamp().Logger()\n\n\treturn Logger{logger: &logger}\n}", "func New() {\n\tzapLogger, _ := zap.NewProduction()\n\tLog = Logger{\n\t\tinternalLogger: zapLogger.Sugar(),\n\t}\n}", "func New(output io.Writer, level LogLevel) *Logger {\n\tvar (\n\t\tsystemdInvocation bool\n\t\tawsLogGroup bool\n\t\tflags int\n\t)\n\t// Detect special compatibility modes\n\t_, systemdInvocation = os.LookupEnv(\"INVOCATION_ID\")\n\t_, awsLogGroup = os.LookupEnv(\"AWS_LAMBDA_LOG_GROUP_NAME\")\n\t// Prepare configuration accordingly\n\tif systemdInvocation && output != os.Stdout && output != os.Stderr {\n\t\t// launched by systemd but logger is not being redirected to std output (may be to a file ?)\n\t\t// disabling journald compat mode\n\t\tsystemdInvocation = false\n\t}\n\tif !systemdInvocation && !awsLogGroup {\n\t\tflags = log.Ltime | log.Ldate\n\t}\n\t// Return the initialized logger\n\treturn &Logger{\n\t\tjournald: systemdInvocation,\n\t\tllevel: level,\n\t\tlogger: log.New(output, \"\", flags),\n\t}\n}", "func NewLogger(client *Client, mode int, ttl time.Duration) *Logger {\n\tl := &Logger{\n\t\tclient: client,\n\t\tqueue: make(chan LogEntry, 1024),\n\t\tnow: time.Now,\n\t\tmode: mode,\n\t}\n\tif mode != LogDiscard {\n\t\tgo l.readQueue(ttl)\n\t}\n\treturn l\n}", "func newLogger() *ServiceLogger {\n\tLogger := log.New()\n\tvar serviceLogger ServiceLogger\n\t// Log as JSON instead of the default ASCII formatter.\n\tLogger.SetFormatter(&log.JSONFormatter{})\n\n\t// Output to stdout instead of the default stderr\n\tLogger.SetOutput(os.Stdout)\n\n\t// Only log the warning severity or above.\n\tLogger.SetLevel(log.InfoLevel)\n\n\tserviceLogger.Logger = Logger\n\n\treturn &serviceLogger\n}", "func New(opts ...NewFuncOption) {\n\tlogLevel := zerolog.InfoLevel\n\tzerolog.SetGlobalLevel(logLevel)\n\tlogger := zerolog.New(os.Stdout).With().Timestamp().Logger()\n\tfor _, o := range opts {\n\t\tlogger = o(&logger)\n\t}\n\n\tLogger = logger\n}", "func New(t testing.TB) lg.Log 
{\n\treturn NewWith(t, FactoryFn)\n}", "func New() *Logger {\n\n\tbaseLogrus := logrus.New()\n\n\tvar logger = &Logger{baseLogrus}\n\n\tf, err := os.OpenFile(\"dummy-api.log\", os.O_CREATE|os.O_WRONLY, 0666)\n\tif err != nil {\n\t\tlog.Fatalf(\"unable to interact with log file: %s\", err)\n\t}\n\n\tlogger.SetFormatter(&logrus.JSONFormatter{\n\t\tTimestampFormat: \"02-01-2006 15:04:05\", // DD-MM-YYYY HH:MM:SS\n\n\t})\n\n\toutputs := io.MultiWriter(os.Stderr, f) // Write to both standard error and the log file.\n\tlogger.Out = outputs\n\n\treturn logger\n\n}", "func (alog *AppLogger) New(requestID string) Logger {\n\t// shortcut\n\tif alog.requestID == requestID {\n\t\treturn alog\n\t}\n\n\tlg := alog.pool.Get()\n\tif nlog, ok := lg.(*AppLogger); ok {\n\t\tnlog.requestID = requestID\n\t\tnlog.SetTags(requestID)\n\n\t\treturn nlog\n\t}\n\n\treturn lg.(Logger).New(requestID)\n}", "func New(opts ...Option) log.Logger {\n\tl := new(logger)\n\tfor _, opt := range append(defaultOptions, opts...) {\n\t\topt(l)\n\t}\n\n\treturn l.setLevelMode(l.level).\n\t\tsetLogFormat(l.format)\n}", "func New(opts ...Option) log.Logger {\n\tl := new(logger)\n\tfor _, opt := range append(defaultOptions, opts...) {\n\t\topt(l)\n\t}\n\n\treturn l.setLevelMode(l.level).\n\t\tsetLogFormat(l.format)\n}", "func New(l *logrus.Logger) loggers.Logger {\n\treturn &Logrus{l}\n}", "func NewLogger(t mockConstructorTestingTNewLogger) *Logger {\n\tmock := &Logger{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}", "func NewLogger(level string) *Logger {\n\tlog := kitlog.NewJSONLogger(kitlog.NewSyncWriter(os.Stdout))\n\tswitch level {\n\tcase \"error\":\n\t\tlog = kitlevel.NewFilter(log, kitlevel.AllowError()) // only error logs\n\tcase \"warn\":\n\t\tlog = kitlevel.NewFilter(log, kitlevel.AllowWarn()) // warn + error logs\n\tcase \"info\":\n\t\tlog = kitlevel.NewFilter(log, kitlevel.AllowInfo()) // info + warn + error logs\n\tcase \"debug\":\n\t\tlog = kitlevel.NewFilter(log, kitlevel.AllowDebug()) // all logs\n\tdefault:\n\t\tlog = kitlevel.NewFilter(log, kitlevel.AllowNone()) // no logs\n\t}\n\n\tlog = kitlog.With(log, \"service\", \"vent\")\n\tlog = kitlog.With(log, \"ts\", kitlog.DefaultTimestampUTC)\n\tlog = kitlog.With(log, \"caller\", kitlog.Caller(4))\n\n\treturn &Logger{\n\t\tLog: log,\n\t}\n}", "func New(logFile string, prefix string) (*Logger, error) {\n\tlogger := new(Logger)\n\tvar err error\n\tlogger.file, err = os.OpenFile(logFile, os.O_RDWR|os.O_APPEND|os.O_CREATE, 0600)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tlogger.logger.SetOutput(logger.file)\n\tlogger.logger.SetPrefix(prefix)\n\tlogger.logger.SetFlags(log.LstdFlags)\n\treturn logger, nil\n}", "func New(opts ...Options) *Logger {\n\tvar o Options\n\tif len(opts) == 0 {\n\t\to = Options{}\n\t} else {\n\t\to = opts[0]\n\t}\n\n\t// Determine prefix.\n\tprefix := o.Prefix\n\tif len(prefix) > 0 && o.DisableAutoBrackets == false {\n\t\tprefix = \"[\" + prefix + \"] \"\n\t}\n\n\t// Determine output writer.\n\tvar output io.Writer\n\tif o.Out != nil {\n\t\toutput = o.Out\n\t} else {\n\t\t// Default is stdout.\n\t\toutput = os.Stdout\n\t}\n\n\t// Determine output flags.\n\tflags := log.LstdFlags\n\tif o.OutputFlags == -1 {\n\t\tflags = 0\n\t} else if o.OutputFlags != 0 {\n\t\tflags = o.OutputFlags\n\t}\n\n\treturn &Logger{\n\t\tLogger: log.New(output, prefix, flags),\n\t\topt: o,\n\t}\n}", "func NewLogger(keptnContext string, eventID string, serviceName string) *Logger {\n\treturn &Logger{\n\t\tKeptnContext: 
keptnContext,\n\t\tEventID: eventID,\n\t\tServiceName: serviceName,\n\t}\n}", "func New(channelID string, session *discordgo.Session) *Logger {\n\n\tl := &Logger{\n\t\tChannelID: channelID,\n\t\tLogDeletes: false,\n\t\tLogEdits: false,\n\t\tLogImages: false,\n\t}\n\n\treturn l\n}", "func New(out io.Writer, level LogLevel, useLocalTime bool) *Logger {\n\tif out == nil {\n\t\treturn nil\n\t}\n\n\tswitch level {\n\tcase Fine, Trace, Debug, Info, Warn, Error:\n\tcase Off:\n\t\treturn nil\n\tdefault:\n\t\treturn nil\n\t}\n\n\tvar tz string\n\tflag := log.LstdFlags | log.Lmicroseconds\n\tif !useLocalTime {\n\t\tflag |= log.LUTC\n\t\ttz = \"UTC \"\n\t}\n\n\treturn &Logger{\n\t\tlevel: level,\n\t\tlogger: log.New(out, \"\", flag),\n\t\ttimezone: tz,\n\t}\n}", "func New(core Core, options ...Option) *Logger {\n\tif core == nil {\n\t\tcore = NewNopCore()\n\t}\n\n\tlog := &Logger{\n\t\tcore: core,\n\t}\n\n\tfor _, opt := range options {\n\t\topt.apply(log)\n\t}\n\treturn log\n}", "func New(service string) (*Logger, error) {\n\n\tvar logger *log.Logger\n\tvar file *os.File\n\n\tif service != \"\" {\n\t\tfilename := fmt.Sprintf(\"%s_%s.log\", service, time.Now().Format(\"2006_01_02_15_04_05\"))\n\t\tf, err := os.Create(filename)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tlogger = log.New(f, \"\", log.LstdFlags)\n\t\tfile = f\n\t} else {\n\t\tlogger = log.New(os.Stdout, \"\", log.LstdFlags)\n\t}\n\n\tl := new(Logger)\n\tl.logger = logger\n\tl.file = file\n\n\treturn l, nil\n}", "func New(config log.Config, pctx core.PeerContext) (*Logger, error) {\n\thostname, err := os.Hostname()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"hostname: %s\", err)\n\t}\n\n\tlogger, err := log.New(config, map[string]interface{}{\n\t\t\"hostname\": hostname,\n\t\t\"zone\": pctx.Zone,\n\t\t\"cluster\": pctx.Cluster,\n\t\t\"peer_id\": pctx.PeerID.String(),\n\t})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"config: %s\", err)\n\t}\n\treturn &Logger{logger}, nil\n}", "func New() *log.Logger {\n\treturn log.New(os.Stdout, \"\", log.Lshortfile)\n}", "func NewLogger() Logger {\n\tdepth := 6\n\treturn new(&depth)\n}", "func New(configurations ...func(*Logger)) *Logger {\n\t// default log config\n\tlogger := Logger{\n\t\tlogCritical: log.New(os.Stderr, fmt.Sprintf(\"%-9s\", LevelCritical), DefaultLogFlags),\n\t\tlogError: log.New(os.Stderr, fmt.Sprintf(\"%-9s\", LevelError), DefaultLogFlags),\n\t\tlogWarning: log.New(os.Stdout, fmt.Sprintf(\"%-9s\", LevelWarning), DefaultLogFlags),\n\t\tlogNotice: log.New(os.Stdout, fmt.Sprintf(\"%-9s\", LevelNotice), DefaultLogFlags),\n\t\tlogInfo: log.New(os.Stdout, fmt.Sprintf(\"%-9s\", LevelInfo), DefaultLogFlags),\n\t\tlogDebug: log.New(os.Stdout, fmt.Sprintf(\"%-9s\", LevelDebug), DefaultLogFlags),\n\t\tlogTrace: log.New(os.Stdout, fmt.Sprintf(\"%-9s\", LevelTrace), DefaultLogFlags),\n\t\tlevel: Info,\n\t}\n\n\t// now customize logger\n\tfor _, config := range configurations {\n\t\tconfig(&logger)\n\t}\n\n\treturn &logger\n}", "func NewLogger(name string, t *testing.T) *Logger {\n\tlogger := &Logger{\n\t\tstd: t,\n\t\tname: name,\n\t}\n\n\treturn logger\n}", "func New(progressAction string, logger slog.Logger) *Logger {\n\treturn &Logger{\n\t\tlastLogTime: time.Now(),\n\t\tprogressAction: progressAction,\n\t\tsubsystemLogger: logger,\n\t}\n}", "func New() Logger {\n\treturn NewWithWriter(os.Stdout)\n}", "func New(logger *logger.Logger) *Log {\n\treturn &Log{write: logger}\n}", "func New(options ...Option) (*Logger, error) {\n\tl := &Logger{\n\t\tFormatter: 
NewTextOutputFormatter(),\n\t\tOutput: NopCloserWriter{NewInterlockedWriter(os.Stdout)},\n\t\tRecoverPanics: DefaultRecoverPanics,\n\t\tFlags: NewFlags(DefaultFlags...),\n\t\tWritable: FlagsAll(),\n\t\tScopes: ScopesAll(),\n\t\tWritableScopes: ScopesAll(),\n\t}\n\n\tl.Scope = NewScope(l)\n\tvar err error\n\tfor _, option := range options {\n\t\tif err = option(l); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn l, nil\n}", "func New(opts ...Option) *Logger {\n\tconfig := &Config{}\n\tfor _, o := range opts {\n\t\to(config)\n\t}\n\n\tresult := &Logger{\n\t\tconfig: config,\n\t}\n\n\tzapOptions := []zap.Option{\n\t\tzap.AddCaller(),\n\t\tzap.AddCallerSkip(1),\n\t}\n\n\tif config.zapConfig != nil {\n\t\tresult.logger, _ = config.zapConfig.Build(zapOptions...)\n\t} else {\n\t\tresult.logger, _ = zap.NewProduction(zapOptions...)\n\t}\n\n\treturn result\n}", "func New() *Logger {\n\treturn &Logger{\n\t\t&logrus.Logger{\n\t\t\tOut: os.Stderr,\n\t\t\tLevel: logrus.ErrorLevel,\n\t\t\tHooks: logrus.LevelHooks{},\n\t\t\tFormatter: &TextFormatter{\n\t\t\t\tIgnoreFields: []string{\"ctx\"},\n\t\t\t\tTimestampFormat: time.RFC3339,\n\t\t\t},\n\t\t},\n\t}\n}", "func New() *logrus.Logger {\n\treturn logrus.New()\n}", "func newLogger() logger {\n\tleKey := os.Getenv(\"LOG_ENTRIES_KEY\")\n\tfmtFallback := &fmtLogger{}\n\n\tif leKey == \"\" {\n\t\treturn fmtFallback\n\t}\n\n\tle, err := le_go.Connect(leKey)\n\n\tif err != nil {\n\t\treturn fmtFallback\n\t}\n\n\tdefer le.Close()\n\n\treturn le\n}", "func New(filename string, prefix string, flag int) *Logger {\n\treturn NewEx(filename, prefix, flag, SPLIT_FILE_SIZE, TOTAL_ROTATE_SPLIT)\n}", "func New(lev Level, lis Listener, layout string) *Logger {\n\tlogger := &Logger{\n\t\tlev: lev,\n\t\tlis: []Listener{lis},\n\t}\n\tlogger.SetLayout(layout)\n\treturn logger\n}", "func newCMLogger(name string, chainId string, logger *zap.SugaredLogger, logLevel log.LOG_LEVEL) *CMLogger {\n\treturn &CMLogger{name: name, chainId: chainId, SugaredLogger: logger, logLevel: logLevel}\n}", "func New(opts *Options) *Logger {\n\tif opts == nil {\n\t\topts = NewOptions()\n\t}\n\n\tvar zapLevel zapcore.Level\n\tif err := zapLevel.UnmarshalText([]byte(opts.Level)); err != nil {\n\t\tzapLevel = zapcore.InfoLevel\n\t}\n\tcore := zapcore.NewCore(getEncoder(opts), getWriteSyncer(opts), zapLevel)\n\n\tlog := zap.New(core, getZapOptions(opts)...)\n\n\tlogger := &Logger{\n\t\tlog: log,\n\t}\n\n\treturn logger\n}", "func NewLogger(l logger.Logger) Logger {\n\treturn func(next http.Handler) http.Handler {\n\t\tfn := func(w http.ResponseWriter, r *http.Request) {\n\t\t\tww := cw.NewWrapResponseWriter(w, r.ProtoMajor)\n\t\t\tt1 := time.Now()\n\t\t\tdefer func() {\n\t\t\t\tmethod := r.Method\n\t\t\t\turl := r.URL\n\t\t\t\trequestID, _ := r.Context().Value(cw.RequestIDKey).(string)\n\t\t\t\tl.Info(\n\t\t\t\t\tfmt.Sprintf(\"%s %s %s\", method, r.URL, r.Proto),\n\t\t\t\t\tzap.Int(\"status\", ww.Status()),\n\t\t\t\t\tzap.String(\"url\", url.String()),\n\t\t\t\t\tzap.String(\"proto\", r.Proto),\n\t\t\t\t\tzap.String(\"ip\", r.RemoteAddr),\n\t\t\t\t\tzap.Int(\"byte\", ww.BytesWritten()),\n\t\t\t\t\tzap.Duration(\"latency\", time.Since(t1)),\n\t\t\t\t\tzap.String(\"reqID\", requestID),\n\t\t\t\t)\n\t\t\t}()\n\t\t\tnext.ServeHTTP(ww, r)\n\t\t}\n\t\treturn http.HandlerFunc(fn)\n\t}\n}", "func NewLogger(kitlogger log.Logger, options ...LoggerOption) *Logger {\n\tlogger := &Logger{\n\t\tinfoLogger: level.Info(kitlogger),\n\t\terrorLogger: level.Error(kitlogger),\n\n\t\tmessageKey: 
\"msg\",\n\t}\n\n\tfor _, option := range options {\n\t\toption(logger)\n\t}\n\n\treturn logger\n}", "func New(l *logging.Logger) transport.Transport {\n\treturn &DummyLogs{\n\t\tlogger: l,\n\t}\n}", "func NewLogger() *Clogger {\n\tl := &Clogger{\n\t\tin: make(chan string, 256),\n\t\tw: os.Stderr,\n\t}\n\tgo l.run()\n\treturn l\n}", "func New(t testing.TB) *slog.Logger {\n\ttw := &testWriter{t: t}\n\tslh := slog.NewTextHandler(tw)\n\treturn slog.New(slh)\n}", "func New(md config.Mode) (*logger, error) {\n\tvar lgr logger\n\tvar zapLg *zap.Logger\n\tvar err error\n\n\tif md.IsProduction() {\n\t\tzapLg, err = zap.NewProduction()\n\t} else {\n\t\tzapLg, err = zap.NewDevelopment()\n\t}\n\tif err != nil {\n\t\treturn &lgr, err\n\t}\n\n\tif md.IsProduction() {\n\t\tif err := zapLg.Sync(); err != nil {\n\t\t\treturn &lgr, err\n\t\t}\n\t}\n\n\tlgr.client = zapLg\n\n\treturn &lgr, nil\n}", "func newLogger(dbg bool) *lgr.Logger {\n\tif dbg {\n\t\treturn lgr.New(lgr.Msec, lgr.Debug, lgr.CallerFile, lgr.CallerFunc, lgr.LevelBraces)\n\t}\n\n\treturn lgr.New(lgr.Msec, lgr.LevelBraces)\n}", "func newLogger(buffer int64) *Logger {\n\tl := &Logger{\n\t\tmsg: make(chan *logMsg, buffer),\n\t\toutputs: make(map[string]LoggerInterface),\n\t\tquit: make(chan bool),\n\t}\n\tgo l.StartLogger()\n\treturn l\n}", "func NewLogger(p Priority, logFlag int) (*log.Logger, error) {}", "func New() *logrus.Entry {\n\tbaseLogger := logrus.WithField(\"request_id\", uuid.New().String())\n\tbaseLogger.Logger.SetFormatter(&logrus.JSONFormatter{})\n\treturn baseLogger\n}", "func NewLogger(name string) Logger {\n\tlogFormat := DefaultLogFormat\n\tenvLogFormat := strings.ToUpper(os.Getenv(EnvKeyLogFormat))\n\tif envLogFormat == \"JSON\" {\n\t\tlogFormat = FormatJson\n\t}\n\tzl, lvl, _ := newZapLogger(logFormat, DefaultLogLevel)\n\tif name == \"\" {\n\t\tname = \"flogo.custom\"\n\t}\n\treturn &zapLoggerImpl{loggerLevel: lvl, mainLogger: zl.Named(name).Sugar()}\n}", "func NewLogger(w io.Writer) Logger {\n\treturn &logger{\n\t\tlog0: log.New(w, \"INFO: \", log.Lshortfile),\n\t}\n}", "func New(l *logrus.Entry) grpclog.Logger {\n\tif l == nil {\n\t\tl = logrus.WithFields(logrus.Fields{\"source\": \"grpc\"})\n\t}\n\treturn &log{l: l}\n}", "func New(c *Config) (l *Logger, err error) {\n\t// set FlushAfterSeconds\n\tif c.FlushAfterSeconds > LoggerFlushAfterSecondsMax {\n\t\tlog.Printf(\"Limiting FlushAfterSeconds to %d\", LoggerFlushAfterSecondsMax)\n\t\tc.FlushAfterSeconds = LoggerFlushAfterSecondsMax\n\t}\n\n\tconsumers := make([]chan DataUnit, 0)\n\tl = &Logger{\n\t\tconsumers: consumers,\n\t\tconfig: *c,\n\n\t\tstop: make(chan struct{}, 0),\n\t}\n\terr = l.initLoggerBuffer()\n\treturn\n}", "func New(opts ...OptFunc) *Logger {\n\tl := &Logger{lg: log.New(os.Stderr, \"\", LstdFlags&maskStdLogFlags), flag: LstdFlags, min: TRACE}\n\tfor _, opt := range opts {\n\t\topt(l)\n\t}\n\treturn l\n}", "func New(fileName string) JLogger {\n\n\tvar l = &jLog{}\n\tl.Logger = newLoggerFile(fileName, true)\n\treturn l\n}", "func New() *Logger {\n\tcfg := zap.NewProductionConfig()\n\tcfg.Level = zap.NewAtomicLevelAt(zap.DebugLevel)\n\n\tzapLogger, err := cfg.Build()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn &Logger{Zap: zapLogger}\n}", "func New(name string) *Logger {\n\t// Ensure there's an entry, but if not yet true then no env-var has yet enabled\n\t// it so default to disabled.\n\tif _, ok := Enabled[name]; !ok {\n\t\tEnabled[name] = false\n\t}\n\n\treturn &Logger{\n\t\tName: name,\n\t\tColor: nextColor(),\n\t}\n}", "func New(outter 
EntryWriter) logr.Logger {\n\treturn &rlog{\n\t\tlevel: 0,\n\t\tname: \"\",\n\t\tfields: make([]interface{}, 0),\n\t\toutter: outter,\n\t}\n}", "func NewLogger(cfg Config) CustomLogger {\n\tvar output io.Writer\n\toutput = cfg.Output\n\n\tif output == nil {\n\t\toutput = os.Stderr\n\t}\n\n\tif cfg.EnableConsoleLogger {\n\t\toutput = zerolog.ConsoleWriter{\n\t\t\tOut: output,\n\t\t\tTimeFormat: time.RFC3339Nano,\n\t\t\tFormatTimestamp: timeFormat,\n\t\t\tNoColor: cfg.testMode,\n\t\t}\n\t}\n\n\tloggerContext := zerolog.New(output).With()\n\tif !cfg.testMode {\n\t\tloggerContext = loggerContext.Timestamp()\n\t\thost, err := os.Hostname()\n\n\t\tif err == nil {\n\t\t\tloggerContext = loggerContext.Str(\"host\", host)\n\t\t}\n\t}\n\n\tif cfg.WithCaller {\n\t\tloggerContext = loggerContext.Caller()\n\t}\n\n\tzerolog.TimeFieldFormat = time.RFC3339Nano\n\tzerolog.ErrorStackMarshaler = pkgerrors.MarshalStack\n\tzerolog.ErrorFieldName = \"error.message\"\n\n\tif cfg.ErrorFieldName != \"\" {\n\t\tzerolog.ErrorFieldName = cfg.ErrorFieldName\n\t}\n\n\treturn CustomLogger{loggerContext.Logger().Level(cfg.LogLevel.toZeroLog())}\n}" ]
[ "0.7956083", "0.76403785", "0.75820285", "0.7515588", "0.7464783", "0.7403431", "0.74015224", "0.73690116", "0.73249155", "0.7323101", "0.73066914", "0.7304487", "0.73012733", "0.72983545", "0.72934717", "0.72890353", "0.728749", "0.72861475", "0.72795135", "0.72777456", "0.72777456", "0.72463566", "0.72239816", "0.72060686", "0.71844274", "0.718017", "0.7171146", "0.7166517", "0.716335", "0.7146909", "0.7143557", "0.71298444", "0.7121637", "0.71154106", "0.7075905", "0.7075695", "0.70642596", "0.70638704", "0.7061442", "0.70484996", "0.7047769", "0.7036085", "0.70322126", "0.70218414", "0.70217294", "0.7012173", "0.7010207", "0.699443", "0.6993525", "0.6992568", "0.69897187", "0.6982379", "0.6982379", "0.6980563", "0.69746834", "0.6973782", "0.6972331", "0.6968666", "0.69669145", "0.69562495", "0.69520205", "0.6946653", "0.6946415", "0.6938434", "0.6934108", "0.69302523", "0.6914918", "0.6912226", "0.6911255", "0.690783", "0.69050443", "0.6904464", "0.6903554", "0.69021004", "0.68995446", "0.68950325", "0.6890487", "0.688612", "0.6883445", "0.6875543", "0.68741935", "0.6868382", "0.68573195", "0.6856086", "0.68338627", "0.68312526", "0.6816967", "0.68145066", "0.6810991", "0.6810076", "0.6804534", "0.6788226", "0.6784907", "0.67787045", "0.677589", "0.677589", "0.67633957", "0.67620504", "0.6759194", "0.6754983" ]
0.7361198
8
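
Aside (editor's illustration, not a dataset row): the positive document for the query above assembles its logger from go-kit primitives. Here is a minimal usage sketch built on the same calls; the import path is a guess (the standalone github.com/go-kit/log module rather than the older github.com/go-kit/kit/log), and the wrapper's own method set is not shown in the row, so the sketch drives the underlying go-kit log.Logger directly.

package main

import (
	"os"

	"github.com/go-kit/log" // assumed import path; the row omits it
)

func main() {
	// Same construction as the positive document: a JSON logger over a
	// synchronized writer, with a UTC timestamp attached under "ts".
	l := log.NewJSONLogger(log.NewSyncWriter(os.Stdout))
	l = log.With(l, "ts", log.DefaultTimestampUTC)

	// Each call emits one JSON object, e.g.
	// {"msg":"server started","port":8080,"ts":"2024-01-01T00:00:00Z"}
	l.Log("msg", "server started", "port", 8080)
}

Because log.DefaultTimestampUTC is a Valuer, the timestamp is re-evaluated on every Log call rather than frozen at construction time.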
ServeHTTP populates the status page template with data and serves it when there is a request.
func (s *StatusPageHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { if s.Authenticator != nil { _, err := s.Authenticator.Authenticate(r.Context(), w, r) if errors.Is(err, oidc.ErrRedirectRequired) { return } if err != nil { http.Error(w, "Error: Authentication failed", http.StatusInternalServerError) log.Logger("webserver").Error("Authentication failed", "error", err, "time", s.Clock.Now().String()) return } } log.Logger("webserver").Info("Applier status request", "time", s.Clock.Now().String()) if s.Template == nil { http.Error(w, "Error: Unable to load HTML template", http.StatusInternalServerError) log.Logger("webserver").Error("Request failed", "error", "No template found", "time", s.Clock.Now().String()) return } ctx, cancel := context.WithTimeout(context.Background(), s.Timeout) defer cancel() waybills, err := s.KubeClient.ListWaybills(ctx) if err != nil { http.Error(w, fmt.Sprintf("Error: Unable to list Waybill resources: %v", err), http.StatusInternalServerError) log.Logger("webserver").Error("Unable to list Waybill resources", "error", err, "time", s.Clock.Now().String()) return } events, err := s.KubeClient.ListWaybillEvents(ctx) if err != nil { http.Error(w, fmt.Sprintf("Error: Unable to list Waybill events: %v", err), http.StatusInternalServerError) log.Logger("webserver").Error("Unable to list Waybill events", "error", err, "time", s.Clock.Now().String()) return } result := GetNamespaces(waybills, events, s.DiffURLFormat) rendered := &bytes.Buffer{} if err := s.Template.ExecuteTemplate(rendered, "index", result); err != nil { http.Error(w, "Error: Unable to render HTML template", http.StatusInternalServerError) log.Logger("webserver").Error("Request failed", "error", http.StatusInternalServerError, "time", s.Clock.Now().String(), "err", err) return } w.WriteHeader(http.StatusOK) if _, err := rendered.WriteTo(w); err != nil { log.Logger("webserver").Error("Request failed", "error", http.StatusInternalServerError, "time", s.Clock.Now().String(), "err", err) } log.Logger("webserver").Info("Request completed successfully", "time", s.Clock.Now().String()) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (s *StatusPageHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tlog.Logger.Info(\"Applier status request\", \"time\", s.Clock.Now().String())\n\tif s.Template == nil {\n\t\thttp.Error(w, \"Error: Unable to load HTML template\", http.StatusInternalServerError)\n\t\tlog.Logger.Error(\"Request failed\", \"error\", \"No template found\", \"time\", s.Clock.Now().String())\n\t\treturn\n\t}\n\tif err := s.Template.Execute(w, s.Data); err != nil {\n\t\thttp.Error(w, \"Error: Unable to load HTML template\", http.StatusInternalServerError)\n\t\tlog.Logger.Error(\"Request failed\", \"error\", http.StatusInternalServerError, \"time\", s.Clock.Now().String())\n\t\treturn\n\t}\n\tlog.Logger.Info(\"Request completed successfully\", \"time\", s.Clock.Now().String())\n}", "func (c *Client) ServeHTTP(rw http.ResponseWriter, req *http.Request) {\n\tinfo := c.Status()\n\tstatusTemplate.Execute(rw, info)\n}", "func (s *StatusWebService) ServeHTTP(\n\trw http.ResponseWriter, req *http.Request) {\n\tvar span *trace.Span\n\tvar mainData = mainStatusData{\n\t\tBootstrapCSSPath: s.bootstrapCSSPath,\n\t\tBootstrapCSSHash: s.bootstrapCSSHash,\n\t\tInstance: s.registry.Instance(),\n\t}\n\tvar ranges []*common.KeyRange\n\tvar table string\n\tvar err error\n\n\t_, span = trace.StartSpan(\n\t\tcontext.Background(), \"red-cloud.StatusWebService/ServeHTTP\")\n\tdefer span.End()\n\n\tfor table, ranges = range s.registry.GetRanges() {\n\t\tvar rn *common.KeyRange\n\n\t\tfor _, rn = range ranges {\n\t\t\tmainData.Tablets = append(mainData.Tablets, &tableKeyRange{\n\t\t\t\tTable: table,\n\t\t\t\tKeyRange: rn,\n\t\t\t})\n\t\t}\n\t}\n\n\tif err = mainStatusTemplate.Execute(rw, mainData); err != nil {\n\t\tspan.AddAttributes(trace.StringAttribute(\"error\", err.Error()))\n\t\tspan.Annotate(nil, \"Error executing web status page template\")\n\t\tlog.Print(\"Error sending status page: \", err)\n\t}\n}", "func (con *Controller) ServeHTTP(res http.ResponseWriter, req *http.Request) {\n\tstartTime := time.Now()\n\n\tswitch con.Status {\n\tcase 0: // This will actually never show because this function won't run if the server is off\n\t\thttp.Error(res, \"The server is currently down and not serving requests.\", http.StatusServiceUnavailable)\n\t\treturn\n\tcase 1: // Normal\n\t\tbreak\n\tcase 2: // Maintenance mode\n\t\thttp.Error(res, \"The server is currently maintenance mode and not serving requests.\", http.StatusServiceUnavailable)\n\t\treturn\n\tcase 3: // This will actually never show because this function won't run if the server is off\n\t\thttp.Error(res, \"The server is currently restarting.\", http.StatusServiceUnavailable)\n\t\treturn\n\t}\n\n\tpath := req.URL.Path[1:]\n\tif len(path) == 0 {\n\t\tpath = (con.DocumentRoot + \"index.html\")\n\t} else {\n\t\tpath = (con.DocumentRoot + path)\n\t}\n\n\tf, err := os.Open(path)\n\n\tif err != nil {\n\t\tcon.PublicLogger.Write(path + \"::\" + strconv.Itoa(http.StatusNotFound) + \"::\" + err.Error())\n\t\trouting.HttpThrowStatus(http.StatusNotFound, res)\n\t\treturn\n\t}\n\n\tcontentType, err := routing.GetContentType(path)\n\n\tif err != nil {\n\t\tcon.PublicLogger.Write(path + \"::\" + strconv.Itoa(http.StatusUnsupportedMediaType) + \"::\" + err.Error())\n\t\trouting.HttpThrowStatus(http.StatusUnsupportedMediaType, res)\n\t\treturn\n\t}\n\n\tres.Header().Add(\"Content-Type\", contentType)\n\t_, err = io.Copy(res, f)\n\n\tif err != nil {\n\t\tcon.PublicLogger.Write(path + \"::\" + strconv.Itoa(http.StatusInternalServerError) + \"::\" + 
err.Error())\n\t\trouting.HttpThrowStatus(http.StatusInternalServerError, res)\n\t\treturn\n\t}\n\n\telapsedTime := time.Since(startTime)\n\tcon.LoadTimeLogger.Write(path + \" rendered in \" + strconv.FormatFloat(elapsedTime.Seconds(), 'f', 6, 64) + \" seconds\")\n}", "func statusHandler(w http.ResponseWriter, _ *http.Request) {\n\tbToMb := func(b uint64) string {\n\t\treturn fmt.Sprintf(\"%v MiB\", b/1024/1024)\n\t}\n\tvar m runtime.MemStats\n\truntime.ReadMemStats(&m)\n\n\trowData := [][2]string{\n\t\t{\"Total Files:\", fmt.Sprintf(\"%v\", uint64(fileCount(config.Global.Root)))},\n\t\t{\"Media Folder Size:\", bToMb(uint64(folderSize(config.Global.Root)))},\n\t\t{\"Thumbnail Folder Size:\", bToMb(uint64(folderSize(config.Global.Cache)))},\n\t\t{\"Folders Watched:\", fmt.Sprint(gallery.WatchedFolders)},\n\t\t{\"Public Url:\", config.Global.PublicUrl},\n\t\t{\"Prefix:\", config.Global.Prefix},\n\t\t{\"-\", \"\"},\n\t\t{\"Alloc Memory:\", bToMb(m.Alloc)},\n\t\t{\"Sys Memory:\", bToMb(m.Sys)},\n\t\t{\"Goroutines:\", fmt.Sprint(runtime.NumGoroutine())},\n\t\t{\"-\", \"\"},\n\t\t{\"App Version:\", BuildVersion},\n\t\t{\"App Build Date:\", BuildTimestamp},\n\t\t{\"Service Uptime:\", time.Since(startTime).String()},\n\t}\n\n\tpage := templates.TwoColTable{\n\t\tPage: templates.Page{\n\t\t\tTitle: \"System Status\",\n\t\t\tPrefix: urlPrefix,\n\t\t\tAppVersion: BuildVersion,\n\t\t\tAppBuildTime: BuildTimestamp,\n\t\t},\n\t\tRows: rowData,\n\t}\n\t_ = templates.Html.ExecuteTemplate(w, \"table\", &page)\n}", "func serveHealthStatus(w http.ResponseWriter, req *http.Request) {\n\tfmt.Fprintf(w, \"OK\")\n}", "func (service StatusHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\n\t// Get account information\n\taccount, _ := AccountAndSessionFromRequestContext(r)\n\n\tapplication, fetchErr := service.Store.LoadApplication(account.ID)\n\tif fetchErr != nil {\n\t\tif fetchErr == api.ErrApplicationDoesNotExist {\n\t\t\tservice.Log.Info(\"Status requested for application that has not been created\", api.LogFields{})\n\t\t} else {\n\t\t\tservice.Log.WarnError(api.StatusError, fetchErr, api.LogFields{})\n\t\t\tRespondWithStructuredError(w, api.StatusError, http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t}\n\n\thash, hashErr := application.Hash()\n\tif hashErr != nil {\n\t\tservice.Log.WarnError(api.HashingFailure, hashErr, api.LogFields{})\n\t\tRespondWithStructuredError(w, api.HashingFailure, http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tstatus := formStatusInfo{\n\t\tStatus: account.Status,\n\t\tHash: hash,\n\t}\n\n\tstatusBytes, jsonErr := json.Marshal(status)\n\tif jsonErr != nil {\n\t\tservice.Log.WarnError(api.StatusError, jsonErr, api.LogFields{})\n\t\tRespondWithStructuredError(w, api.StatusError, http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\t// Get the CSRF token and add it as a header\n\tAddCSRFTokenHeader(w, r)\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tfmt.Fprint(w, string(statusBytes))\n}", "func (t *templateHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n t.once.Do(func() { // not matter how many gorous call this, this func() will only be called once for the current instance of \"templateHandler\"\n t.templ = template.Must(template.ParseFiles(filepath.Join(\"templates\",\n t.filename)))\n })\n t.templ.Execute(w, nil)\n }", "func (h *TemplateFileHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\ttemplateFileName := r.URL.Path\n\tif templateFileName == \"\" || strings.HasSuffix(templateFileName, \"/\") 
{\n\t\ttemplateFileName += \"index.html\"\n\t}\n\tlog.Println(\"fetch \" + templateFileName)\n\trealPath := path.Join(h.RootDir, templateFileName)\n\t// caching\n\ttempl, err := template.ParseFiles(realPath)\n\t// should have the option of specify an error page\n\tif err != nil {\n\t\tlog.Printf(\"error when parse template %#v \\n\", err)\n\t\thttp.Error(w, \"some thing wrong\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\tdata := h.DataFunc(r)\n\tcontentType := mime.TypeByExtension(filepath.Ext(templateFileName))\n\t// should use some constant or sniff the type\n\tw.Header().Set(\"Content-Type\", contentType)\n\ttempl.Execute(w, &data)\n}", "func (exp *attackcost) StatusPage(w http.ResponseWriter, r *http.Request, code, message, additionalInfo string, sType web.ExpStatus) {\n\tcommonPageData := exp.commonData(r)\n\tif commonPageData == nil {\n\t\t// exp.blockData.GetTip likely failed due to empty DB.\n\t\thttp.Error(w, \"The database is initializing. Try again later.\",\n\t\t\thttp.StatusServiceUnavailable)\n\t\treturn\n\t}\n\tstr, err := exp.templates.Exec(\"status\", struct {\n\t\t*web.CommonPageData\n\t\tStatusType web.ExpStatus\n\t\tCode string\n\t\tMessage string\n\t\tAdditionalInfo string\n\t}{\n\t\tCommonPageData: commonPageData,\n\t\tStatusType: sType,\n\t\tCode: code,\n\t\tMessage: message,\n\t\tAdditionalInfo: additionalInfo,\n\t})\n\tif err != nil {\n\t\tlog.Errorf(\"Template execute failure: %v\", err)\n\t\tstr = \"Something went very wrong if you can see this, try refreshing\"\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"text/html\")\n\tswitch sType {\n\tcase web.ExpStatusDBTimeout:\n\t\tw.WriteHeader(http.StatusServiceUnavailable)\n\tcase web.ExpStatusNotFound:\n\t\tw.WriteHeader(http.StatusNotFound)\n\tcase web.ExpStatusFutureBlock:\n\t\tw.WriteHeader(http.StatusOK)\n\tcase web.ExpStatusError:\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t// When blockchain sync is running, status 202 is used to imply that the\n\t// other requests apart from serving the status sync page have been received\n\t// and accepted but cannot be processed now till the sync is complete.\n\tcase web.ExpStatusSyncing:\n\t\tw.WriteHeader(http.StatusAccepted)\n\tcase web.ExpStatusNotSupported:\n\t\tw.WriteHeader(http.StatusUnprocessableEntity)\n\tcase web.ExpStatusBadRequest:\n\t\tw.WriteHeader(http.StatusBadRequest)\n\tdefault:\n\t\tw.WriteHeader(http.StatusServiceUnavailable)\n\t}\n\tio.WriteString(w, str)\n}", "func (s *statusHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tw.WriteHeader(int(*s))\n}", "func (handler Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tfmt.Printf(\"%s %s %s\\n\", r.RemoteAddr, r.Method, r.URL.Path)\n\tt1 := time.Now()\n\thandler.store.ExpireSessions()\n\treq := newReqImpl(w, r, handler.store)\n\thandler.serve(req)\n\tif req.html != \"\" {\n\t\tio.WriteString(w, req.html)\n\t} else if req.template != \"\" {\n\t\tt := template.New(\"\").Funcs(handler.funcmap)\n\t\tt, err := t.ParseGlob(handler.templatePattern)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\terr = t.ExecuteTemplate(w, req.template, req.model)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t} else if req.redirect != \"\" {\n\t\thttp.Redirect(w, r, req.redirect, http.StatusFound)\n\t} else if req.status != 0 {\n\t\tmsg := http.StatusText(req.status)\n\t\thttp.Error(w, msg, req.status)\n\t} else {\n\t\tio.WriteString(w, \"no result\")\n\t}\n\td := time.Since(t1)\n\tfmt.Printf(\"%s %s %s - %f s\\n\", r.RemoteAddr, r.Method, r.URL.Path, float64(d)/1e9)\n}", "func 
handlerMain(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Server\", \"Go WebServer\")\n\tw.Header().Set(\"Content-Type\", \"text/html\")\n\tserver := r.URL.Query().Get(\"server\")\n\treadCookies(r)\n\t_, _ = io.WriteString(w, tplMain(getServerStatus(), server))\n}", "func (i indexHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tlog.Printf(\"You are all my minions, %v, beware %v, %v!\\n\", r.RemoteAddr, r.Method, r.URL)\n\tif r.URL.Path != \"/\" {\n\t\tlog.Printf(\"Sirree, this is a wrong URL path: %v!\\n\", r.URL.Path)\n\t\tw.WriteHeader(http.StatusNotFound)\n\t\tfmt.Fprintf(w, i.pageNotFound)\n\t\treturn\n\t}\n\tif r.Method != \"GET\" {\n\t\tlog.Printf(\"Madam, the method thou art using is wrong: %v!\\n\", r.Method)\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tfmt.Fprintf(w, i.pageBadRequest)\n\t\treturn\n\t}\n\tdata := pageData{\n\t\tTitle: \"Welcome\",\n\t\tVersion: fmt.Sprintf(\"This is version %v\", i.version),\n\t}\n\tif err := i.tmpl.Execute(w, data); err != nil {\n\t\tpanic(err)\n\t}\n}", "func (t *staticTemplateHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tif err := t.templ.Execute(w, t.data); err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t}\n}", "func (h *App) ServeHTTP(res http.ResponseWriter, req *http.Request) {\n\tvar head string\n\thead, req.URL.Path = ShiftPath(req.URL.Path)\n\tswitch head {\n\tcase \"hash\":\n\t\tbefore := makeTimestamp()\n\t\th.hashHandler.ServeHTTP(res, req)\n\t\tafter := makeTimestamp()\n\t\tif req.Method == \"POST\" && req.URL.Path == \"/\" {\n\t\t\tpostStats = append(postStats, (after-before))\n\t\t\tlog.Print(postStats)\n\t\t\tlog.Printf(\"before: %d\", before)\n\t\t\tlog.Printf(\"after: %d\", after)\n\t\t}\n\tcase \"stats\":\n\t\th.statsHandler.ServeHTTP(res, req)\n\tcase \"shutdown\":\n\t\th.shutdownHandler.ServeHTTP(res, req)\n\tdefault:\n\t\thttp.Error(res, \"Not Found\", http.StatusNotFound)\n\t}\n}", "func (t *templateHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tt.once.Do(func() {\n\t\tt.templ = template.Must(template.ParseFiles(filepath.Join(\"templates\", t.filename)))\n\t})\n\tt.templ.Execute(w, r)\n}", "func (h healthHandler) ServeHTTP(writer http.ResponseWriter, _ *http.Request) {\n\tvar status int\n\tvar body []byte\n\tif h.status == Up {\n\t\tstatus = http.StatusOK\n\t} else {\n\t\tstatus = http.StatusInternalServerError\n\t}\n\tif h.useJSON {\n\t\twriter.Header().Set(\"Content-Type\", \"application/json\")\n\t\tbody = []byte(`{\"status\":\"` + h.status + `\"}`)\n\t} else {\n\t\tbody = []byte(h.status)\n\t}\n\twriter.WriteHeader(status)\n\t_, _ = writer.Write(body)\n}", "func (t *TemplateHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tvariablesMap := map[string]interface{}{}\n\tconfigName := filepath.Clean(r.URL.Path)\n\n\tif configName == \"\" {\n\t\thttp.Error(w, \"No template name provided\", http.StatusNotFound)\n\t\treturn\n\t}\n\n\tfor key, val := range r.URL.Query() {\n\t\tvariablesMap[key] = val[0]\n\t}\n\n\tenv := envFromRequest(r)\n\tenvName := envNameFromRequest(r)\n\tvariablesMap[\"baseURL\"] = utils.BaseURLforEnvName(env.BaseURL, envName)\n\n\tconfigString, err := env.Templates.RenderTemplate(env.Logger, configName, variablesMap, envName)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t} else {\n\t\tio.WriteString(w, configString)\n\t}\n}", "func (t *templateHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tt.once.Do(func(){\n\t\tt.templ = 
template.Must(\n\t\t\ttemplate.ParseFiles(\n\t\t\t\tfilepath.Join(\"templates\", t.filename)))\n\t})\n\tdata := map[string]interface{}{\n\t\t\"Host\": r.Host,\n\t}\n\tif authCookie, err := r.Cookie(\"auth\"); err == nil {\n\t\tdata[\"UserData\"] = objx.MustFromBase64(authCookie.Value)\n\t}\n\tt.templ.Execute(w, data)\n}", "func (t *templateHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tt.once.Do(func() {\n\t\tt.templ = template.Must(template.ParseFiles(filepath.Join(\"templates\", t.filename)))\n\t})\n\tdata := map[string]interface{}{\n\t\t\"Host\": r.Host,\n\t}\n\t//Decode value in the \"auth\" cookie sent by the client\n\tif authCookie, err := r.Cookie(\"auth\"); err == nil {\n\t\tdata[\"UserData\"] = objx.MustFromBase64(authCookie.Value)\n\t}\n\tt.templ.Execute(w, data)\n}", "func serveHTTP(c *RequestContext, w http.ResponseWriter, r *http.Request) (int, error) {\n\t// Checks if the URL contains the baseURL and strips it. Otherwise, it just\n\t// returns a 404 error because we're not supposed to be here!\n\tp := strings.TrimPrefix(r.URL.Path, c.FM.BaseURL)\n\n\tif len(p) >= len(r.URL.Path) && c.FM.BaseURL != \"\" {\n\t\treturn http.StatusNotFound, nil\n\t}\n\n\tr.URL.Path = p\n\n\t// Check if this request is made to the service worker. If so,\n\t// pass it through a template to add the needed variables.\n\tif r.URL.Path == \"/sw.js\" {\n\t\treturn renderFile(\n\t\t\tw,\n\t\t\tc.FM.assets.MustString(\"sw.js\"),\n\t\t\t\"application/javascript\",\n\t\t\tc,\n\t\t)\n\t}\n\n\t// Checks if this request is made to the static assets folder. If so, and\n\t// if it is a GET request, returns with the asset. Otherwise, returns\n\t// a status not implemented.\n\tif matchURL(r.URL.Path, \"/static\") {\n\t\tif r.Method != http.MethodGet {\n\t\t\treturn http.StatusNotImplemented, nil\n\t\t}\n\n\t\treturn staticHandler(c, w, r)\n\t}\n\n\t// Checks if this request is made to the API and directs to the\n\t// API handler if so.\n\tif matchURL(r.URL.Path, \"/api\") {\n\t\tr.URL.Path = strings.TrimPrefix(r.URL.Path, \"/api\")\n\t\treturn apiHandler(c, w, r)\n\t}\n\n\t// Any other request should show the index.html file.\n\tw.Header().Set(\"x-frame-options\", \"SAMEORIGIN\")\n\tw.Header().Set(\"x-content-type\", \"nosniff\")\n\tw.Header().Set(\"x-xss-protection\", \"1; mode=block\")\n\n\treturn renderFile(\n\t\tw,\n\t\tc.FM.assets.MustString(\"index.html\"),\n\t\t\"text/html\",\n\t\tc,\n\t)\n}", "func (mw *Stats) ServeHTTP(w http.ResponseWriter, r *http.Request, next http.HandlerFunc) {\n\tbeginning, recorder := mw.Begin(w)\n\n\tnext(recorder, r)\n\n\tmw.End(beginning, WithRecorder(recorder))\n}", "func (t *Timer) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tdefer t.UpdateSince(time.Now())\n\tt.handler.ServeHTTP(w, r)\n}", "func (s Sample) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n fmt.Fprint(w, \"<h1>Welcome to Go.Land Server!</h1>\")\n}", "func (c *Context) Handle(status int, title string, err error) {\n\tswitch status {\n\tcase http.StatusNotFound:\n\t\tc.Data[\"Title\"] = \"Page Not Found\"\n\tcase http.StatusInternalServerError:\n\t\tc.Data[\"Title\"] = \"Internal Server Error\"\n\t\tlog.Fatal(2, \"%s: %v\", title, err)\n\t}\n\tc.HTML(status, fmt.Sprintf(\"status/%d\", status))\n}", "func (h Home) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\", \"text/html\")\n\tindex := defaultIndex()\n\tfmt.Fprint(w, index)\n}", "func (h *Healthcheck) ServeHTTP(w http.ResponseWriter, _ *http.Request) {\n\tvar resp response\n\tvar allAlive = 
true\n\n\tfor name, status := range h.statusProvider() {\n\t\talive, details := status.Status()\n\t\tif !alive {\n\t\t\tallAlive = false\n\t\t}\n\n\t\tresp.Services = append(resp.Services, service{\n\t\t\tName: name,\n\t\t\tAlive: alive,\n\t\t\tDetails: details,\n\t\t})\n\t}\n\n\tvar status = http.StatusOK\n\tif !allAlive {\n\t\tstatus = http.StatusInternalServerError\n\t}\n\n\trespBody, err := json.Marshal(resp)\n\thandleErr(err, w)\n\n\tw.Header().Add(\"Content-Type\", \"application/json; charset=utf-8\")\n\tw.WriteHeader(status)\n\t_, err = w.Write(respBody)\n\thandleErr(err, w)\n}", "func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tif r.URL.Path != \"/\" {\n\t\thttp.NotFound(w, r)\n\t\treturn\n\t}\n\tbv := &baseValues{\n\t\tBucketName: h.BucketName,\n\t\tCycles: []*db.Cycle{},\n\t}\n\tcycles, err := h.Cycles.List(r.Context())\n\tif err != nil {\n\t\tlog.Printf(\"could not list cycles: %v\", err)\n\t\tbv.DisplayError = \"The enhanced FAA CIFP data U/S. We apologize for the inconvenience.\"\n\t} else {\n\t\tbv.Cycles = cycles\n\t}\n\n\tw.Header().Set(\"content-type\", \"text/html\")\n\tif err := templates.Base.Execute(w, bv); err != nil {\n\t\tlog.Printf(\"could not execute template: %v\", err)\n\t}\n}", "func (t *AppTemplate) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tctx := r.Context()\n\ttemplateData := ctx.Value(templateDataKey).(TemplateData)\n\ttemplateData.CSRFToken = csrf.Token(r)\n\tvar cspNonce string\n\tif v, ok := ctx.Value(nonceKey).(string); ok {\n\t\tcspNonce = v\n\t}\n\tcssFiles := make([]string, len(templateData.CSSFiles))\n\tfor i, cssFile := range templateData.CSSFiles {\n\t\tif hashedFile, ok := hashedFiles[cssFile]; ok {\n\t\t\tcssFiles[i] = hashedFile\n\t\t} else {\n\t\t\tcssFiles[i] = cssFile\n\t\t}\n\t}\n\ttemplateData.CSSFiles = cssFiles\n\tjsFiles := make([]string, len(templateData.JSFiles))\n\tfor i, jsFile := range templateData.JSFiles {\n\t\tif hashedFile, ok := hashedFiles[jsFile]; ok {\n\t\t\tjsFiles[i] = hashedFile\n\t\t} else {\n\t\t\tjsFiles[i] = jsFile\n\t\t}\n\t}\n\ttemplateData.JSFiles = jsFiles\n\tpageData := ctx.Value(pageDataKey)\n\tif err := webhandlers.RetrieveError(r); err != nil {\n\t\tpageData = map[string]any{\n\t\t\t\"error\": err,\n\t\t}\n\t}\n\tif err := t.template.Execute(w, Data{\n\t\tTemplateData: templateData,\n\t\tAppConfig: ctx.Value(appConfigKey),\n\t\tExperimentalFeatures: experimental.AllFeatures(ctx),\n\t\tPageData: pageData,\n\t\tCSPNonce: cspNonce,\n\t}); err != nil {\n\t\tlog.FromContext(ctx).WithError(err).Warn(\"Failed to execute template\")\n\t}\n}", "func mainHandler(w http.ResponseWriter, req *http.Request) {\n\tif req.Method != \"GET\" {\n\t\tw.Header().Set(\"Allow\", \"GET\")\n\t\thttp.Error(w, \"method should be GET\", http.StatusMethodNotAllowed)\n\t\treturn\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"text/html; charset=utf-8\")\n\tw.Header().Set(\"X-Content-Type-Options\", \"nosniff\")\n\n\tdata := struct {\n\t\tProduction bool\n\t\tHTTPAddr string\n\t}{\n\t\tProduction: production,\n\t\tHTTPAddr: *httpFlag,\n\t}\n\terr := t.ExecuteTemplate(w, \"head.html.tmpl\", data)\n\tif err != nil {\n\t\tlog.Println(\"ExecuteTemplate head.html.tmpl:\", err)\n\t\treturn\n\t}\n\n\tflusher := w.(http.Flusher)\n\tflusher.Flush()\n\n\tvar updatesAvailable = 0\n\tvar wroteInstalledUpdatesHeader bool\n\n\tfor repoPresentation := range c.pipeline.RepoPresentations() {\n\t\tif !repoPresentation.Updated {\n\t\t\tupdatesAvailable++\n\t\t}\n\n\t\tif repoPresentation.Updated && 
!wroteInstalledUpdatesHeader {\n\t\t\t// Make 'Installed Updates' header visible now.\n\t\t\tio.WriteString(w, `<div id=\"installed_updates\"><h3 style=\"text-align: center;\">Installed Updates</h3></div>`)\n\n\t\t\twroteInstalledUpdatesHeader = true\n\t\t}\n\n\t\terr := t.ExecuteTemplate(w, \"repo.html.tmpl\", repoPresentation)\n\t\tif err != nil {\n\t\t\tlog.Println(\"ExecuteTemplate repo.html.tmpl:\", err)\n\t\t\treturn\n\t\t}\n\n\t\tflusher.Flush()\n\t}\n\n\tif !wroteInstalledUpdatesHeader {\n\t\t// TODO: Make installed_updates available before all packages finish loading, so that it works when you update a package early. This will likely require a fully dynamically rendered frontend.\n\t\t// Append 'Installed Updates' header, but keep it hidden.\n\t\tio.WriteString(w, `<div id=\"installed_updates\" style=\"display: none;\"><h3 style=\"text-align: center;\">Installed Updates</h3></div>`)\n\t}\n\n\tif updatesAvailable == 0 {\n\t\tio.WriteString(w, `<script>document.getElementById(\"no_updates\").style.display = \"\";</script>`)\n\t}\n\n\terr = t.ExecuteTemplate(w, \"tail.html.tmpl\", nil)\n\tif err != nil {\n\t\tlog.Println(\"ExecuteTemplate tail.html.tmpl:\", err)\n\t\treturn\n\t}\n}", "func (h TestServerHandler) ServeHTTP(writer http.ResponseWriter, request *http.Request) {\n\twriter.WriteHeader(h.StatusCode)\n\twriter.Header().Add(\"Content-Type\", \"text/plain\")\n\t_, _ = writer.Write([]byte(h.Content))\n}", "func (t Telemetry) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tt.rCount.Mark(1)\n\tsw := MakeLogger(w)\n\n\tstart := time.Now()\n\tt.inner.ServeHTTP(sw, r)\n\tt.tmr.Update(int64(time.Since(start) / time.Millisecond))\n\n\tif sw.Status() >= 300 {\n\t\tt.fCount.Mark(1)\n\t} else {\n\t\tt.sCount.Mark(1)\n\t}\n\n}", "func (t *templateHandler) ServeHTTP (w http.ResponseWriter, r *http.Request) {\n\tt.once.Do(func() {\n\t\t// template/t.filename\n\t\t// template/chat.html\n\t\tt.templ = template.Must(template.ParseFiles(filepath.Join(\"templates\", t.filename)))\n\t})\n\tdata := map[string]interface{}{\n\t\t\"Host\": r.Host,\n\t}\n\tif authCookie, err := r.Cookie(\"auth\"); err == nil {\n\t\tdata[\"UserData\"] = objx.MustFromBase64(authCookie.Value)\n\t}\n\tt.templ.Execute(w, data)\n\t// w?\n\t// r *http.Request mengandung data port\n\t//t.templ.Execute(w, r)\n}", "func viewHandler(w http.ResponseWriter, r *http.Request) {\n\t\n\ttitle := r.URL.Path[len(\"/\"):]\n\tp, err := loadPage(title, r)\n\n\tz := strings.Split(title, \"/\")\n\tif z[0] == \"commits\" {\n\t\thttp.ServeFile(w, r, title)\n\t\treturn\n\t}\n\n\tif err != nil && !cookies.IsLoggedIn(r) {\n\t\thttp.Redirect(w, r, \"static/templates/base.html\", http.StatusFound)\n\t\treturn\n\t} else if err != nil { \n\t\thttp.Redirect(w, r, \"static/templates/_commits.html\", http.StatusFound)\n\t\treturn\n\t}\n\n\tt, _ := template.ParseFiles(\"static/templates/_status.html\")\n\tt.Execute(w, p)\n}", "func (s *SimpleHealthCheck) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tif _, err := io.WriteString(w, \"ok-\"+Name); err != nil {\n\t\tLogWithFields(r).Warn(\"unable to write healthcheck response: \", err)\n\t}\n}", "func mainHandler(w http.ResponseWriter, r *http.Request) {\n\tif *local {\n\t\tloadTemplates()\n\t}\n\tif r.Method == \"GET\" {\n\t\tw.Header().Set(\"Content-Type\", \"text/html\")\n\t\tif err := indexTemplate.Execute(w, struct{}{}); err != nil {\n\t\t\tsklog.Errorln(\"Failed to expand template:\", err)\n\t\t}\n\t}\n}", "func (h *Handler) servePing(w http.ResponseWriter, r *http.Request) {}", "func 
(rh *RandomHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\t// Increment serveCounter every time we serve a page.\n\trh.serveCounter.Inc()\n\n\tn := rand.Float64()\n\t// Track the cumulative values served.\n\trh.valueCounter.Add(n)\n\n\tfmt.Fprintf(w, \"%v\", n)\n}", "func (h *StatsHandler) ServeHTTP(res http.ResponseWriter, req *http.Request) {\n\n\tres.Header().Set(\"Content-Type\", \"application/json\")\n\ttotal := postCount.Count()\n\tvar average float64\n\tvar sum int64\n\taverage = 0\n\tif len(postStats) != 0 {\n\t\tfor i := 0; i < len(postStats); i++ {\n\t\t\tsum += postStats[i]\n\t\t}\n\t\t//Response times were well under a millisecond, so this...\n\t\taverage = float64(sum / int64(total)) / 1000.0\n\t}\n\n\tfmt.Fprintf(res, `{\"total\": %d, \"average\": %f}`, total, average)\n}", "func (e *HealthCheck) ServeHTTP(rw http.ResponseWriter, r *http.Request) {\n\trw.Header().Add(\"Content-Type\", \"application/json\")\n\trw.WriteHeader(http.StatusOK)\n\tvar message = SimpleResponse{http.StatusOK, \"Healthy\"}\n\tjsonEncoder := json.NewEncoder(rw)\n\tjsonEncoder.Encode(message)\n}", "func HandleStatus(www http.ResponseWriter, req *http.Request) {\n\tif isTLS {\n\t\twww.Header().Add(\"Strict-Transport-Security\", \"max-age=63072000; includeSubDomains\")\n\t}\n\twww.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\twww.WriteHeader(http.StatusOK) // 200\n\tfmt.Fprintf(www, `{\"status\":\"success\"}`)\n\treturn\n}", "func (r *Renderer) RenderHTMLStatus(w http.ResponseWriter, code int, tmpl string, data interface{}) {\n\t// Hello there reader! If you've made it here, you're likely wondering why\n\t// you're getting an error about response codes. For client-interop, it's very\n\t// important that we retain and maintain the allowed list of response codes.\n\t// Adding a new response code requires coordination with the client team so\n\t// they can update their applications to handle that new response code.\n\tif !r.AllowedResponseCode(code) {\n\t\tr.logger.Errorw(\"unregistered response code\", \"code\", code)\n\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tmsg := fmt.Sprintf(\"%d is not a registered response code\", code)\n\t\tfmt.Fprintf(w, htmlErrTmpl, msg)\n\t\treturn\n\t}\n\n\tif r.debug {\n\t\tif err := r.loadTemplates(); err != nil {\n\t\t\tr.logger.Errorw(\"failed to reload templates in renderer\", \"error\", err)\n\n\t\t\tmsg := html.EscapeString(err.Error())\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\tfmt.Fprintf(w, htmlErrTmpl, msg)\n\t\t\treturn\n\t\t}\n\t}\n\n\t// Acquire a renderer\n\tb := r.rendererPool.Get().(*bytes.Buffer)\n\tb.Reset()\n\tdefer r.rendererPool.Put(b)\n\n\t// Render into the renderer\n\tif err := r.executeHTMLTemplate(b, tmpl, data); err != nil {\n\t\tr.logger.Errorw(\"failed to execute html template\", \"error\", err)\n\n\t\tmsg := \"An internal error occurred.\"\n\t\tif r.debug {\n\t\t\tmsg = err.Error()\n\t\t}\n\t\tmsg = html.EscapeString(msg)\n\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tfmt.Fprintf(w, htmlErrTmpl, msg)\n\t\treturn\n\t}\n\n\t// Rendering worked, flush to the response\n\tw.Header().Set(\"Content-Type\", \"text/html; charset=UTF-8\")\n\tw.WriteHeader(code)\n\tif _, err := b.WriteTo(w); err != nil {\n\t\t// We couldn't write the buffer. 
We can't change the response header or\n\t\t// content type if we got this far, so the best option we have is to log the\n\t\t// error.\n\t\tr.logger.Errorw(\"failed to write html to response\", \"error\", err)\n\t}\n}", "func (h *HTML) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tif r.Method != \"GET\" && r.Method != \"HEAD\" {\n\t\tmsg := fmt.Sprintf(\"405 Method Not Allowed (%s)\", r.Method)\n\t\thttp.Error(w, msg, http.StatusMethodNotAllowed)\n\t\treturn\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"text/html; charset=utf-8\")\n\tw.WriteHeader(http.StatusOK)\n\ttpl.ExecuteTemplate(w, \"html.tpl\", h)\n}", "func StatusHandler(w http.ResponseWriter, r *http.Request) {\n\tstatus_requests_cnt.Inc()\n\n\tw.Write([]byte(\"API is up and running\"))\n}", "func StatusHandler(w http.ResponseWriter, r *http.Request) {\n\tfmt.Fprintf(w, \"Status: OK\")\n}", "func (a *Application) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tpm := page.GetPageManager()\n\tif pm.IsPage(r.URL.Path) {\n\t\tctx := r.Context()\n\t\tpm.RunPage(ctx, w, r)\n\t}\n}", "func (p *Ping) ServeHTTP(w http.ResponseWriter, _ *http.Request) {\n\tw.WriteHeader(http.StatusOK)\n\t_, err := w.Write([]byte(\"OK\"))\n\thandleErr(err, w)\n}", "func GetMainPage(w http.ResponseWriter, r *http.Request) {\n\n\t//Http Header\n\tw.Header().Set(\"Content-Type\", \"text/html; charset=UTF-8\")\n\tw.WriteHeader(http.StatusOK)\n\n\t//For Debug only so the server musnt bne started all over again when .html file is rewirtten again!\n\t//tmpl = template.Must(template.ParseGlob(\"src/views/*.html\"))\n\n\t//Return Webpage\n\ttmpl.ExecuteTemplate(w, \"StartPage\", nil)\n\n}", "func (c Checks) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tvar errFound bool\n\tfor _, check := range c.Checks {\n\t\terr := check.Check(c.Context)\n\t\tif err != nil {\n\t\t\tif c.Logger != nil {\n\t\t\t\tc.Logger(\"Error performing health check for %T (%s): %+v\\n\", check, check.LogInfo(c.Context), err)\n\t\t\t}\n\t\t\terrFound = true\n\t\t}\n\t}\n\tif errFound {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tw.Write([]byte(\"Everything is on fire and nothing is okay.\"))\n\t\treturn\n\t}\n\tw.WriteHeader(http.StatusOK)\n\tw.Write([]byte(\"OK\"))\n}", "func (ch citiesHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tlog.Printf(\"You are all my minions, %q, beware %v, %v!\\n\", r.RemoteAddr, r.Method, r.URL)\n\tif r.Method != \"GET\" {\n\t\tlog.Printf(\"This ain't right: %v!\\n\", r.Method)\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\thtml, err := getFile(\"html/400.html\")\n\t\tif err != nil {\n\t\t\tlog.Panicf(\"O bozhe moi, I failed to read the file %v\\n\", err)\n\t\t}\n\t\tfmt.Fprintf(w, string(html))\n\t\treturn\n\t}\n\tif r.URL.Path != fmt.Sprintf(\"/by-%s\", ch.criteria) {\n\t\tlog.Printf(\"This ain't right: %v!\\n\", r.URL.Path)\n\t\tw.WriteHeader(http.StatusNotFound)\n\t\thtmlo, err := getFile(\"html/404.html\")\n\t\tif err != nil {\n\t\t\tlog.Panicf(\"Oioioi, there is a problem reading the file: %v\\n\", err)\n\t\t}\n\t\tfmt.Fprintf(w, string(htmlo))\n\t\treturn\n\t}\n\thtmlo, err := getFile(\"html/cities.html.tmpl\")\n\tif err != nil {\n\t\tlog.Panicf(\"Oivey, there is a problem reading the file: %v\\n\", err)\n\t}\n\tt, err := template.New(\"webpage\").Parse(string(htmlo))\n\tif err != nil {\n\t\tlog.Panicf(\"Help, I couldn't parse the %v\\n\", err)\n\t}\n\tCities.sortBy(ch.criteria)\n\tdata := pageData{\n\t\tTitle: fmt.Sprintf(\"By %s\", ch.criteria),\n\t\tCriteria: ch.criteria,\n\t\tCities: 
Cities,\n\t}\n\n\terr = t.Execute(w, data)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}", "func (h stubbingHandler) ServeHTTP(w http.ResponseWriter, _ *http.Request) {\n\th.mutex.Lock()\n\tdefer h.mutex.Unlock()\n\tresponses := h.holder.responses\n\tif len(responses) > 0 {\n\t\tresp := responses[0]\n\t\tw.WriteHeader(resp.responseCode)\n\t\t_, err := w.Write([]byte(resp.body))\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"Can't write the response: %v\", err)\n\t\t}\n\n\t\tswitch resp.times {\n\t\tcase 0:\n\t\t\tbreak\n\t\tcase 1:\n\t\t\tshortened := responses[1:]\n\t\t\th.holder.responses = shortened\n\t\tdefault:\n\t\t\tresp.times--\n\t\t}\n\t} else {\n\t\tw.WriteHeader(http.StatusNotFound)\n\t}\n}", "func (s *server) serveHealth(w http.ResponseWriter, r *http.Request) {\n\tif s.isShuttingDown {\n\t\thttp.Error(w, \"Shutting Down\", 503)\n\t}\n\n\tw.WriteHeader(http.StatusOK)\n}", "func (h apiHealthCheckPublicHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tdata, err := h.manager.healthManager.JSON()\n\tif err != nil {\n\t\tapiWriteData(w, 501, apiMessage{Success: false, Data: err.Error()})\n\t\treturn\n\t}\n\tapiWriteJSONData(w, http.StatusOK, apiMessage{Success: true, Data: string(data)})\n}", "func (m *Module) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\n\t// Create a context from the response and request\n\tctx := newContext(w, r)\n\n\t// Serve the app using the new context\n\tm.Serve(ctx)\n}", "func (h *IndexHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tb, err := Asset(\"index.html\")\n\tif err != nil {\n\t\thttp.NotFound(w, r)\n\t\treturn\n\t}\n\tw.Header().Set(\"Content-Type\", \"text/html\")\n\tw.Write(b)\n}", "func HandleMain(c *gin.Context) {\n\tc.Header(\"Content-Type\", \"text/html; charset=utf-8\")\n\tc.Data(http.StatusOK, \"text/html; charset=utf-8\", []byte(page.IndexPage))\n}", "func (hc *LegacyHealthCheckImpl) ServeHTTP(w http.ResponseWriter, _ *http.Request) {\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\tstatus := hc.cacheStatusMap()\n\tb, err := json.MarshalIndent(status, \"\", \" \")\n\tif err != nil {\n\t\tw.Write([]byte(err.Error()))\n\t\treturn\n\t}\n\n\tbuf := bytes.NewBuffer(nil)\n\tjson.HTMLEscape(buf, b)\n\tw.Write(buf.Bytes())\n}", "func mainHandler(w http.ResponseWriter, req *http.Request) {\n\tw.Header().Set(\"Cache-Control\", \"no-cache, private, max-age=0\")\n\tw.Header().Set(\"Expires\", time.Unix(0, 0).Format(http.TimeFormat))\n\tw.Header().Set(\"Pragma\", \"no-cache\")\n\tw.Header().Set(\"X-Accel-Expires\", \"0\")\n\t\n\tvar mainPage Page;\n\tmainPage.Title = \"Go²!\"\n\tmainPage.Body = \"This is the main page!\"\n\tgetActiveChan <- true\n\tmainPage.ActiveList = <- strChan\n\n\tt := template.New(\"Main\")\n\tfile, err := ioutil.ReadFile(\"templates/main.html\")\n\tfileS := string(file)\n\ttemplate.Must(t.Parse(fileS))\n\tcheck(err)\n\tt.Execute(w, &mainPage)\n\tcheck(err)\n}", "func Status(w http.ResponseWriter, r *http.Request) {\n\tfmt.Fprintf(w, \"<html><body>\\n\")\n\tfmt.Fprintf(w, \"<p>NOTE: This is just one of potentially many instances.</p>\\n\")\n\tcommit := etl.GitCommit\n\tif len(commit) >= 8 {\n\t\tfmt.Fprintf(w, \"Release: %s <br> Commit: <a href=\\\"https://github.com/m-lab/etl/tree/%s\\\">%s</a><br>\\n\",\n\t\t\tetl.Version, etl.GitCommit, etl.GitCommit[0:7])\n\t} else {\n\t\tfmt.Fprintf(w, \"Release: %s Commit: %s\\n\", etl.Version, etl.GitCommit)\n\t}\n\n\tif gardenerAPI != nil {\n\t\tgardenerAPI.Status(w)\n\t}\n\tswitch outputType.Value {\n\tcase 
\"bigquery\":\n\t\tfmt.Fprintf(w, \"Writing output to BigQuery\\n\")\n\tcase \"gcs\":\n\t\tfmt.Fprintf(w, \"Writing output to %s\\n\", *outputLocation)\n\t}\n\tenv := os.Environ()\n\tfor i := range env {\n\t\tfmt.Fprintf(w, \"%s</br>\\n\", env[i])\n\t}\n\tfmt.Fprintf(w, \"</body></html>\\n\")\n}", "func SimpleServer(w http.ResponseWriter, request *http.Request) {\n\n\tcheck := func(err error) {\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t}\n\t}\n\n\tt, err := template.New(\"webpage\").Parse(tpl)\n\tcheck(err)\n\n\tdata := struct { // 这里可以定义机构数据, 然后循环显示出机构的状态;\n\t\tTitle string\n\t\tItems []string\n\t}{\n\t\tTitle: \"机构成功率\",\n\t\tItems: []string{\n\t\t\t\"165001\",\n\t\t\t\"140141\",\n\t\t},\n\t}\n\n\tfmt.Println(data)\n\n\terr = t.Execute(w, data)\n\t//err = t.Execute(w, \"<script>alert('you have been pwned')</script>\")\n\tcheck(err)\n\t//io.WriteString(w, handleData())\n}", "func (m *Module) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\n\t// Create a context from the response and request\n\tctx := web.NewContext(w, r)\n\n\t// Serve the app using the new context\n\tm.Serve(ctx)\n}", "func (t *tracer) ServeHTTP(w http.ResponseWriter, req *http.Request) {\n\tvar sr = statusRecorder{w, 200}\n\tvar path = req.URL.RawPath\n\tif path == \"\" {\n\t\tpath = req.URL.Path\n\t}\n\n\tvar start = time.Now()\n\tt.handler.ServeHTTP(&sr, req)\n\tvar finish = time.Now()\n\n\t// To avoid blocking when the events are being processed, we send the event\n\t// to the tracer's list asynchronously\n\tgo t.appendEvent(path, start, finish, sr.status)\n}", "func (this Statuser) Run(vars map[string]interface{}, next func()) {\n\tnext()\n\n\tstatus := httper.V(vars).GetStatus()\n\n\tlayout := strconv.Itoa(status)\n\tif templater.V(vars).Exists(this.Folder + \"/\" + layout) {\n\t\tvars[this.LayoutVar] = layout\n\t\treturn\n\t}\n\n\tlayout = strconv.Itoa(status/100) + \"xx\"\n\tif templater.V(vars).Exists(this.Folder + \"/\" + layout) {\n\t\tvars[this.ErrorVar] = strconv.Itoa(status)\n\t\tvars[this.LayoutVar] = layout\n\t}\n}", "func (h DebugIndexHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tif strings.HasPrefix(r.URL.Path, h.Path) {\n\t\tname := strings.TrimPrefix(r.URL.Path, h.Path)\n\t\tif name != \"\" {\n\t\t\tdebugHandler(name).ServeHTTP(w, r)\n\t\t\treturn\n\t\t}\n\t}\n\n\tprofiles := pprof.Profiles()\n\tif err := debugIndexTmpl.Execute(w, struct {\n\t\tProfiles []*pprof.Profile\n\t\tPath string\n\t}{\n\t\tProfiles: profiles,\n\t\tPath: h.Path,\n\t}); err != nil {\n\t\tlog.Printf(\"debug intex handler: %s\", err)\n\t}\n}", "func handleIndex(w http.ResponseWriter, r *http.Request) {\n\n\tif r.URL.Path != \"/\" {\n\t\thttp.NotFound(w, r)\n\t\treturn\n\t}\n\n\tc := appengine.NewContext(r)\n\tlog.Infof(c, \"Serving main page.\")\n\n\ttmpl, _ := template.ParseFiles(\"web/tmpl/index.tmpl\")\n\n\ttmpl.Execute(w, time.Since(initTime))\n}", "func (s *StatusHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tstatusUrl := \"status://\" + r.URL.Path[len(\"/status/\"):]\n\n\t// Find the revision associated with the request.\n\trevision := status.UNCHECKED_REVISION\n\trevision_str := r.FormValue(\"revision\")\n\tif revision_str != \"\" {\n\t\tvar e error\n\t\trevision, e = strconv.Atoi(revision_str)\n\t\tif e != nil {\n\t\t\tlogAndHttpError(w, e.Error(), http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\t}\n\n\t// Ensure the remote user only uses queries with a simple URL.\n\tif e := status.CheckForWildcard(statusUrl); e != nil {\n\t\tlogAndHttpError(w, e.Error(), 
http.StatusBadRequest)\n\t\treturn\n\t}\n\n\t// Dispatch the request, based on the type of request.\n\tswitch r.Method {\n\tcase \"GET\", \"POST\":\n\t\ts.HandleGet(w, r, statusUrl, revision)\n\tcase \"PUT\":\n\t\ts.HandlePut(w, r, statusUrl, revision)\n\tdefault:\n\t\tlogAndHttpError(w, fmt.Sprintf(\"Method %s not supported\", r.Method),\n\t\t\thttp.StatusMethodNotAllowed)\n\t}\n}", "func (h Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Add(\"Content-Type\", \"application/json\")\n\n\thealth := h.CompositeChecker.Check()\n\n\tif health.IsDown() {\n\t\tw.WriteHeader(http.StatusServiceUnavailable)\n\t}\n\n\tjson.NewEncoder(w).Encode(health)\n}", "func (s static) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\ts.handler.ServeHTTP(w, r)\n}", "func statusHandler(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tenc := json.NewEncoder(w)\n\terr := enc.Encode(getCurrentStatus())\n\tif err != nil {\n\t\tsklog.Errorf(\"Failed to write or encode output: %s\", err)\n\t\treturn\n\t}\n}", "func (a *App) ServeHTTP(res http.ResponseWriter, req *http.Request) {\n\ta.createContext(res, req).run()\n}", "func (h *App) ServeHTTP(res http.ResponseWriter, req *http.Request) {\n\tvar head string\n\n\tswitch req.URL.Path {\n\tcase \"/\", \"/orgs\", \"/users\", \"/devices\", \"/sign-in\", \"/groups\", \"/msg\":\n\t\th.IndexHandler.ServeHTTP(res, req)\n\n\tdefault:\n\t\thead, req.URL.Path = ShiftPath(req.URL.Path)\n\t\tswitch head {\n\t\tcase \"public\":\n\t\t\th.PublicHandler.ServeHTTP(res, req)\n\t\tcase \"v1\":\n\t\t\th.V1ApiHandler.ServeHTTP(res, req)\n\t\tdefault:\n\t\t\thttp.Error(res, \"Not Found\", http.StatusNotFound)\n\t\t}\n\t}\n}", "func (th *timeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\ttm := time.Now().Format(th.format)\n\tw.Write([]byte(\"The time is: \" + tm))\n}", "func (h handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\ttmpl, err := h.tmpl.Render()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tsession, err := h.store.Get(r, h.config.SessionName)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tid, ok := vars[\"id\"]\n\tif !ok {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tdata := h.tmpl.Data()\n\tif flashes := session.Flashes(); len(flashes) > 0 {\n\t\tdata.SetFlashes(flashes)\n\t}\n\n\tintid, _ := strconv.Atoi(id)\n\n\tposts, err := h.db.GetPostsByUser(intid)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tdata.Posts = posts\n\n\tuserCookie, ok := session.Values[h.config.UserCookieKey].(db.PublicUserData)\n\tif !ok {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tdata.SetUserData(userCookie)\n\n\trequestedUser, err := h.db.GetUserByID(intid)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tdata.SetTargetUser(requestedUser)\n\tdata.Title = requestedUser.Username\n\n\terr = session.Save(r, w)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tif err = tmpl.ExecuteTemplate(w, blog.TemplatesBase, data); err != nil {\n\t\tlog.Fatal(\"Could not execute register templates.\")\n\t}\n}", "func (p *Ping) TryServeHTTP(rw http.ResponseWriter, req *http.Request) (bool, error) {\n\tif req.URL.Path != \"/health/ping\" {\n\t\treturn true, 
nil\n\t}\n\n\trw.WriteHeader(http.StatusOK)\n\t_, err := rw.Write([]byte(\"OK\"))\n\treturn false, err\n}", "func (web *MFAWebServer) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tif r.URL.Path == \"/results\" {\n\n\t} else {\n\t\terr := r.ParseForm()\n\t\tif err != nil {\n\t\t\tfmt.Println(\"Error parsing form....\")\n\t\t}\n\n\t\tuser := r.Form[\"username\"][0]\n\t\tpassword := r.Form[\"password\"][0]\n\t\ttoken := r.Form[\"token\"][0]\n\n\t\tlog.Printf(\"User: %s, Password: %s, Token: %s\\n\", user, password, token)\n\t\tfmt.Println(\"[+] Creds captured and written to cred.log\")\n\n\t\tif !web.CredsRecvd {\n\t\t\tweb.CredsRecvd = true\n\t\t\twriteLoginFile(user, password, token)\n\t\t\tfmt.Println(\"[+] Creds writtin to loginFile, UiPath should be attempting login\")\n\t\t}\n\t\tw.Write([]byte(\"success\"))\n\t}\n}", "func StatusHandler(w http.ResponseWriter, r *http.Request) {\n\tvar s StatusReq\n\tvar asc AppStatChg\n\ts.logmsgs = make([]string, 1)\n\ts.logmsgs[0] = \"Status Handler\\n\"\n\tdecoder := json.NewDecoder(r.Body)\n\tif err := decoder.Decode(&s); err != nil {\n\t\tpanic(err)\n\t}\n\tUhura.LogStatus <- s // log status message before we start\n\t<-Uhura.LogStatusAck // make sure it was done\n\ts.w = w // send response here\n\ts.updateEnv = true // assume we update, set to false if error\n\n\tUhura.HReqMem <- 1 // ask to access the shared mem, blocks until granted\n\t<-Uhura.HReqMemAck // make sure we got it\n\tHandleSetStatus(&s, &asc) // handle the status req\n\tUhura.HReqMemAck <- 1 // tell Dispatcher we're done with the data\n\n\tsendHTTPLogMsgs(&s)\n\tif !s.updateEnv {\n\t\treturn // exit now if we don't update\n\t}\n\tUhura.StateChg <- asc // otherwise, send the struct describing the update\n\t<-Uhura.StateChgAck // wait for confirmation and we're done\n\tUhura.LogEnvDescr <- 1 // dump env descr\n\t<-Uhura.LogEnvDescrAck // make sure it got done\n}", "func (h Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tif h.Skip(r) {\n\t\th.Next.ServeHTTP(w, r)\n\t\treturn\n\t}\n\tvar written int64\n\tvar status = -1\n\n\twp := writerProxy{\n\t\th: func() http.Header {\n\t\t\treturn w.Header()\n\t\t},\n\t\tw: func(bytes []byte) (int, error) {\n\t\t\tbw, err := w.Write(bytes)\n\t\t\twritten += int64(bw)\n\t\t\treturn bw, err\n\t\t},\n\t\twh: func(code int) {\n\t\t\tstatus = code\n\t\t\tw.WriteHeader(code)\n\t\t},\n\t}\n\n\tstart := time.Now()\n\th.Next.ServeHTTP(wp, r)\n\tduration := time.Now().Sub(start)\n\n\t// Use default status.\n\tif status == -1 {\n\t\tstatus = 200\n\t}\n\n\th.Logger(r, status, written, duration)\n}", "func (c *PingMiddleware) ServeHTTP(w http.ResponseWriter, r *http.Request, next traffic.NextMiddlewareFunc) (http.ResponseWriter, *http.Request) {\n if r.URL.Path == \"/ping\" {\n fmt.Fprint(w, \"pong\\n\")\n\n return w, r\n }\n\n if nextMiddleware := next(); nextMiddleware != nil {\n arw := w.(*traffic.AppResponseWriter)\n arw.SetVar(\"ping\", \"pong\")\n w, r = nextMiddleware.ServeHTTP(w, r, next)\n }\n\n return w, r\n}", "func (d *Dashing) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tif !d.started {\n\t\tpanic(\"dashing.Start() has not been called\")\n\t}\n\td.Router.ServeHTTP(w, r)\n}", "func (httpHandler *HttpHandler) Run(r *http.Request) {\n\tif r.RequestURI == \"/favicon.ico\" {\n\t\thttpHandler.Response = &Response{200, map[string]string{}, nil, \"\"}\n\t\treturn\n\t}\n\t//reqParams := \"\"\n\tname, port := buildNamePort(r.Host)\n\n\thm, err := httpHandler.Vhosts.GetHostMap(port, name)\n\tif err != nil 
{\n\t\thttpHandler.Response = &Response{200, map[string]string{}, nil, \"<h1>404</h1>\"}\n\t\treturn\n\t}\n\n\tdocumentRoot := hm.DocumentRoot\n\n\terr, env := httpHandler.buildEnv(documentRoot, r)\n\n\tvar response *Response\n\tif err != nil {\n\t\tresponse = GetResponseByContent(403, nil, nil, \"not allow\")\n\n\t} else {\n\n\t\tfileCode,filename := httpHandler.buildServerHttp(r, env, hm)\n\n\t\tif fileCode == FileCodeStatic {\n\n\n\t\t\t\thttpHandler.StaticFile = &StaticFileHandler{\n\t\t\t\t\tname,\n\t\t\t\t\tport,\n\t\t\t\t\tfilename,\n\t\t\t\t}\n\t\t\t\treturn\n\n\t\t}\n\n\t\tif fileCode == FileCodeTry {\n\t\t\ttryFiles(r.RequestURI, hm.TryFiles, env)\n\t\t}\n\n\t\tif fileCode == FileCodeNotFound {\n\t\t\tresponse = &Response{404,nil,nil,\"<h1>404</h1>\"}\n\t\t\treturn\n\t\t}\n\n\n\t\tfcgi, err := New(hm.Net, hm.Addr)\n\n\t\treq := fcgi.GetRequest(r, env)\n\n\t\tif err != nil {\n\t\t\thttpHandler.log.Printf(\"err: %v\", err)\n\t\t}\n\n\t\tcontent, _, err := fcgi.DoRequest(req)\n\n\t\tif err != nil {\n\t\t\thttpHandler.log.Printf(\"ERROR: %s - %v\", r.URL.Path, err)\n\t\t}\n\n\t\tresponse = GetResponse(fmt.Sprintf(\"%s\", content))\n\n\t}\n\n\thttpHandler.Response = response\n}", "func (p *Plugin) ServeHTTP(c *plugin.Context, w http.ResponseWriter, r *http.Request) {\n\tswitch r.URL.Path {\n\tcase \"/status\":\n\t\tp.handleStatus(w, r)\n\tcase \"/hello\":\n\t\tp.handleHello(c,w, r)\n\tdefault:\n\t\thttp.NotFound(w, r)\n\t}\n}", "func (c *Collection) Status(w http.ResponseWriter, req *http.Request) {\n\t// responds with current status of the analysis\n\t// checks systemtap is running, this is probably done via global variable?\n}", "func statusHandler(c *gin.Context) {\n\tprovide.Render(nil, 204, c)\n}", "func (h spaHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\t// get the absolute path to prevent directory traversal\n\t// TODO check if path has .. 
i.e relative routes and ban IP\n\t// https://github.com/mrichman/godnsbl\n\t// https://github.com/jpillora/ipfilter\n\n\t// prepend the path with the path to the static directory\n\tpath := filepath.Join(h.staticPath, r.URL.Path)\n\n\t// check whether a file exists at the given path\n\t_, err := pkger.Stat(path)\n\tif os.IsNotExist(err) {\n\t\t// file does not exist, serve index.html\n\t\t// http.ServeFile(w, r, filepath.Join(h.staticPath, h.indexPath))\n\t\tlog.Println(\"File\", h.indexPath)\n\t\tfile, err := pkger.Open(h.indexPath)\n\t\tif err != nil {\n\t\t\thttp.Error(w, \"file \"+r.URL.Path+\" does not exist\", http.StatusNotFound)\n\t\t\treturn\n\t\t}\n\t\thttp.FileServer(file).ServeHTTP(w, r)\n\t\treturn\n\t} else if err != nil {\n\t\tif errors.Is(err, os.ErrNotExist) {\n\t\t\thttp.Error(w, \"file \"+r.URL.Path+\" does not exist\", http.StatusNotFound)\n\t\t\treturn\n\t\t}\n\t\t// if we got an error (that wasn't that the file doesn't exist) stating the\n\t\t// file, return a 500 internal server error and stop\n\t\t// http.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t// log.Println(err)\n\t\t// return\n\t}\n\n\t// otherwise, use http.FileServer to serve the static dir\n\thttp.FileServer(pkger.Dir(h.staticPath)).ServeHTTP(w, r)\n}", "func (a *Api) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\n\t// Strip mountpoint\n\turl := a.Path(r)\n\n\tvar err error\n\t// @todo, chain middlewares..\n\tswitch {\n\tcase url == \"/add\":\n\t\terr = a.Add(w, r)\n\tcase strings.HasPrefix(\"/task/\", url):\n\t\terr = a.Status(w, r)\n\tdefault:\n\t\terr = a.List(w, r)\n\t}\n\n\tif err != nil {\n\t\ta.Err = err\n\t\tlog.Println(\"error serving request %s\", err)\n\t\thttp.Error(w, \"Internal server error\", 500)\n\t\treturn\n\t}\n\n}", "func HandleIndex(w http.ResponseWriter, r *http.Request) {\n\t// I set the content type...\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=UTF-8\")\n\t// ... I write the I'm a Teapot status code :) ...\n\tw.WriteHeader(http.StatusTeapot)\n\t// ... 
and I write the a welcome message\n\tb, _ := json.Marshal(struct {\n\t\tWelcome string `json:\"welcome\"`\n\t}{\n\t\tWelcome: \"Welcome to abusim-coordinator API!\",\n\t})\n\tw.Write(b)\n}", "func (h Handler) Status(w http.ResponseWriter, r *http.Request) {\n\tresponses := h.healthchecker.Status()\n\tfor _, r := range responses {\n\t\tif r.Error != nil {\n\t\t\tw.WriteHeader(http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\t}\n\n\tw.WriteHeader(http.StatusOK)\n}", "func (a AdminHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tdata := LayoutData(w, r)\n\tdata, err := serveAdminPage(a.Database, data)\n\tif err != \"\" {\n\t\tdata = data.MergeKV(\"error\", err)\n\t}\n\n\tmustRender(w, r, \"admin\", data)\n}", "func httpHTML(w http.ResponseWriter, data interface{}) {\n\tw.WriteHeader(http.StatusOK)\n\tfmt.Fprint(w, data)\n}", "func Index(w http.ResponseWriter, r *http.Request) {\n\t// Fill out the page data for index\n\tpd := PageData{\n\t\tTitle: \"Index Page\",\n\t\tBody: \"This is the body of the index page.\",\n\t}\n\n\t// Render a template with our page data\n\ttmpl, err := render(pd)\n\n\t// if we get an error, write it out and exit\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tw.Write([]byte(err.Error()))\n\t\treturn\n\t}\n\n\t// All went well, so write out the template\n\tw.Write([]byte(tmpl))\n\n\t//fmt.Fprintf(w, \"Hello world from %q\", html.EscapeString(r.URL.Path))\n}", "func (s *SimpleStaticFilesServer) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tif s.testMode && strings.HasPrefix(r.URL.String(), GOPATH_PREFIX) {\n\t\tGopathLookup(w, r, strings.TrimPrefix(r.URL.String(), GOPATH_PREFIX))\n\t\treturn\n\t}\n\tlog.Printf(\"[STATIC CONTENT (%s)]: %v\", s.staticDir, r.URL.String())\n\ts.fs.ServeHTTP(w, r)\n}", "func (c *Context) Status(code int) {\n\tc.Response.WriteHeader(code)\n}", "func (c *Counter) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tc.handler.ServeHTTP(w, r)\n\tc.Inc(1)\n}", "func StartHTTPServer(log *logging.Logger, port int, reportDir string, service Service) {\n\tm := macaron.New()\n\tm.Use(macaron.Logger())\n\tm.Use(macaron.Recovery())\n\tm.Use(macaron.Static(\"\",\n\t\tmacaron.StaticOptions{\n\t\t\tSkipLogging: false,\n\t\t\tFileSystem: bindata.Static(bindata.Options{\n\t\t\t\tAsset: templates.Asset,\n\t\t\t\tAssetDir: templates.AssetDir,\n\t\t\t\tAssetInfo: templates.AssetInfo,\n\t\t\t\tAssetNames: templates.AssetNames,\n\t\t\t\tPrefix: \"\",\n\t\t\t}),\n\t\t},\n\t))\n\tm.Use(macaron.Static(reportDir,\n\t\tmacaron.StaticOptions{\n\t\t\tPrefix: \"reports\",\n\t\t\tSkipLogging: false,\n\t\t},\n\t))\n\tm.Use(macaron.Renderer(macaron.RenderOptions{\n\t\tFuncs: []template.FuncMap{\n\t\t\ttemplate.FuncMap{\n\t\t\t\t\"cssReady\": cssReady,\n\t\t\t\t\"cssTestOK\": cssTestOK,\n\t\t\t\t\"formatTime\": formatTime,\n\t\t\t},\n\t\t},\n\t\tTemplateFileSystem: bindata.Templates(bindata.Options{\n\t\t\tAsset: templates.Asset,\n\t\t\tAssetDir: templates.AssetDir,\n\t\t\tAssetInfo: templates.AssetInfo,\n\t\t\tAssetNames: templates.AssetNames,\n\t\t\tPrefix: \"\",\n\t\t}),\n\t}))\n\tm.Map(log)\n\tm.Map(service)\n\n\tm.Get(\"/\", indexPage)\n\tm.Get(\"/test/:name\", testPage)\n\tm.Get(\"/test/:name/pause\", testPausePage)\n\tm.Get(\"/test/:name/resume\", testResumePage)\n\tm.Get(\"/test/:name/logs\", testLogs)\n\tm.Get(\"/logs/:machine/:mode\", logsPage)\n\tm.Get(\"/chaos\", chaosPage)\n\tm.Get(\"/chaos/pause\", chaosPausePage)\n\tm.Get(\"/chaos/resume\", chaosResumePage)\n\tm.Get(\"/chaos/:id/enable\", 
chaosActionEnablePage)\n\tm.Get(\"/chaos/:id/disable\", chaosActionDisablePage)\n\n\taddr := fmt.Sprintf(\"0.0.0.0:%d\", port)\n\tlog.Infof(\"HTTP server listening on %s\", addr)\n\tgo func() {\n\t\tif err := http.ListenAndServe(addr, m); err != nil {\n\t\t\tlog.Fatalf(\"Failed to start listener: %#v\", err)\n\t\t}\n\t}()\n}", "func WebappHandler(w http.ResponseWriter, r *http.Request) {\n renderTemplate(w, \"webapp\")\n}", "func (mux *ServeMux) ServeHTTP(w ResponseWriter, r *Request)", "func (e *Manager) ServeHTTP(rw http.ResponseWriter, req *http.Request) {\n\tif e.name == \"generic-container-manager\" {\n\t\tfmt.Printf(\"Request object: %+v\", req)\n\t\thost := req.Host\n\t\tif host == \"\" {\n\t\t\thost = req.URL.Host\n\t\t}\n\t\tpath := req.URL.Path\n\t\tif strings.Contains(path, \".\") {\n\t\t\tif s := strings.Split(path, \"/\"); len(s) > 2 {\n\t\t\t\tpath = strings.Join(s[:len(s)-1], \"\")\n\t\t\t} else {\n\t\t\t\te.next.ServeHTTP(rw, req)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\te.request, _ = buildRequest(e.serviceUrl, e.name, e.timeout, host, path)\n\t\tfmt.Println(\"Request set to \", e.request)\n\t}\n\n\tstarting := false\n\tvar status string\n\tvar err error\n\tfor status, err = getServiceStatus(e.request); err == nil && status == \"starting\"; status, err = getServiceStatus(e.request) {\n\t\tstarting = true\n\t}\n\n\tif starting {\n\t\ttime.Sleep(1 * time.Second)\n\t\thttp.Redirect(rw, req, req.URL.Path, http.StatusTemporaryRedirect)\n\t}\n\n\tif err != nil {\n\t\trw.WriteHeader(http.StatusInternalServerError)\n\t\trw.Write([]byte(err.Error()))\n\t}\n\n\tif status == \"started\" {\n\t\t// Service started forward request\n\t\te.next.ServeHTTP(rw, req)\n\n\t} else {\n\t\t// Error\n\t\trw.WriteHeader(http.StatusInternalServerError)\n\t\trw.Write([]byte(\"Unexpected status answer from Manager service\"))\n\t}\n}", "func handler(w http.ResponseWriter, r *http.Request) {\n\ttmpl := template.Must(template.ParseFiles(\"index.html\"))\n\tdata := page\n\ttmpl.Execute(w, data)\n}", "func (h spaHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n // get the absolute path to prevent directory traversal\n\tpath, err := filepath.Abs(r.URL.Path)\n\tif err != nil {\n // if we failed to get the absolute path respond with a 400 bad request\n // and stop\n\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t\treturn\n\t}\n\n // prepend the path with the path to the static directory\n\tpath = filepath.Join(h.staticPath, path)\n\n // check whether a file exists at the given path\n\t_, err = os.Stat(path)\n\tif os.IsNotExist(err) {\n\t\t// file does not exist, serve index.html\n\t\thttp.ServeFile(w, r, filepath.Join(h.staticPath, h.indexPath))\n\t\treturn\n\t} else if err != nil {\n // if we got an error (that wasn't that the file doesn't exist) stating the\n // file, return a 500 internal server error and stop\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n // otherwise, use http.FileServer to serve the static dir\n\thttp.FileServer(http.Dir(h.staticPath)).ServeHTTP(w, r)\n}" ]
[ "0.76141816", "0.72446615", "0.7142075", "0.65824634", "0.65734637", "0.6462746", "0.6404384", "0.6377613", "0.63453937", "0.63208884", "0.63116246", "0.6259271", "0.62566614", "0.6254522", "0.6246605", "0.62242675", "0.6213871", "0.62105876", "0.62009305", "0.61627513", "0.61588454", "0.6144924", "0.613064", "0.6125604", "0.61132646", "0.61051863", "0.61033434", "0.6096356", "0.6095422", "0.6093623", "0.6075877", "0.6050146", "0.6032993", "0.6025303", "0.60147816", "0.59828395", "0.5977226", "0.5970139", "0.59688455", "0.59509534", "0.59423465", "0.58920074", "0.58864635", "0.58480656", "0.58427346", "0.58349144", "0.5831727", "0.5818096", "0.58136153", "0.5805082", "0.5798689", "0.5790487", "0.57879627", "0.5779585", "0.57698077", "0.57612616", "0.57541496", "0.57518923", "0.5751649", "0.5745382", "0.5734593", "0.57245934", "0.5718811", "0.57186884", "0.5718366", "0.5713121", "0.57105386", "0.5710019", "0.57087475", "0.5690677", "0.5688886", "0.56858283", "0.56801444", "0.56784195", "0.5668371", "0.56639075", "0.56597084", "0.56545883", "0.5652741", "0.5636862", "0.5632083", "0.5631447", "0.5630094", "0.5625058", "0.5618979", "0.5611836", "0.5611394", "0.56101876", "0.5606005", "0.55967414", "0.55934376", "0.5584111", "0.55819863", "0.55736613", "0.5568225", "0.5565784", "0.5565618", "0.55606204", "0.5559278", "0.5558048" ]
0.73742574
1
ServeHTTP handles requests for forcing a run: it attempts to add the run to the runQueue and writes a JSON response containing the result and a relevant message (a wiring sketch follows the handler below).
func (f *ForceRunHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	log.Logger("webserver").Info("Force run requested")
	var data struct {
		Result  string `json:"result"`
		Message string `json:"message"`
	}
	switch r.Method {
	case "POST":
		var (
			userEmail string
			err       error
		)
		if f.Authenticator != nil {
			userEmail, err = f.Authenticator.UserEmail(r.Context(), r)
			if err != nil {
				data.Result = "error"
				data.Message = "not authenticated"
				log.Logger("webserver").Error(data.Message, "error", err)
				w.WriteHeader(http.StatusForbidden)
				break
			}
		}
		if err := r.ParseForm(); err != nil {
			data.Result = "error"
			data.Message = "could not parse form data"
			log.Logger("webserver").Error(data.Message, "error", err)
			w.WriteHeader(http.StatusBadRequest)
			break
		}
		ns := r.FormValue("namespace")
		if ns == "" {
			data.Result = "error"
			data.Message = "empty namespace value"
			log.Logger("webserver").Error(data.Message)
			w.WriteHeader(http.StatusBadRequest)
			break
		}
		waybills, err := f.KubeClient.ListWaybills(r.Context())
		if err != nil {
			data.Result = "error"
			data.Message = "cannot list Waybills"
			log.Logger("webserver").Error(data.Message, "error", err)
			w.WriteHeader(http.StatusInternalServerError)
			break
		}
		var waybill *kubeapplierv1alpha1.Waybill
		for i := range waybills {
			if waybills[i].Namespace == ns {
				waybill = &waybills[i]
				break
			}
		}
		if waybill == nil {
			data.Result = "error"
			data.Message = fmt.Sprintf("cannot find Waybills in namespace '%s'", ns)
			w.WriteHeader(http.StatusBadRequest)
			break
		}
		if f.Authenticator != nil {
			// if the user can patch the Waybill, they are allowed to force a run
			hasAccess, err := f.KubeClient.HasAccess(r.Context(), waybill, userEmail, "patch")
			if !hasAccess {
				data.Result = "error"
				data.Message = fmt.Sprintf("user %s is not allowed to force a run on waybill %s/%s", userEmail, waybill.Namespace, waybill.Name)
				if err != nil {
					log.Logger("webserver").Error(data.Message, "error", err)
				}
				w.WriteHeader(http.StatusForbidden)
				break
			}
		}
		run.Enqueue(f.RunQueue, run.ForcedRun, waybill)
		data.Result = "success"
		data.Message = "Run queued"
		w.WriteHeader(http.StatusOK)
	default:
		data.Result = "error"
		data.Message = "must be a POST request"
		w.WriteHeader(http.StatusBadRequest)
	}
	w.Header().Set("Content-Type", "application/json; charset=UTF-8")
	json.NewEncoder(w).Encode(data)
}
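For illustration, a minimal sketch of how this handler might be mounted on a server; the package name, route path, listen address, and helper function are assumptions for the example, not taken from the handler above.

// Hypothetical wiring for the ForceRunHandler above; the route path,
// listen address, and serveForceRun helper are illustrative assumptions.
package webserver // assumed package name

import "net/http"

// serveForceRun mounts the handler and starts a server (sketch only).
func serveForceRun(f *ForceRunHandler) error {
	mux := http.NewServeMux()
	// ForceRunHandler implements http.Handler via its ServeHTTP method.
	mux.Handle("/api/v1/forceRun", f)
	// A client triggers a run by POSTing the target namespace as form data:
	//   curl -X POST -d "namespace=team-a" http://localhost:8080/api/v1/forceRun
	return http.ListenAndServe(":8080", mux)
}

Because the handler branches on r.Method and reads the namespace with r.FormValue, any client that sends a POST with form-encoded data will work; every other method falls through to the 400 "must be a POST request" branch.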
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (f *ForceRunHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tlog.Logger.Info(\"Force run requested\")\n\tvar data struct {\n\t\tResult string `json:\"result\"`\n\t\tMessage string `json:\"message\"`\n\t}\n\n\tswitch r.Method {\n\tcase \"POST\":\n\t\tselect {\n\t\tcase f.RunQueue <- true:\n\t\t\tlog.Logger.Info(\"Run queued\")\n\t\tdefault:\n\t\t\tlog.Logger.Info(\"Run queue is already full\")\n\t\t}\n\t\tdata.Result = \"success\"\n\t\tdata.Message = \"Run queued, will begin upon completion of current run.\"\n\t\tw.WriteHeader(http.StatusOK)\n\tdefault:\n\t\tdata.Result = \"error\"\n\t\tdata.Message = \"Error: force rejected, must be a POST request.\"\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tlog.Logger.Info(data.Message)\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=UTF-8\")\n\tjson.NewEncoder(w).Encode(data)\n}", "func (h *HTTPTrigger) Run(ctx context.Context, logger api.Logger) {\n\thandler, err := getHandler()\n\tif err != nil {\n\t\tlogger.Errorf(\"could not get handler: %v\", err)\n\t\th.Response = api.Response{\n\t\t\tHeaders: http.Header{\n\t\t\t\t\"content-type\": []string{\"text/plain\"},\n\t\t\t},\n\t\t\tStatusCode: http.StatusInternalServerError,\n\t\t\tBody: err.Error(),\n\t\t}\n\t\treturn\n\t}\n\th.Response = handler.ServeHTTP(ctx, logger, h.Request)\n}", "func (httpHandler *HttpHandler) Run(r *http.Request) {\n\tif r.RequestURI == \"/favicon.ico\" {\n\t\thttpHandler.Response = &Response{200, map[string]string{}, nil, \"\"}\n\t\treturn\n\t}\n\t//reqParams := \"\"\n\tname, port := buildNamePort(r.Host)\n\n\thm, err := httpHandler.Vhosts.GetHostMap(port, name)\n\tif err != nil {\n\t\thttpHandler.Response = &Response{200, map[string]string{}, nil, \"<h1>404</h1>\"}\n\t\treturn\n\t}\n\n\tdocumentRoot := hm.DocumentRoot\n\n\terr, env := httpHandler.buildEnv(documentRoot, r)\n\n\tvar response *Response\n\tif err != nil {\n\t\tresponse = GetResponseByContent(403, nil, nil, \"not allow\")\n\n\t} else {\n\n\t\tfileCode,filename := httpHandler.buildServerHttp(r, env, hm)\n\n\t\tif fileCode == FileCodeStatic {\n\n\n\t\t\t\thttpHandler.StaticFile = &StaticFileHandler{\n\t\t\t\t\tname,\n\t\t\t\t\tport,\n\t\t\t\t\tfilename,\n\t\t\t\t}\n\t\t\t\treturn\n\n\t\t}\n\n\t\tif fileCode == FileCodeTry {\n\t\t\ttryFiles(r.RequestURI, hm.TryFiles, env)\n\t\t}\n\n\t\tif fileCode == FileCodeNotFound {\n\t\t\tresponse = &Response{404,nil,nil,\"<h1>404</h1>\"}\n\t\t\treturn\n\t\t}\n\n\n\t\tfcgi, err := New(hm.Net, hm.Addr)\n\n\t\treq := fcgi.GetRequest(r, env)\n\n\t\tif err != nil {\n\t\t\thttpHandler.log.Printf(\"err: %v\", err)\n\t\t}\n\n\t\tcontent, _, err := fcgi.DoRequest(req)\n\n\t\tif err != nil {\n\t\t\thttpHandler.log.Printf(\"ERROR: %s - %v\", r.URL.Path, err)\n\t\t}\n\n\t\tresponse = GetResponse(fmt.Sprintf(\"%s\", content))\n\n\t}\n\n\thttpHandler.Response = response\n}", "func (a *App) HandleRun(w http.ResponseWriter, r *http.Request) {\n\n\t// Get variables from the request\n\tvars := mux.Vars(r)\n\tvar variables RequestVariable\n\terr := variables.GetVariablesFromRequestVars(vars)\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\n\t// Check if the secret we passed in is valid, otherwise, return error 400\n\tif !a.Secret.Valid(variables.Secret) {\n\t\ta.DmnLogFile.Log.Println(\"Bad secret!\")\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tabortcmd := func(reason string) 
{\n\t\ta.DmnLogFile.Log.Println(reason)\n\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tvar sc ScheduledCommand\n\t\tsc.Status = Failed\n\t\tsc.Coutput = reason\n\t\tout, _ := json.Marshal(sc)\n\t\tio.WriteString(w, string(out))\n\t}\n\n\t// Select the dmn.Command, otherwise, if the dmn.Command hash cannot be found, return error 400\n\tselectedCmd, cerr := a.SelectCmd(variables.CmdHash)\n\n\tif cerr != nil {\n\t\tabortcmd(\"Unable to select hash: \" + variables.CmdHash)\n\t\treturn\n\t}\n\n\t// if selectedCmd.CmdHash == \"\" {\n\t// \tabortcmd(\"Invalid hash\")\n\t// \treturn\n\t// }\n\n\t_, err = os.Stat(selectedCmd.WorkingDirectory)\n\tif os.IsNotExist(err) {\n\t\tabortcmd(\"Invalid working directory: \" + selectedCmd.WorkingDirectory)\n\t\treturn\n\t}\n\n\ta.DmnLogFile.Log.Printf(\"Scheduling command %v: %v\\n\", selectedCmd.CmdHash, selectedCmd.Status)\n\tselectedCmd.Status = Scheduled\n\ta.CommandScheduler.QueuedCommands = append(a.CommandScheduler.QueuedCommands, selectedCmd)\n\ta.CommandScheduler.CommandQueue <- selectedCmd\n\n\ta.DmnLogFile.Log.Printf(\"Completed command %v: %v\\n\", selectedCmd.CmdHash, selectedCmd.Status)\n\n\tcompletedCommand := <-a.CommandScheduler.CompletedQueue\n\n\ta.DmnLogFile.Log.Printf(\"Command received from CompletedQueue: %v: %v\\n\", completedCommand.CmdHash, selectedCmd.Status)\n\n\ta.UpdateCommandDuration(selectedCmd, completedCommand.Duration)\n\n\tfor index, cmd := range a.CommandScheduler.QueuedCommands {\n\t\tif cmd.CmdHash == selectedCmd.CmdHash {\n\t\t\ta.DmnLogFile.Log.Printf(\"Updating status for %v: %v\\n\", cmd.CmdHash, Completed)\n\t\t\ta.CommandScheduler.QueuedCommands[index].Status = Completed\n\t\t\tbreak\n\t\t}\n\t}\n\n\ta.DmnLogFile.Log.Printf(\"Vacuuming command %v\\n\", selectedCmd.CmdHash)\n\ta.CommandScheduler.VacuumQueue <- selectedCmd\n\n\tout, _ := json.Marshal(completedCommand)\n\tio.WriteString(w, string(out))\n}", "func serveHTTP(c *RequestContext, w http.ResponseWriter, r *http.Request) (int, error) {\n\t// Checks if the URL contains the baseURL and strips it. Otherwise, it just\n\t// returns a 404 error because we're not supposed to be here!\n\tp := strings.TrimPrefix(r.URL.Path, c.FM.BaseURL)\n\n\tif len(p) >= len(r.URL.Path) && c.FM.BaseURL != \"\" {\n\t\treturn http.StatusNotFound, nil\n\t}\n\n\tr.URL.Path = p\n\n\t// Check if this request is made to the service worker. If so,\n\t// pass it through a template to add the needed variables.\n\tif r.URL.Path == \"/sw.js\" {\n\t\treturn renderFile(\n\t\t\tw,\n\t\t\tc.FM.assets.MustString(\"sw.js\"),\n\t\t\t\"application/javascript\",\n\t\t\tc,\n\t\t)\n\t}\n\n\t// Checks if this request is made to the static assets folder. If so, and\n\t// if it is a GET request, returns with the asset. 
Otherwise, returns\n\t// a status not implemented.\n\tif matchURL(r.URL.Path, \"/static\") {\n\t\tif r.Method != http.MethodGet {\n\t\t\treturn http.StatusNotImplemented, nil\n\t\t}\n\n\t\treturn staticHandler(c, w, r)\n\t}\n\n\t// Checks if this request is made to the API and directs to the\n\t// API handler if so.\n\tif matchURL(r.URL.Path, \"/api\") {\n\t\tr.URL.Path = strings.TrimPrefix(r.URL.Path, \"/api\")\n\t\treturn apiHandler(c, w, r)\n\t}\n\n\t// Any other request should show the index.html file.\n\tw.Header().Set(\"x-frame-options\", \"SAMEORIGIN\")\n\tw.Header().Set(\"x-content-type\", \"nosniff\")\n\tw.Header().Set(\"x-xss-protection\", \"1; mode=block\")\n\n\treturn renderFile(\n\t\tw,\n\t\tc.FM.assets.MustString(\"index.html\"),\n\t\t\"text/html\",\n\t\tc,\n\t)\n}", "func (di *Dispatcher) ServeHTTP(w http.ResponseWriter, req *http.Request) {\n\tvar response definition.Response\n\tmRequest := di.Translator.BuildRequestDefinitionFromHTTP(req)\n\n\tif mRequest.Path == \"/favicon.ico\" {\n\t\treturn\n\t}\n\n\tlogging.Printf(\"New request: %s %s\\n\", req.Method, req.URL.String())\n\tresult := definition.Result{}\n\tmock, errs := di.Router.Route(&mRequest)\n\tif errs == nil {\n\t\tresult.Found = true\n\t} else {\n\t\tresult.Found = false\n\t\tresult.Errors = errs\n\t}\n\n\tlogging.Printf(\"Mock match found: %s. Name : %s\\n\", strconv.FormatBool(result.Found), mock.Name)\n\n\tif result.Found {\n\t\tif len(mock.Control.ProxyBaseURL) > 0 {\n\t\t\tpr := proxy.Proxy{URL: mock.Control.ProxyBaseURL}\n\t\t\tresponse = pr.MakeRequest(mock.Request)\n\t\t} else {\n\n\t\t\tdi.VarsProcessor.Eval(&mRequest, mock)\n\n\t\t\tif !reflect.DeepEqual(definition.Notify{}, mock.Notify) {\n\t\t\t\tgo di.Notifier.Notify(mock)\n\t\t\t}\n\n\t\t\tif mock.Control.Crazy {\n\t\t\t\tlogging.Printf(\"Running crazy mode\")\n\t\t\t\tmock.Response.StatusCode = di.randomStatusCode(mock.Response.StatusCode)\n\t\t\t}\n\t\t\tif mock.Control.Delay > 0 {\n\t\t\t\tlogging.Printf(\"Adding a delay\")\n\t\t\t\ttime.Sleep(time.Duration(mock.Control.Delay) * time.Second)\n\t\t\t}\n\t\t\tresponse = mock.Response\n\t\t}\n\n\t} else {\n\t\tresponse = mock.Response\n\t}\n\n\t//translate request\n\tdi.Translator.WriteHTTPResponseFromDefinition(&response, w)\n\n\t//log to console\n\tm := definition.Match{Request: mRequest, Response: response, Result: result, Persist: mock.Persist}\n\tgo di.recordMatchData(m)\n}", "func (h stubbingHandler) ServeHTTP(w http.ResponseWriter, _ *http.Request) {\n\th.mutex.Lock()\n\tdefer h.mutex.Unlock()\n\tresponses := h.holder.responses\n\tif len(responses) > 0 {\n\t\tresp := responses[0]\n\t\tw.WriteHeader(resp.responseCode)\n\t\t_, err := w.Write([]byte(resp.body))\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"Can't write the response: %v\", err)\n\t\t}\n\n\t\tswitch resp.times {\n\t\tcase 0:\n\t\t\tbreak\n\t\tcase 1:\n\t\t\tshortened := responses[1:]\n\t\t\th.holder.responses = shortened\n\t\tdefault:\n\t\t\tresp.times--\n\t\t}\n\t} else {\n\t\tw.WriteHeader(http.StatusNotFound)\n\t}\n}", "func (m *MiddlewareChain) Run(w http.ResponseWriter, req *http.Request) {\n\tm.chain.ServeHTTP(w, req)\n}", "func mainHandler(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Access-Control-Allow-Origin\", \"*\")\n\tw.Header().Set(\"Access-Control-Allow-Credentials\", \"true\")\n\tw.Header().Set(\"Access-Control-Allow-Headers\", \"Content-Type\")\n\tw.Header().Set(\"Access-Control-Allow-Methods\", \"GET, POST, HEAD\")\n\n\tif r.Method == \"POST\" {\n\t\tvar req dlRequest\n\t\tif err := 
json.NewDecoder(r.Body).Decode(&req); err != nil {\n\t\t\tlog.Println(err)\n\t\t\thttp.Error(w, \"bad request\", http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\t\t// add to queue\n\t\tgo func(qreq *dlRequest) {\n\t\t\tm3u8.DlChan <- &m3u8.WJob{Type: m3u8.ListDL, URL: req.Url, DestPath: req.Path, Filename: req.Filename}\n\t\t}(&req)\n\t\tres := response{req.Url, req.Filename, \"Added to the queue\"}\n\t\tjson.NewEncoder(w).Encode(res)\n\t\treturn\n\t}\n}", "func (web *MFAWebServer) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tif r.URL.Path == \"/results\" {\n\n\t} else {\n\t\terr := r.ParseForm()\n\t\tif err != nil {\n\t\t\tfmt.Println(\"Error parsing form....\")\n\t\t}\n\n\t\tuser := r.Form[\"username\"][0]\n\t\tpassword := r.Form[\"password\"][0]\n\t\ttoken := r.Form[\"token\"][0]\n\n\t\tlog.Printf(\"User: %s, Password: %s, Token: %s\\n\", user, password, token)\n\t\tfmt.Println(\"[+] Creds captured and written to cred.log\")\n\n\t\tif !web.CredsRecvd {\n\t\t\tweb.CredsRecvd = true\n\t\t\twriteLoginFile(user, password, token)\n\t\t\tfmt.Println(\"[+] Creds writtin to loginFile, UiPath should be attempting login\")\n\t\t}\n\t\tw.Write([]byte(\"success\"))\n\t}\n}", "func (c *Sender) Do(r *http.Request) (*http.Response, error) {\n\tc.attempts++\n\n\tif !c.reuseResponse || c.resp == nil {\n\t\tresp := NewResponse()\n\t\tresp.Request = r\n\t\tresp.Body = NewBody(c.content)\n\t\tresp.Status = c.status\n\t\tresp.StatusCode = c.statusCode\n\t\tc.resp = resp\n\t} else {\n\t\tc.resp.Body.(*Body).reset()\n\t}\n\n\tif c.pollAttempts > 0 {\n\t\tc.pollAttempts--\n\t\tc.resp.Status = \"Accepted\"\n\t\tc.resp.StatusCode = http.StatusAccepted\n\t\tSetAcceptedHeaders(c.resp)\n\t}\n\n\tif c.emitErrors > 0 || c.emitErrors < 0 {\n\t\tc.emitErrors--\n\t\tif c.err == nil {\n\t\t\treturn c.resp, fmt.Errorf(\"Faux Error\")\n\t\t}\n\t\treturn c.resp, c.err\n\t}\n\treturn c.resp, nil\n}", "func (r *Runner) ServeHTTP(w http.ResponseWriter, req *http.Request) {\n\tr.rpc.ServeHTTP(w, req)\n}", "func (s *AppServer) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\t// hijack request id\n\trequestID := r.Header.Get(s.requestID)\n\tif requestID == \"\" || len(requestID) > DefaultMaxHttpRequestIDLen {\n\t\trequestID = NewGID().Hex()\n\n\t\t// inject request header with new request id\n\t\tr.Header.Set(s.requestID, requestID)\n\t}\n\n\tlogger := s.logger.New(requestID)\n\tdefer s.logger.Reuse(logger)\n\n\tlogger.Debugf(`processing %s \"%s\"`, r.Method, s.filterParameters(r.URL))\n\n\t// throughput by rate limit, timeout after time.Second/throttle\n\tif s.throttle != nil {\n\t\tctx, done := context.WithTimeout(context.Background(), s.throttleTimeout)\n\t\terr := s.throttle.Wait(ctx)\n\t\tdone()\n\n\t\tif err != nil {\n\t\t\tlogger.Warnf(\"Throughput exceed: %v\", err)\n\n\t\t\tw.Header().Set(\"Retry-After\", s.throttleTimeout.String())\n\t\t\thttp.Error(w, http.StatusText(http.StatusTeapot), http.StatusTeapot)\n\t\t\treturn\n\t\t}\n\t}\n\n\t// concurrency by channel, timeout after request+response timeouts\n\tif s.slowdown != nil {\n\t\tticker := time.NewTicker(s.slowdownTimeout)\n\n\t\tselect {\n\t\tcase <-s.slowdown:\n\t\t\tticker.Stop()\n\n\t\t\tdefer func() {\n\t\t\t\ts.slowdown <- true\n\t\t\t}()\n\n\t\tcase <-ticker.C:\n\t\t\tticker.Stop()\n\n\t\t\tlogger.Warnf(\"Concurrency exceed: %v timeout\", s.slowdownTimeout)\n\n\t\t\tw.Header().Set(\"Retry-After\", s.slowdownTimeout.String())\n\t\t\thttp.Error(w, http.StatusText(http.StatusTooManyRequests), 
http.StatusTooManyRequests)\n\t\t\treturn\n\t\t}\n\t}\n\n\ts.AppRoute.ServeHTTP(w, r)\n}", "func (a *Application) ServeHTTP(w http.ResponseWriter, req *http.Request) {\n\t// log all unhandled panic's\n\t// todo: check performance impact\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\ta.Emit(\"error\", r)\n\t\t\tpanic(r)\n\t\t}\n\t}()\n\n\tctx := makeCtx(req, w)\n\trequest := ctx.Req\n\tresponse := ctx.Res\n\n\tdefer response.flush()\n\n\t///////////////////////////////////////////////////////////////////\n\t// Catch Neo Assertions\n\t///////////////////////////////////////////////////////////////////\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\terr, ok := r.(*NeoAssertError)\n\n\t\t\tif ok {\n\t\t\t\tresponse.Raw(err.message, err.status)\n\t\t\t\ta.Emit(\"error\", r)\n\t\t\t} else {\n\t\t\t\t// bubble panic\n\t\t\t\tpanic(r)\n\t\t\t}\n\t\t}\n\t}()\n\n\t///////////////////////////////////////////////////////////////////\n\t// Static File Serving\n\t///////////////////////////////////////////////////////////////////\n\tif a.static != nil {\n\t\t// check if file can be served\n\t\tfile, err := a.static.match(req.URL.Path)\n\n\t\tif err == nil {\n\t\t\th := func(ctx *Ctx) {\n\t\t\t\tresponse.skipFlush()\n\t\t\t\tresponse.serveFile(file)\n\t\t\t}\n\n\t\t\tfn := compose(merge(a.middlewares, []appliable{handler(h)}))\n\t\t\tfn(ctx)\n\t\t\treturn\n\t\t}\n\n\t\tlog.Debug(\"result not found in static\")\n\t}\n\n\t///////////////////////////////////////////////////////////////////\n\t// Route Matching\n\t///////////////////////////////////////////////////////////////////\n\troute, err := a.match(request)\n\n\tif err != nil {\n\t\tlog.Debugf(\"route %s not found\", req.URL.Path)\n\n\t\t// dummy route handler\n\t\th := func(ctx *Ctx) {\n\t\t\tresponse.Status = http.StatusNotFound\n\t\t}\n\n\t\tcompose(merge(a.middlewares, []appliable{handler(h)}))(ctx)\n\t} else {\n\t\troute.fnChain(ctx)\n\t}\n}", "func (handler Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tfmt.Printf(\"%s %s %s\\n\", r.RemoteAddr, r.Method, r.URL.Path)\n\tt1 := time.Now()\n\thandler.store.ExpireSessions()\n\treq := newReqImpl(w, r, handler.store)\n\thandler.serve(req)\n\tif req.html != \"\" {\n\t\tio.WriteString(w, req.html)\n\t} else if req.template != \"\" {\n\t\tt := template.New(\"\").Funcs(handler.funcmap)\n\t\tt, err := t.ParseGlob(handler.templatePattern)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\terr = t.ExecuteTemplate(w, req.template, req.model)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t} else if req.redirect != \"\" {\n\t\thttp.Redirect(w, r, req.redirect, http.StatusFound)\n\t} else if req.status != 0 {\n\t\tmsg := http.StatusText(req.status)\n\t\thttp.Error(w, msg, req.status)\n\t} else {\n\t\tio.WriteString(w, \"no result\")\n\t}\n\td := time.Since(t1)\n\tfmt.Printf(\"%s %s %s - %f s\\n\", r.RemoteAddr, r.Method, r.URL.Path, float64(d)/1e9)\n}", "func (s *Service) Run() error {\n s.pipeline = s.pipeline.Add(HandlerFunc(s.routeRequest))\n \n server := &http.Server{\n Addr: s.port,\n Handler: s,\n ReadTimeout: s.readTimeout,\n WriteTimeout: s.writeTimeout,\n IdleTimeout: s.idleTimeout,\n }\n \n alt.Debugf(\"%s: Listening on %v\", s.name, s.port)\n return server.ListenAndServe()\n}", "func (t *task) run(ctx context.Context) {\n\tgo func() {\n\t\tresult, err := t.handler(ctx, t.request)\n\t\tt.resultQ <- Response{Result: result, Err: err} // out channel is buffered by 1\n\t\tt.running = false\n\t\tclose(t.resultQ)\n\t}()\n}", "func (q *Queue) Run(w 
http.ResponseWriter, req *http.Request) {\n\tfor _, f := range q.list {\n\t\tif w, req = f(w, req); req == nil {\n\t\t\treturn\n\t\t}\n\t}\n}", "func (s *StatusPageHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tlog.Logger.Info(\"Applier status request\", \"time\", s.Clock.Now().String())\n\tif s.Template == nil {\n\t\thttp.Error(w, \"Error: Unable to load HTML template\", http.StatusInternalServerError)\n\t\tlog.Logger.Error(\"Request failed\", \"error\", \"No template found\", \"time\", s.Clock.Now().String())\n\t\treturn\n\t}\n\tif err := s.Template.Execute(w, s.Data); err != nil {\n\t\thttp.Error(w, \"Error: Unable to load HTML template\", http.StatusInternalServerError)\n\t\tlog.Logger.Error(\"Request failed\", \"error\", http.StatusInternalServerError, \"time\", s.Clock.Now().String())\n\t\treturn\n\t}\n\tlog.Logger.Info(\"Request completed successfully\", \"time\", s.Clock.Now().String())\n}", "func (s *Layer) Run(phase string, w http.ResponseWriter, r *http.Request, h http.Handler) {\n\tdefer func() {\n\t\tif phase == \"error\" {\n\t\t\treturn\n\t\t}\n\t\tif re := recover(); re != nil {\n\t\t\tcontext.Set(r, \"error\", re)\n\t\t\ts.Run(\"error\", w, r, FinalErrorHandler)\n\t\t}\n\t}()\n\n\tif h == nil {\n\t\th = s.finalHandler\n\t}\n\n\tstack := s.Pool[phase]\n\tif stack == nil {\n\t\tif phase != \"error\" {\n\t\t\th.ServeHTTP(w, r)\n\t\t}\n\t\treturn\n\t}\n\n\tqueue := stack.Join()\n\tfor i := len(queue) - 1; i >= 0; i-- {\n\t\th = queue[i](h)\n\t}\n\n\th.ServeHTTP(w, r)\n}", "func (ds *Dispatcher) Run() {\r\n\r\n\taddAccessControlAllowOriginFn := func(h http.Handler) http.HandlerFunc {\r\n\t\treturn func(w http.ResponseWriter, r *http.Request) {\r\n\t\t\tw.Header().Add(\"Access-Control-Allow-Origin\", \"*\")\r\n\t\t\th.ServeHTTP(w, r)\r\n\t\t}\r\n\t}\r\n\r\n\tdelayReplyFn := func(h http.Handler) http.HandlerFunc {\r\n\t\treturn func(w http.ResponseWriter, r *http.Request) {\r\n\t\t\ttime.Sleep(time.Duration(ds.GetDelayReply()) * time.Millisecond)\r\n\t\t\th.ServeHTTP(w, r)\r\n\t\t}\r\n\t}\r\n\r\n\taddWrapper := func(h http.Handler, wrapper WrapperFunc) http.HandlerFunc {\r\n\t\treturn func(w http.ResponseWriter, r *http.Request) {\r\n\t\t\tsuccess := wrapper(w, r)\r\n\r\n\t\t\tif !success {\r\n\t\t\t\treturn\r\n\t\t\t}\r\n\r\n\t\t\th.ServeHTTP(w, r)\r\n\t\t}\r\n\t}\r\n\r\n\tvar err error\r\n\tvar listener net.Listener\r\n\tvar wrappedHandler http.HandlerFunc\r\n\r\n\tif ds.tlsInfo != nil && ds.tlsInfo.certificate != nil {\r\n\t\tlistener, err = tls.Listen(\"tcp\", fmt.Sprintf(\"%s:%d\", ds.GetHost(), ds.GetPort()), ds.tlsInfo.tlsConfig)\r\n\t\tif err != nil {\r\n\t\t\tds.GetLogger().Fatal(err)\r\n\t\t\treturn\r\n\t\t}\r\n\t} else {\r\n\t\tlistener, err = net.Listen(\"tcp\", fmt.Sprintf(\"%s:%d\", ds.GetHost(), ds.GetPort()))\r\n\t\tif err != nil {\r\n\t\t\tds.GetLogger().Fatal(err)\r\n\t\t\treturn\r\n\t\t}\r\n\t}\r\n\r\n\tif ds.GetMaxConnections() > 0 {\r\n\t\tlistener = netutil.LimitListener(listener, ds.GetMaxConnections())\r\n\t}\r\n\r\n\twrappedHandler = addAccessControlAllowOriginFn(ds.muxer)\r\n\r\n\tfor _, wrapper := range ds.wrappers {\r\n\t\twrappedHandler = addWrapper(wrappedHandler, wrapper)\r\n\t}\r\n\r\n\tif ds.GetDelayReply() > 0 {\r\n\t\twrappedHandler = delayReplyFn(wrappedHandler)\r\n\t}\r\n\r\n\tif ds.GetMaxConnections() > 0 {\r\n\t\tds.GetLogger().Println(fmt.Sprintf(\"Allowing %d concurrent connections.\", ds.GetMaxConnections()))\r\n\t}\r\n\r\n\tif ds.GetDelayReply() > 0 {\r\n\t\tds.GetLogger().Println(fmt.Sprintf(\"Delaying replies by 
%dms..\", ds.GetDelayReply()))\r\n\t}\r\n\r\n\tif ds.tlsInfo != nil && ds.tlsInfo.certificate != nil {\r\n\t\tds.GetLogger().Println(fmt.Sprintf(\"Starting listener on 'https://%s:%d'\", ds.GetHost(), ds.GetPort()))\r\n\t} else {\r\n\t\tds.GetLogger().Println(fmt.Sprintf(\"Starting listener on 'http://%s:%d'\", ds.GetHost(), ds.GetPort()))\r\n\t}\r\n\r\n\terr = http.Serve(listener, wrappedHandler)\r\n\tds.GetLogger().Fatal(err)\r\n}", "func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) {\n\tdefer func() {\n\t\tif err := recover(); err != nil {\n\t\t\tlog.Print(err)\n\t\t\tr.panicHandler(w, req, err)\n\t\t}\n\t}()\n\n\tr.executeHandler(w, req)\n}", "func (h *routeHandler) Run() {\n\thandshakeData := js.Global().Call(\"_doHandShake\")\n\treqBlob := handshakeData.Get(\"requestBlob\")\n\tresponseCallback := handshakeData.Get(\"responseFunction\")\n\n\trequest := makeRequestFromJs(reqBlob)\n\n\tvar response = buildResponse(\"Route not found: \" + request.Pathname, SetStatus(404))\n\n\tfor pathStr, pathHandler := range h.callbacks {\n\t\tif matched, _ := path.Match(pathStr, request.Pathname); matched {\n\t\t\tpathHandler(request)\n\n\t\t\tif request._calledRespond {\n\t\t\t\tresponse = request._response\n\t\t\t} else {\n\t\t\t\tresponse = buildResponse(\"\")\n\t\t\t}\n\t\t}\n\t}\n\n\trawResp := response.serialize()\n\n\tresponseCallback.Invoke(rawResp)\n}", "func (s *GracefulHTTPServer) Run() error {\n\terrs := make(chan error, 1)\n\tstop := make(chan os.Signal, 1)\n\tsignal.Notify(stop, os.Interrupt)\n\n\tgo func() {\n\t\terr := s.svr.Serve(s.l)\n\t\tif err != nil {\n\t\t\terrs <- err\n\t\t}\n\t}()\n\n\t// This select statement will block until we can read from EITHER our errs\n\t// channel or the stop channel. The stop channel will get a value when we get\n\t// a SIGINT signal. 
The errs channel will get a value if we failed\n\t// to start the server.\n\tselect {\n\tcase err := <-errs:\n\t\ts.log.Error(\"server failed to start\", zap.Error(err))\n\t\treturn err\n\tcase sig := <-stop:\n\t\ts.log.Info(\"server shutdown request received\", zap.String(\"signal\", sig.String()))\n\t}\n\n\tctx, cancel := context.WithTimeout(context.Background(), s.timeout)\n\terr := s.svr.Shutdown(ctx)\n\tcancel() // Cancel the timeout, since we already finished.\n\n\treturn err\n}", "func (mf MiddlewareFunc) Run(req *Request, handler Handler) (*Response, error) {\n\treturn mf(req, handler)\n}", "func (con *Controller) ServeHTTP(res http.ResponseWriter, req *http.Request) {\n\tstartTime := time.Now()\n\n\tswitch con.Status {\n\tcase 0: // This will actually never show because this function won't run if the server is off\n\t\thttp.Error(res, \"The server is currently down and not serving requests.\", http.StatusServiceUnavailable)\n\t\treturn\n\tcase 1: // Normal\n\t\tbreak\n\tcase 2: // Maintenance mode\n\t\thttp.Error(res, \"The server is currently in maintenance mode and not serving requests.\", http.StatusServiceUnavailable)\n\t\treturn\n\tcase 3: // This will actually never show because this function won't run if the server is off\n\t\thttp.Error(res, \"The server is currently restarting.\", http.StatusServiceUnavailable)\n\t\treturn\n\t}\n\n\tpath := req.URL.Path[1:]\n\tif len(path) == 0 {\n\t\tpath = (con.DocumentRoot + \"index.html\")\n\t} else {\n\t\tpath = (con.DocumentRoot + path)\n\t}\n\n\tf, err := os.Open(path)\n\n\tif err != nil {\n\t\tcon.PublicLogger.Write(path + \"::\" + strconv.Itoa(http.StatusNotFound) + \"::\" + err.Error())\n\t\trouting.HttpThrowStatus(http.StatusNotFound, res)\n\t\treturn\n\t}\n\n\tcontentType, err := routing.GetContentType(path)\n\n\tif err != nil {\n\t\tcon.PublicLogger.Write(path + \"::\" + strconv.Itoa(http.StatusUnsupportedMediaType) + \"::\" + err.Error())\n\t\trouting.HttpThrowStatus(http.StatusUnsupportedMediaType, res)\n\t\treturn\n\t}\n\n\tres.Header().Add(\"Content-Type\", contentType)\n\t_, err = io.Copy(res, f)\n\n\tif err != nil {\n\t\tcon.PublicLogger.Write(path + \"::\" + strconv.Itoa(http.StatusInternalServerError) + \"::\" + err.Error())\n\t\trouting.HttpThrowStatus(http.StatusInternalServerError, res)\n\t\treturn\n\t}\n\n\telapsedTime := time.Since(startTime)\n\tcon.LoadTimeLogger.Write(path + \" rendered in \" + strconv.FormatFloat(elapsedTime.Seconds(), 'f', 6, 64) + \" seconds\")\n}", "func (c *DummyCache) Run(req *web.Request, fnGenerate func(w io.Writer) bool) {\n\tvar buf = &bytes.Buffer{}\n\tif fnGenerate(buf) {\n\t\treq.Respond(web.StatusOK, web.HeaderContentType, \"text/html\").Write(buf.Bytes())\n\t} else {\n\t\treq.Error(web.StatusNotFound, os.NewError(\"Not Found.\"))\n\t}\n}", "func (h Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tif h.Skip(r) {\n\t\th.Next.ServeHTTP(w, r)\n\t\treturn\n\t}\n\tvar written int64\n\tvar status = -1\n\n\twp := writerProxy{\n\t\th: func() http.Header {\n\t\t\treturn w.Header()\n\t\t},\n\t\tw: func(bytes []byte) (int, error) {\n\t\t\tbw, err := w.Write(bytes)\n\t\t\twritten += int64(bw)\n\t\t\treturn bw, err\n\t\t},\n\t\twh: func(code int) {\n\t\t\tstatus = code\n\t\t\tw.WriteHeader(code)\n\t\t},\n\t}\n\n\tstart := time.Now()\n\th.Next.ServeHTTP(wp, r)\n\tduration := time.Now().Sub(start)\n\n\t// Use default status.\n\tif status == -1 {\n\t\tstatus = 200\n\t}\n\n\th.Logger(r, status, written, duration)\n}", "func (eh *executorHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\t// Preprocess\n\tif 
eh.preprocessHost != \"\" {\n\t\tif err := eh.preprocess(r); err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t}\n\n\t// Predict\n\tb, err := eh.predict(r)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\t// Postprocess\n\tif eh.postprocessHost != \"\" {\n\t\tb, err = eh.postprocess(r, b)\n\t\tif err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t}\n\n\t// Write final response\n\t_, err = w.Write(b)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n}", "func (s *HttpServer) Run() {\n\n\tgo s.httpServer()\n\t<-s.quitChan\n}", "func Run(addr string, timeout time.Duration, h http.Handler) {\n\thttp.Handle(\"/\", h)\n\tgraceful.Run(addr, timeout, http.DefaultServeMux)\n}", "func (t *Timer) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tdefer t.UpdateSince(time.Now())\n\tt.handler.ServeHTTP(w, r)\n}", "func (f *HTTPFeeder) Run(out chan observation.InputObservation) error {\n\tf.OutChan = out\n\tf.Server = &http.Server{\n\t\tAddr: fmt.Sprintf(\"%s:%d\", f.Host, f.Port),\n\t\tHandler: f,\n\t}\n\tlog.Infof(\"accepting submissions on port %v\", f.Port)\n\tgo func() {\n\t\terr := f.Server.ListenAndServe()\n\t\tif err != nil {\n\t\t\tlog.Info(err)\n\t\t}\n\t}()\n\tf.IsRunning = true\n\treturn nil\n}", "func (prh PlanReplayerHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {\n\tparams := mux.Vars(req)\n\tname := params[handler.FileName]\n\thandler := downloadFileHandler{\n\t\tfilePath: filepath.Join(replayer.GetPlanReplayerDirName(), name),\n\t\tfileName: name,\n\t\tinfoGetter: prh.infoGetter,\n\t\taddress: prh.address,\n\t\tstatusPort: prh.statusPort,\n\t\turlPath: fmt.Sprintf(\"plan_replayer/dump/%s\", name),\n\t\tdownloadedFilename: \"plan_replayer\",\n\t\tscheme: util.InternalHTTPSchema(),\n\t\tstatsHandle: prh.statsHandle,\n\t\tis: prh.is,\n\t}\n\thandleDownloadFile(handler, w, req)\n}", "func (p *Proxy) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tjob, hashStr, realURL := translateURL(*r.URL)\n\tjobInfo := p.spManager.GetJob(job)\n\tif jobInfo == nil {\n\t\tp.log.Errorf(\"can not found job client of %s\", job)\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\n\thash, err := strconv.ParseUint(hashStr, 10, 64)\n\tif err != nil {\n\t\tp.log.Errorf(\"unexpected hash string %s\", hashStr)\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\n\ttar := p.targets[hash]\n\tif tar == nil {\n\t\tp.log.Errorf(\"unexpect tar : %d\", hash)\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tstart := time.Now()\n\tvar scrapErr error\n\tdefer func() {\n\t\ttar.SetScrapeErr(start, scrapErr)\n\t\tif scrapErr != nil {\n\t\t\tp.log.Errorf(\"%s/%s : %s\", job, realURL.String(), scrapErr.Error())\n\t\t\tw.WriteHeader(http.StatusBadRequest)\n\t\t}\n\t}()\n\n\t// real scraping\n\tdata, contentType, err := jobInfo.Scrape(realURL.String())\n\tif err != nil {\n\t\tscrapErr = fmt.Errorf(\"get data %v\", err)\n\t\treturn\n\t}\n\n\tsamples, err := scrape.StatisticSample(data, contentType, jobInfo.Config.MetricRelabelConfigs)\n\tif err != nil {\n\t\tscrapErr = fmt.Errorf(\"statisticSample failed %v\", err)\n\t\treturn\n\t}\n\n\t// send origin result to prometheus\n\tif _, err := io.Copy(w, bytes.NewBuffer(data)); err != nil {\n\t\tscrapErr = fmt.Errorf(\"copy data to prometheus failed %v\", err)\n\t\treturn\n\t}\n\n\ttar.UpdateSamples(samples)\n}", "func 
Process(c http.ResponseWriter, req *http.Request) {\n\tmainServer.Process(c, req)\n}", "func (p *Proxy) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tproxyTotal.WithLabelValues().Inc()\n\tstopReason := p.getCurCfg().ExtraConfig.StopScrapeReason\n\n\tjob, hashStr, realURL := translateURL(*r.URL)\n\tjobInfo := p.getJob(job)\n\tif jobInfo == nil {\n\t\tp.log.Errorf(\"can not found job client of %s\", job)\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\n\thash, err := strconv.ParseUint(hashStr, 10, 64)\n\tif err != nil {\n\t\tp.log.Errorf(\"unexpected hash string %s\", hashStr)\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\n\ttar := p.getStatus()[hash]\n\n\tstart := time.Now()\n\tvar scrapErr error\n\tdefer func() {\n\t\tif scrapErr != nil {\n\t\t\tp.log.Errorf(scrapErr.Error())\n\t\t\tw.WriteHeader(http.StatusBadRequest)\n\t\t\tif tar != nil {\n\t\t\t\ttar.LastScrapeStatistics = scrape.NewStatisticsSeriesResult()\n\t\t\t}\n\t\t} else if stopReason != \"\" {\n\t\t\tp.log.Warnf(stopReason)\n\t\t\tw.WriteHeader(http.StatusBadRequest)\n\t\t\tscrapErr = fmt.Errorf(stopReason)\n\t\t}\n\n\t\tif tar != nil {\n\t\t\ttar.ScrapeTimes++\n\t\t\ttar.SetScrapeErr(start, scrapErr)\n\t\t}\n\t}()\n\n\tscraper := scrape.NewScraper(jobInfo, realURL.String(), p.log)\n\tif stopReason == \"\" {\n\t\tscraper.WithRawWriter(w)\n\t}\n\n\tif err := scraper.RequestTo(); err != nil {\n\t\tscrapErr = fmt.Errorf(\"RequestTo %s %s %v\", job, realURL.String(), err)\n\t\treturn\n\t}\n\tw.Header().Set(\"Content-Type\", scraper.HTTPResponse.Header.Get(\"Content-Type\"))\n\n\trs := scrape.NewStatisticsSeriesResult()\n\tif err := scraper.ParseResponse(func(rows []parser.Row) error {\n\t\tscrape.StatisticSeries(rows, jobInfo.Config.MetricRelabelConfigs, rs)\n\t\treturn nil\n\t}); err != nil {\n\t\tscrapErr = fmt.Errorf(\"copy data to prometheus failed %v\", err)\n\t\tif time.Since(start) > time.Duration(jobInfo.Config.ScrapeTimeout) {\n\t\t\tscrapErr = fmt.Errorf(\"scrape timeout\")\n\t\t}\n\t\treturn\n\t}\n\n\tproxySeries.WithLabelValues(jobInfo.Config.JobName, realURL.String()).Set(float64(rs.ScrapedTotal))\n\tproxyScrapeDurtion.WithLabelValues(jobInfo.Config.JobName, realURL.String()).Set(float64(time.Now().Sub(start)))\n\tif tar != nil {\n\t\ttar.UpdateScrapeResult(rs)\n\t}\n}", "func (h TicketsHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tvar ticketDetails ticket.Ticket\n\tif r.Method == \"POST\" {\n\t\tbody, err := ioutil.ReadAll(r.Body)\n\t\tif err != nil {\n\t\t\thttp.Error(w, \"Error reading request body\",\n\t\t\t\thttp.StatusInternalServerError)\n\t\t}\n\t\tjson.Unmarshal(body, &ticketDetails)\n\n\t\tif ticketDetails.Platform == \"\" {\n\t\t\thttp.Error(w, \"Invalid request method\", http.StatusNotAcceptable)\n\t\t\treturn\n\t\t}\n\t}\n\n\th.queueTickets(ticketDetails)\n\n\tdata := []byte(\"you requested has been taken, we are trying to connect to agent now...tring...tring...tring...tring...\")\n\tw.Write(data)\n\n}", "func (a *App) handleReq(w http.ResponseWriter, r *http.Request) {\n\tif r.Method != \"GET\" {\n\t\thttp.Error(w, \"Invalid request method.\", 405)\n\t\treturn\n\t}\n\n\tduration, err := strconv.Atoi(r.URL.Query().Get(\"duration\"))\n\tif err != nil {\n\t\tduration = 0\n\t}\n\n\thttpcode, err := strconv.Atoi(r.URL.Query().Get(\"httpcode\"))\n\tif err != nil {\n\t\thttpcode = 200\n\t}\n\n\tworksecs, err := strconv.Atoi(r.URL.Query().Get(\"worksecs\"))\n\tif err != nil {\n\t\tworksecs = 0\n\t}\n\n\tworkfail := \"true\" == 
r.URL.Query().Get(\"workfail\")\n\n\tif worksecs > 0 && !a.enqueue(Job{worksecs, workfail}) {\n\t\tw.WriteHeader(507)\n\t\treturn\n\t}\n\n\ttime.Sleep(time.Duration(duration) * time.Millisecond)\n\tw.WriteHeader(httpcode)\n}", "func (t *Task) Run() error {\n\tred := color.New(color.FgRed).SprintFunc()\n\turl := fmt.Sprintf(\"%s%s\", t.BaseURL, t.Path)\n\n\treq, e := request.NewRequester(t.Method, t.Config.Insecure, t.Config.Detail)\n\tif e != nil {\n\t\tfmt.Printf(\"%s: %s\\n\", red(\"Error\"), e)\n\t\treturn e\n\t}\n\treq.SetHeaders(t.applyVarsToMap(t.Headers))\n\treq.SetCookies(t.Cookies)\n\n\treqBody := t.applyVars(t.RequestBody)\n\tif len(t.UploadList) > 0 {\n\t\tuploadRequest, e := t.UploadList.ToRequestBody()\n\t\tif e != nil {\n\t\t\treturn e\n\t\t}\n\n\t\treqBody = uploadRequest.RequestBody\n\t\treq.SetHeaders(map[string]string{\n\t\t\t\"Content-Type\": uploadRequest.ContentType,\n\t\t\t\"Content-Length\": fmt.Sprintf(\"%d\", len(reqBody)),\n\t\t})\n\t}\n\n\tresp, e := req.Request(t.applyVars(url), reqBody)\n\tif e != nil {\n\t\tfmt.Printf(\"%s: %s\\n\", red(\"Error\"), e)\n\t\treturn e\n\t}\n\n\tr := response.NewResponse(resp, t.Config.Detail)\n\tif t.Config.Detail {\n\t\tr.LogResponse()\n\t}\n\n\tref := referrable.NewReferrable(r)\n\n\tt.Cookies = []*http.Cookie{}\n\n\tfor _, v := range r.Cookies {\n\t\tt.Cookies = append(t.Cookies, v)\n\t}\n\n\tfor k, v := range t.Captures {\n\t\tr, ok := ref.Find(v)\n\t\tif ok {\n\t\t\tt.Captured[k] = r[0]\n\t\t} else {\n\t\t\te = fmt.Errorf(\"unable to capture data from response: %s\", k)\n\t\t\treturn e\n\t\t}\n\t}\n\n\treturn e\n}", "func (s *server) ServeHTTP(rw http.ResponseWriter, r *http.Request) {\n\t// Get request and response from the pool.\n\n\treq := s.requestPool.Get().(*Request)\n\tres := s.responsePool.Get().(*Response)\n\n\t// Tie the request body and the standard request body together.\n\n\tr.Body = &requestBody{\n\t\tr: req,\n\t\thr: r,\n\t\trc: r.Body,\n\t}\n\n\t// Reset the request.\n\n\treq.Air = s.a\n\treq.SetHTTPRequest(r)\n\treq.res = res\n\treq.params = req.params[:0]\n\treq.routeParamNames = nil\n\treq.routeParamValues = nil\n\treq.parseRouteParamsOnce = &sync.Once{}\n\treq.parseOtherParamsOnce = &sync.Once{}\n\tfor key := range req.values {\n\t\tdelete(req.values, key)\n\t}\n\n\treq.localizedString = nil\n\n\t// Reset the response.\n\n\tres.Air = s.a\n\tres.SetHTTPResponseWriter(&responseWriter{\n\t\tr: res,\n\t\trw: rw,\n\t})\n\tres.Status = http.StatusOK\n\tres.ContentLength = -1\n\tres.Written = false\n\tres.Minified = false\n\tres.Gzipped = false\n\tres.req = req\n\tres.ohrw = rw\n\tres.servingContent = false\n\tres.serveContentError = nil\n\tres.reverseProxying = false\n\tres.deferredFuncs = res.deferredFuncs[:0]\n\n\t// Chain the gases stack.\n\n\th := func(req *Request, res *Response) error {\n\t\th := s.a.router.route(req)\n\t\tfor i := len(s.a.Gases) - 1; i >= 0; i-- {\n\t\t\th = s.a.Gases[i](h)\n\t\t}\n\n\t\treturn h(req, res)\n\t}\n\n\t// Chain the pregases stack.\n\n\tfor i := len(s.a.Pregases) - 1; i >= 0; i-- {\n\t\th = s.a.Pregases[i](h)\n\t}\n\n\t// Execute the chain.\n\n\tif err := h(req, res); err != nil {\n\t\tif !res.Written && res.Status < http.StatusBadRequest {\n\t\t\tres.Status = http.StatusInternalServerError\n\t\t}\n\n\t\ts.a.ErrorHandler(err, req, res)\n\t}\n\n\t// Execute the deferred functions.\n\n\tfor i := len(res.deferredFuncs) - 1; i >= 0; i-- {\n\t\tres.deferredFuncs[i]()\n\t}\n\n\t// Put the route param values back to the pool.\n\n\tif req.routeParamValues != nil 
{\n\t\ts.a.router.routeParamValuesPool.Put(req.routeParamValues)\n\t}\n\n\t// Put the request and response back to the pool.\n\n\ts.requestPool.Put(req)\n\ts.responsePool.Put(res)\n}", "func (h *handler) ServeHTTP(w http.ResponseWriter, req *http.Request) {\n\tif !h.tracer.Recording() || h.requestIgnorer(req) {\n\t\th.handler.ServeHTTP(w, req)\n\t\treturn\n\t}\n\ttx, body, req := StartTransactionWithBody(h.tracer, h.requestName(req), req)\n\tdefer tx.End()\n\n\tw, resp := WrapResponseWriter(w)\n\n\tdefer func() {\n\t\tif v := recover(); v != nil {\n\t\t\tif h.panicPropagation {\n\t\t\t\tdefer panic(v)\n\t\t\t\t// 500 status code will be set only for APM transaction\n\t\t\t\t// to allow other middleware to choose a different response code\n\t\t\t\tif resp.StatusCode == 0 {\n\t\t\t\t\tresp.StatusCode = http.StatusInternalServerError\n\t\t\t\t}\n\t\t\t} else if resp.StatusCode == 0 {\n\t\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\t}\n\t\t\th.recovery(w, req, resp, body, tx, v)\n\t\t}\n\t\tSetTransactionContext(tx, req, resp, body)\n\t\tbody.Discard()\n\t}()\n\th.handler.ServeHTTP(w, req)\n\tif resp.StatusCode == 0 {\n\t\tresp.StatusCode = http.StatusOK\n\t}\n}", "func (a *App) Run() error {\n\treturn http.Serve(a.Listener, a.Router)\n}", "func (Executor) ServeHTTP(w http.ResponseWriter, req *http.Request) {\n\tlogger.Logging(logger.DEBUG, \"receive msg\", req.Method, req.URL.Path)\n\tdefer logger.Logging(logger.DEBUG, \"OUT\")\n\n\tswitch reqUrl := req.URL.Path; {\n\tdefault:\n\t\tlogger.Logging(logger.DEBUG, \"Unknown URL\")\n\t\tcommon.MakeErrorResponse(w, errors.NotFoundURL{reqUrl})\n\n\tcase !(strings.Contains(reqUrl, (url.Base()+url.Management())) ||\n\t\tstrings.Contains(reqUrl, (url.Base()+url.Monitoring())) ||\n\t\tstrings.Contains(reqUrl, (url.Base()+url.Notification()))):\n\t\tlogger.Logging(logger.DEBUG, \"Unknown URL\")\n\t\tcommon.MakeErrorResponse(w, errors.NotFoundURL{reqUrl})\n\n\tcase strings.Contains(reqUrl, url.Unregister()):\n\t\thealthAPIExecutor.Handle(w, req)\n\n\tcase strings.Contains(reqUrl, url.Management()) &&\n\t\tstrings.Contains(reqUrl, url.Apps()):\n\t\tdeploymentAPIExecutor.Handle(w, req)\n\n\tcase strings.Contains(reqUrl, url.Resource()):\n\t\tresourceAPIExecutor.Handle(w, req)\n\n\tcase strings.Contains(reqUrl, url.Configuration()):\n\t\tconfigurationAPIExecutor.Handle(w, req)\n\n\tcase strings.Contains(reqUrl, url.Device()):\n\t\tdeviceAPIExecutor.Handle(w, req)\n\n\tcase strings.Contains(reqUrl, url.Notification()):\n\t\tnotificationAPIExecutor.Handle(w, req)\n\t}\n}", "func (b *backend) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n}", "func (e *Manager) ServeHTTP(rw http.ResponseWriter, req *http.Request) {\n\tif e.name == \"generic-container-manager\" {\n\t\tfmt.Printf(\"Request object: %+v\", req)\n\t\thost := req.Host\n\t\tif host == \"\" {\n\t\t\thost = req.URL.Host\n\t\t}\n\t\tpath := req.URL.Path\n\t\tif strings.Contains(path, \".\") {\n\t\t\tif s := strings.Split(path, \"/\"); len(s) > 2 {\n\t\t\t\tpath = strings.Join(s[:len(s)-1], \"\")\n\t\t\t} else {\n\t\t\t\te.next.ServeHTTP(rw, req)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\te.request, _ = buildRequest(e.serviceUrl, e.name, e.timeout, host, path)\n\t\tfmt.Println(\"Request set to \", e.request)\n\t}\n\n\tstarting := false\n\tvar status string\n\tvar err error\n\tfor status, err = getServiceStatus(e.request); err == nil && status == \"starting\"; status, err = getServiceStatus(e.request) {\n\t\tstarting = true\n\t}\n\n\tif starting {\n\t\ttime.Sleep(1 * 
time.Second)\n\t\thttp.Redirect(rw, req, req.URL.Path, http.StatusTemporaryRedirect)\n\t}\n\n\tif err != nil {\n\t\trw.WriteHeader(http.StatusInternalServerError)\n\t\trw.Write([]byte(err.Error()))\n\t}\n\n\tif status == \"started\" {\n\t\t// Service started, forward the request\n\t\te.next.ServeHTTP(rw, req)\n\n\t} else {\n\t\t// Error\n\t\trw.WriteHeader(http.StatusInternalServerError)\n\t\trw.Write([]byte(\"Unexpected status answer from Manager service\"))\n\t}\n}", "func (h *proxyHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tif r.Method == http.MethodPost {\n\t\tif r.URL.Path == \"/quit\" {\n\t\t\tw.Header().Set(\"Content-Length\", \"0\")\n\t\t\tw.WriteHeader(200)\n\t\t\th.shutdown = true\n\t\t\treturn\n\t\t}\n\t}\n\n\tif r.Method != http.MethodGet {\n\t\tw.Header().Set(\"Content-Length\", \"0\")\n\t\tw.WriteHeader(http.StatusMethodNotAllowed)\n\t\treturn\n\t}\n\n\tif r.URL.Path == \"\" || !strings.HasPrefix(r.URL.Path, \"/\") {\n\t\tw.Header().Set(\"Content-Length\", \"0\")\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tvar err error\n\n\tif r.URL.Path == \"/manifest\" {\n\t\terr = h.implManifest(w, r)\n\t} else if strings.HasPrefix(r.URL.Path, \"/blobs/\") {\n\t\tblob := filepath.Base(r.URL.Path)\n\t\terr = h.implBlob(w, r, blob)\n\t} else {\n\t\tw.Header().Set(\"Content-Length\", \"0\")\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tif !quiet {\n\t\t\tfmt.Fprintf(os.Stderr, \"%v\\n\", err)\n\t\t}\n\t\tw.Write([]byte(err.Error()))\n\t\treturn\n\t}\n}", "func (s *StatusPageHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tif s.Authenticator != nil {\n\t\t_, err := s.Authenticator.Authenticate(r.Context(), w, r)\n\t\tif errors.Is(err, oidc.ErrRedirectRequired) {\n\t\t\treturn\n\t\t}\n\t\tif err != nil {\n\t\t\thttp.Error(w, \"Error: Authentication failed\", http.StatusInternalServerError)\n\t\t\tlog.Logger(\"webserver\").Error(\"Authentication failed\", \"error\", err, \"time\", s.Clock.Now().String())\n\t\t\treturn\n\t\t}\n\t}\n\n\tlog.Logger(\"webserver\").Info(\"Applier status request\", \"time\", s.Clock.Now().String())\n\tif s.Template == nil {\n\t\thttp.Error(w, \"Error: Unable to load HTML template\", http.StatusInternalServerError)\n\t\tlog.Logger(\"webserver\").Error(\"Request failed\", \"error\", \"No template found\", \"time\", s.Clock.Now().String())\n\t\treturn\n\t}\n\tctx, cancel := context.WithTimeout(context.Background(), s.Timeout)\n\tdefer cancel()\n\twaybills, err := s.KubeClient.ListWaybills(ctx)\n\tif err != nil {\n\t\thttp.Error(w, fmt.Sprintf(\"Error: Unable to list Waybill resources: %v\", err), http.StatusInternalServerError)\n\t\tlog.Logger(\"webserver\").Error(\"Unable to list Waybill resources\", \"error\", err, \"time\", s.Clock.Now().String())\n\t\treturn\n\t}\n\tevents, err := s.KubeClient.ListWaybillEvents(ctx)\n\tif err != nil {\n\t\thttp.Error(w, fmt.Sprintf(\"Error: Unable to list Waybill events: %v\", err), http.StatusInternalServerError)\n\t\tlog.Logger(\"webserver\").Error(\"Unable to list Waybill events\", \"error\", err, \"time\", s.Clock.Now().String())\n\t\treturn\n\t}\n\tresult := GetNamespaces(waybills, events, s.DiffURLFormat)\n\n\trendered := &bytes.Buffer{}\n\tif err := s.Template.ExecuteTemplate(rendered, \"index\", result); err != nil {\n\t\thttp.Error(w, \"Error: Unable to render HTML template\", http.StatusInternalServerError)\n\t\tlog.Logger(\"webserver\").Error(\"Request 
failed\", \"error\", http.StatusInternalServerError, \"time\", s.Clock.Now().String(), \"err\", err)\n\t\treturn\n\t}\n\tw.WriteHeader(http.StatusOK)\n\tif _, err := rendered.WriteTo(w); err != nil {\n\t\tlog.Logger(\"webserver\").Error(\"Request failed\", \"error\", http.StatusInternalServerError, \"time\", s.Clock.Now().String(), \"err\", err)\n\t}\n\tlog.Logger(\"webserver\").Info(\"Request completed successfully\", \"time\", s.Clock.Now().String())\n}", "func (service *HTTPServer) Run() error {\n\n\tgo service.syncCached()\n\n\treturn service.engine.Run(service.laddr)\n}", "func (s *ServeSeq) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\ti := &intrespwriter{parent: w, written: false}\n\tfor idx, h := range s.handlers {\n\t\ti.canskip = (idx != len(s.handlers)-1)\n\t\ti.skip = false\n\t\th.ServeHTTP(i, r)\n\t\tif i.written {\n\t\t\treturn\n\t\t}\n\t}\n}", "func (b *Backend) Do(r *http.Request) (*http.Response, error) {\n\tif err := b.limiter.Wait(false); err != nil {\n\t\treturn nil, err\n\t}\n\treturn b.client.Do(r)\n}", "func (p *Ping) TryServeHTTP(rw http.ResponseWriter, req *http.Request) (bool, error) {\n\tif req.URL.Path != \"/health/ping\" {\n\t\treturn true, nil\n\t}\n\n\trw.WriteHeader(http.StatusOK)\n\t_, err := rw.Write([]byte(\"OK\"))\n\treturn false, err\n}", "func (adapter *TunerAdapter) RunHTTPServer() {\n\taddr := rest.TunerAddr\n\tmux := http.NewServeMux()\n\tmux.HandleFunc(rest.TunerCurrentStations, adapter.handleCurrentStation)\n\tmux.HandleFunc(rest.TunerStationList, adapter.handleStationList)\n\tmux.HandleFunc(rest.TunerSubscription, adapter.handleSubscription)\n\tlog.Println(\"Starting TunerAdapter at\", addr)\n\thttp.ListenAndServe(addr, mux)\n\tlog.Println(\"Stopping TunerAdapterr...\")\n}", "func (task *TaskType) RunHandler() (interface{}, error) {\n\tvar response interface{}\n\tvar err error\n\n\tresponse, err = requestHandler[task.Name](task)\n\n\tutils.Log.Info(fmt.Sprintf(\"Proccessing of task %s completed\", task.Name))\n\n\tif err != nil {\n\t\tutils.Log.Error(fmt.Sprintf(\"Failed to process request: %s: \", task.Name), err)\n\t\treturn nil, err\n\t}\n\n\treturn response, nil\n}", "func (s *AppServer) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\ts.logger.Debugf(`processing %s \"%s\"`, r.Method, s.filterParameters(r.URL))\n\n\t// rate limit\n\tif s.throttle != nil {\n\t\t<-s.throttle.C\n\t}\n\n\ts.handler.ServeHTTP(w, r)\n}", "func Run() {\n\trouter := getRouter()\n\ts := &http.Server{\n\t\tAddr: \"0.0.0.0:8080\",\n\t\tHandler: router,\n\t\tReadTimeout: 10 * time.Second,\n\t\tWriteTimeout: 10 * time.Second,\n\t\tMaxHeaderBytes: 1 << 20,\n\t}\n\ts.ListenAndServe()\n}", "func (p *Proxy) ServeHTTP(w http.ResponseWriter, req *http.Request) {\n\tjob := p.stream.NewJob(\"proxy.serve\")\n\n\t// Add headers\n\tfor key, vals := range p.lastResponseHeaders {\n\t\tfor _, val := range vals {\n\t\t\tw.Header().Set(key, val)\n\t\t}\n\t}\n\tw.Header().Set(\"X-OpenBazaar\", \"Trade free!\")\n\n\t// Write body\n\t_, err := w.Write(p.lastResponseBody)\n\tif err != nil {\n\t\tjob.EventErr(\"write\", err)\n\t\tjob.Complete(health.Error)\n\t}\n\n\tjob.Complete(health.Success)\n}", "func (config Config) RunHTTPServer() {\n\t// Set up a channel to listen to for interrupt signals\n\tvar runChan = make(chan os.Signal, 1)\n\n\t// Set up a context to allow for graceful server shutdowns in the event\n\t// of an OS interrupt (defers the cancel just in case)\n\tctx, cancel := 
context.WithTimeout(\n\t\tcontext.Background(),\n\t\tconfig.PilotLight.Server.Timeout.Server,\n\t)\n\tdefer cancel()\n\n\t// Create install-config.yaml file\n\tPreflightSetup(config)\n\n\t// Define server options\n\tserver := &http.Server{\n\t\tAddr: config.PilotLight.Server.Host + \":\" + config.PilotLight.Server.Port,\n\t\tHandler: NewRouter(config.PilotLight.Server.Path),\n\t\tReadTimeout: config.PilotLight.Server.Timeout.Read * time.Second,\n\t\tWriteTimeout: config.PilotLight.Server.Timeout.Write * time.Second,\n\t\tIdleTimeout: config.PilotLight.Server.Timeout.Idle * time.Second,\n\t}\n\n\t// Only listen on IPV4\n\tl, err := net.Listen(\"tcp4\", config.PilotLight.Server.Host+\":\"+config.PilotLight.Server.Port)\n\tcheck(err)\n\n\t// Handle ctrl+c/ctrl+z interrupt\n\tsignal.Notify(runChan, os.Interrupt, syscall.SIGTSTP)\n\n\t// Alert the user that the server is starting\n\tlog.Printf(\"Server is starting on %s\\n\", server.Addr)\n\n\t// Run the server on a new goroutine\n\tgo func() {\n\t\t//if err := server.ListenAndServe(); err != nil {\n\t\tif err := server.Serve(l); err != nil {\n\t\t\tif err == http.ErrServerClosed {\n\t\t\t\t// Normal interrupt operation, ignore\n\t\t\t} else {\n\t\t\t\tlog.Fatalf(\"Server failed to start due to err: %v\", err)\n\t\t\t}\n\t\t}\n\t}()\n\n\t// Block on this channel listening for those previously defined syscalls; assign\n\t// to a variable so we can let the user know why the server is shutting down\n\tinterrupt := <-runChan\n\n\t// If we get one of the pre-prescribed syscalls, gracefully terminate the server\n\t// while alerting the user\n\tlog.Printf(\"Server is shutting down due to %+v\\n\", interrupt)\n\tif err := server.Shutdown(ctx); err != nil {\n\t\tlog.Fatalf(\"Server was unable to gracefully shutdown due to err: %+v\", err)\n\t}\n}", "func (b Bumper) Run() error {\n\trouter := httprouter.New()\n\trouter.GET(\"/-/\", b.getIndexHandler)\n\trouter.GET(\"/-/api/state\", b.getHostStateHandler)\n\trouter.GET(\"/-/api/trigger\", b.getHostHandler)\n\trouter.POST(\"/-/api/trigger\", b.collectActivityHandler)\n\trouter.NotFound = b\n\n\treturn http.ListenAndServe(b.Listener, router)\n}", "func Handle(rw http.ResponseWriter, r *http.Request) {\n\tfmt.Fprintf(rw, \"response\")\n}", "func (t Telemetry) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tt.rCount.Mark(1)\n\tsw := MakeLogger(w)\n\n\tstart := time.Now()\n\tt.inner.ServeHTTP(sw, r)\n\tt.tmr.Update(int64(time.Since(start) / time.Millisecond))\n\n\tif sw.Status() >= 300 {\n\t\tt.fCount.Mark(1)\n\t} else {\n\t\tt.sCount.Mark(1)\n\t}\n\n}", "func (s *Server) serveHTTP(w http.ResponseWriter, r *http.Request) {\n\ts.HTTP.Handler.ServeHTTP(w, r)\n}", "func (s *WebServer) Run(addr string) error {\n\tinitHandlers(s)\n\texpvar.Publish(\"Goroutines\", expvar.Func(func() interface{} {\n\t\treturn runtime.NumGoroutine()\n\t}))\n\n\thttp.Handle(\"/prom\", s.hub.Metrics.getHandler())\n\n\tsock, err := net.Listen(\"tcp\", addr)\n\tif err != nil {\n\t\treturn err\n\t}\n\tgo func() {\n\t\tfmt.Println(\"HTTP now available at\", addr)\n\t\tlog.Fatal(http.Serve(sock, nil))\n\t}()\n\treturn nil\n}", "func (a *App) Run() error {\n\tserver := &http.Server{\n\t\tAddr: a.addr,\n\t\tHandler: a.handle,\n\t\tReadTimeout: 10 * time.Second,\n\t\tWriteTimeout: 10 * time.Second,\n\t\tMaxHeaderBytes: 1 << 20,\n\t}\n\tLogs.Info(\"Run Server %s\", a.addr)\n\treturn server.ListenAndServe()\n}", "func (s *Server) Run(port uint16) error {\n    s.client = &http.Client{\n        CheckRedirect: func(req *http.Request, via []*http.Request) 
error {\n            return http.ErrUseLastResponse\n        },\n        Timeout: 10 * time.Second,\n    }\n    addr := fmt.Sprintf(\":%d\", port)\n    return http.ListenAndServe(addr, s)\n}", "func (r AlternativeReq) Run() error {\n\tr.Handler.handleAlterRequest(r, util.Transport)\n\treturn nil\n}", "func (p *Proxy) ServeHTTP(rw http.ResponseWriter, req *http.Request) {\n\tvar err error\n\tvar tf webutil.HTTPTraceFinisher\n\tsrw := webutil.NewStatusResponseWriter(rw)\n\n\tdefer func() {\n\t\t// NOTE: This uses the outer scope's `err` by design. This way updates\n\t\t// to `err` will be reflected on (deferred) exit.\n\t\tr := recover()\n\n\t\t// see: https://golang.org/pkg/net/http/#ErrAbortHandler\n\t\tif r != nil && r != http.ErrAbortHandler {\n\t\t\t// Wrap the error with the reason for the panic.\n\t\t\terr = ex.Nest(err, ex.New(r))\n\t\t}\n\t\tif tf != nil {\n\t\t\ttf.Finish(srw.StatusCode(), err)\n\t\t}\n\t\tif err != nil {\n\t\t\tif p.Log != nil {\n\t\t\t\tp.Log.Fatalf(\"%v\", err)\n\t\t\t} else {\n\t\t\t\tfmt.Fprintf(os.Stderr, \"%v\\n\", err)\n\t\t\t}\n\t\t}\n\t}()\n\n\tif p.Tracer != nil {\n\t\ttf, req = p.Tracer.Start(req)\n\t}\n\n\t// set the default resolver if unset.\n\tif p.Resolver == nil {\n\t\tp.Resolver = RoundRobinResolver(p.Upstreams)\n\t}\n\n\tupstream, err := p.Resolver(req, p.Upstreams)\n\tif err != nil {\n\t\tlogger.MaybeError(p.Log, err)\n\t\tsrw.WriteHeader(http.StatusBadGateway)\n\t\treturn\n\t}\n\n\tif upstream == nil {\n\t\tsrw.WriteHeader(http.StatusBadGateway)\n\t\treturn\n\t}\n\n\t// Add extra forwarded headers.\n\t// these are required for a majority of services to function correctly behind\n\t// a reverse proxy.\n\t// They are \"Add\" vs. \"Set\" in case there are existing values.\n\tif port := webutil.GetPort(req); port != \"\" {\n\t\treq.Header.Add(\"X-Forwarded-Port\", port)\n\t}\n\tif proto := webutil.GetProto(req); proto != \"\" {\n\t\treq.Header.Add(\"X-Forwarded-Proto\", proto)\n\t}\n\t// add upstream headers.\n\tfor key, values := range p.Headers {\n\t\tfor _, value := range values {\n\t\t\treq.Header.Add(key, value)\n\t\t}\n\t}\n\tif p.TransformRequest != nil {\n\t\tp.TransformRequest(req)\n\t}\n\tupstream.ServeHTTP(srw, req)\n}", "func (rl *RateLimiter) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\trl.once.Do(func() {\n\t\trl.secInterval = int(rl.Interval.Seconds())\n\t\tif rl.KeyMaker == nil {\n\t\t\trl.KeyMaker = DefaultKeyMaker\n\t\t}\n\t})\n\tstatus, err := rl.do(w, r)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), status)\n\t\treturn\n\t}\n}", "func Run(srv *server.Server, templatePath string) {\n\t// Register plain ol' http handlers.\n\tr := srv.Routes\n\n\tbaseMW := router.NewMiddlewareChain()\n\tbaseAuthMW := baseMW.Extend(\n\t\tmiddleware.WithContextTimeout(time.Minute),\n\t\tauth.Authenticate(srv.CookieAuth),\n\t)\n\thtmlMW := baseAuthMW.Extend(\n\t\twithGitMiddleware,\n\t\twithBuildbucketBuildsClient,\n\t\twithBuildbucketBuildersClient,\n\t\ttemplates.WithTemplates(getTemplateBundle(templatePath, srv.Options.ImageVersion(), srv.Options.Prod)),\n\t)\n\txsrfMW := htmlMW.Extend(xsrf.WithTokenCheck)\n\tprojectMW := htmlMW.Extend(buildProjectACLMiddleware(false))\n\toptionalProjectMW := htmlMW.Extend(buildProjectACLMiddleware(true))\n\n\tr.GET(\"/\", htmlMW, frontpageHandler)\n\tr.GET(\"/p\", baseMW, movedPermanently(\"/\"))\n\tr.GET(\"/search\", htmlMW, redirect(\"/ui/search\", http.StatusFound))\n\tr.GET(\"/opensearch.xml\", baseMW, searchXMLHandler)\n\n\t// Artifacts.\n\tr.GET(\"/artifact/*path\", baseMW, redirect(\"/ui/artifact/*path\", 
http.StatusFound))\n\n\t// Invocations.\n\tr.GET(\"/inv/*path\", baseMW, redirect(\"/ui/inv/*path\", http.StatusFound))\n\n\t// Builds.\n\tr.GET(\"/b/:id\", htmlMW, handleError(redirectLUCIBuild))\n\tr.GET(\"/p/:project/builds/b:id\", baseMW, movedPermanently(\"/b/:id\"))\n\n\tbuildPageMW := router.NewMiddlewareChain(func(c *router.Context, next router.Handler) {\n\t\tshouldShowNewBuildPage := getShowNewBuildPageCookie(c)\n\t\tif shouldShowNewBuildPage {\n\t\t\tredirect(\"/ui/p/:project/builders/:bucket/:builder/:numberOrId\", http.StatusFound)(c)\n\t\t} else {\n\t\t\tnext(c)\n\t\t}\n\t}).Extend(optionalProjectMW...)\n\tr.GET(\"/p/:project/builders/:bucket/:builder/:numberOrId\", buildPageMW, handleError(handleLUCIBuild))\n\t// TODO(crbug/1108198): remove this route once we have turned down the old build page.\n\tr.GET(\"/old/p/:project/builders/:bucket/:builder/:numberOrId\", optionalProjectMW, handleError(handleLUCIBuild))\n\n\t// Only the new build page can take path suffix, redirect to the new build page.\n\tr.GET(\"/b/:id/*path\", baseMW, redirect(\"/ui/b/:id/*path\", http.StatusFound))\n\tr.GET(\"/p/:project/builds/b:id/*path\", baseMW, redirect(\"/ui/b/:id/*path\", http.StatusFound))\n\tr.GET(\"/p/:project/builders/:bucket/:builder/:numberOrId/*path\", baseMW, redirect(\"/ui/p/:project/builders/:bucket/:builder/:numberOrId/*path\", http.StatusFound))\n\n\t// Console\n\tr.GET(\"/p/:project\", projectMW, handleError(func(c *router.Context) error {\n\t\treturn ConsolesHandler(c, c.Params.ByName(\"project\"))\n\t}))\n\tr.GET(\"/p/:project/\", baseMW, movedPermanently(\"/p/:project\"))\n\tr.GET(\"/p/:project/g\", baseMW, movedPermanently(\"/p/:project\"))\n\tr.GET(\"/p/:project/g/:group/console\", projectMW, handleError(ConsoleHandler))\n\tr.GET(\"/p/:project/g/:group\", projectMW, redirect(\"/p/:project/g/:group/console\", http.StatusFound))\n\tr.GET(\"/p/:project/g/:group/\", baseMW, movedPermanently(\"/p/:project/g/:group\"))\n\n\t// Builder list\n\t// Redirects to the lit-element implementation.\n\tr.GET(\"/p/:project/builders\", baseMW, redirect(\"/ui/p/:project/builders\", http.StatusFound))\n\tr.GET(\"/p/:project/g/:group/builders\", baseMW, redirect(\"/ui/p/:project/g/:group/builders\", http.StatusFound))\n\n\t// Swarming\n\tr.GET(swarming.URLBase+\"/:id/steps/*logname\", htmlMW, handleError(HandleSwarmingLog))\n\tr.GET(swarming.URLBase+\"/:id\", htmlMW, handleError(handleSwarmingBuild))\n\t// Backward-compatible URLs for Swarming:\n\tr.GET(\"/swarming/prod/:id/steps/*logname\", htmlMW, handleError(HandleSwarmingLog))\n\tr.GET(\"/swarming/prod/:id\", htmlMW, handleError(handleSwarmingBuild))\n\n\t// Buildbucket\n\t// If these routes change, also change links in common/model/build_summary.go:getLinkFromBuildID\n\t// and common/model/builder_summary.go:SelfLink.\n\tr.GET(\"/p/:project/builders/:bucket/:builder\", optionalProjectMW, handleError(BuilderHandler))\n\n\tr.GET(\"/buildbucket/:bucket/:builder\", baseMW, redirectFromProjectlessBuilder)\n\n\t// LogDog Milo Annotation Streams.\n\t// This mimics the `logdog://logdog_host/project/*path` url scheme seen on\n\t// swarming tasks.\n\tr.GET(\"/raw/build/:logdog_host/:project/*path\", htmlMW, handleError(handleRawPresentationBuild))\n\n\tpubsubMW := router.NewMiddlewareChain(\n\t\tauth.Authenticate(&openid.GoogleIDTokenAuthMethod{\n\t\t\tAudienceCheck: openid.AudienceMatchesHost,\n\t\t}),\n\t\twithBuildbucketBuildsClient,\n\t)\n\tpusherID := identity.Identity(fmt.Sprintf(\"user:buildbucket-pubsub@%s.iam.gserviceaccount.com\", 
srv.Options.CloudProject))\n\n\t// PubSub subscription endpoints.\n\tr.POST(\"/push-handlers/buildbucket\", pubsubMW, func(ctx *router.Context) {\n\t\tif got := auth.CurrentIdentity(ctx.Context); got != pusherID {\n\t\t\tlogging.Errorf(ctx.Context, \"Expecting ID token of %q, got %q\", pusherID, got)\n\t\t\tctx.Writer.WriteHeader(403)\n\t\t} else {\n\t\t\tbuildbucket.PubSubHandler(ctx)\n\t\t}\n\t})\n\n\tr.POST(\"/actions/cancel_build\", xsrfMW, handleError(cancelBuildHandler))\n\tr.POST(\"/actions/retry_build\", xsrfMW, handleError(retryBuildHandler))\n\n\tr.GET(\"/internal_widgets/related_builds/:id\", htmlMW, handleError(handleGetRelatedBuildsTable))\n\n\t// Config for ResultUI frontend.\n\tr.GET(\"/configs.js\", baseMW, handleError(configsJSHandler))\n\n\tr.GET(\"/auth-state\", baseAuthMW, handleError(getAuthState))\n}", "func (f *httpForwarder) serveHTTP(w http.ResponseWriter, inReq *http.Request, ctx *handlerContext) {\n\tif f.log.GetLevel() >= log.DebugLevel {\n\t\tlogEntry := f.log.WithField(\"Request\", utils.DumpHttpRequest(inReq))\n\t\tlogEntry.Debug(\"vulcand/oxy/forward/http: begin ServeHttp on request\")\n\t\tdefer logEntry.Debug(\"vulcand/oxy/forward/http: completed ServeHttp on request\")\n\t}\n\n\tstart := time.Now().UTC()\n\n\toutReq := new(http.Request)\n\t*outReq = *inReq // includes shallow copies of maps, but we handle this in Director\n\n\trevproxy := httputil.ReverseProxy{\n\t\tDirector: func(req *http.Request) {\n\t\t\tf.modifyRequest(req, inReq.URL)\n\t\t},\n\t\tTransport: f.roundTripper,\n\t\tFlushInterval: f.flushInterval,\n\t\tModifyResponse: f.modifyResponse,\n\t\tBufferPool: f.bufferPool,\n\t\tErrorHandler: ctx.errHandler.ServeHTTP,\n\t}\n\n\tif f.log.GetLevel() >= log.DebugLevel {\n\t\tpw := utils.NewProxyWriter(w)\n\t\trevproxy.ServeHTTP(pw, outReq)\n\n\t\tif inReq.TLS != nil {\n\t\t\tf.log.Debugf(\"vulcand/oxy/forward/http: Round trip: %v, code: %v, Length: %v, duration: %v tls:version: %x, tls:resume:%t, tls:csuite:%x, tls:server:%v\",\n\t\t\t\tinReq.URL, pw.StatusCode(), pw.GetLength(), time.Now().UTC().Sub(start),\n\t\t\t\tinReq.TLS.Version,\n\t\t\t\tinReq.TLS.DidResume,\n\t\t\t\tinReq.TLS.CipherSuite,\n\t\t\t\tinReq.TLS.ServerName)\n\t\t} else {\n\t\t\tf.log.Debugf(\"vulcand/oxy/forward/http: Round trip: %v, code: %v, Length: %v, duration: %v\",\n\t\t\t\tinReq.URL, pw.StatusCode(), pw.GetLength(), time.Now().UTC().Sub(start))\n\t\t}\n\t} else {\n\t\trevproxy.ServeHTTP(w, outReq)\n\t}\n\n\tfor key := range w.Header() {\n\t\tif strings.HasPrefix(key, http.TrailerPrefix) {\n\t\t\tif fl, ok := w.(http.Flusher); ok {\n\t\t\t\tfl.Flush()\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t}\n\n}", "func run() error {\n\tmux := makeMuxRouter()\n\thttpAddr := os.Getenv(\"ADDR\")\n\tlog.Println(\"Servlet listening on port \", httpAddr)\n\tserver := &http.Server{\n\t\tAddr: \":\" + httpAddr,\n\t\tHandler: mux,\n\t\tReadTimeout: 10 * time.Second,\n\t\tWriteTimeout: 10 * time.Second,\n\t\tMaxHeaderBytes: 1 << 20,\n\t}\n\n\tif err := server.ListenAndServe(); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (c *Client) Run(ctx context.Context, req *Request, resp interface{}) error {\n\tselect {\n\tcase <-ctx.Done():\n\t\treturn ctx.Err()\n\tdefault:\n\t}\n\n\treturn c.runWithJSON(ctx, req, resp)\n}", "func (c *Cache) Run(req *web.Request, fnGenerate func(w io.Writer) bool) {\n\tcached, found := c.items[req.URL.String()]\n\tif !found {\n\t\tvar buf = &bytes.Buffer{}\n\t\tif !fnGenerate(buf) {\n\t\t\treq.Error(web.StatusNotFound, os.NewError(\"Not 
Found.\"))\n\t\t\treturn\n\t\t}\n\n\t\tcached = buf.Bytes()\n\t\tc.items[req.URL.String()] = cached\n\t}\n\n\treq.Respond(web.StatusOK, web.HeaderContentType, \"text/html\").Write(cached)\n}", "func main() {\n\tHandleRequests( )\n}", "func (rm *REKTManager) Do(req http.Request) {\n\n\trm.wg.Add(1)\n\tgo func(req http.Request) {\n\t\tdefer rm.wg.Done()\n\t\tif rm.headers != nil {\n\t\t\tfor k, v := range rm.headers {\n\t\t\t\treq.Header[k] = v\n\t\t\t}\n\t\t}\n\n\t\tif (rm.username != \"\") || (rm.password != \"\") {\n\t\t\treq.SetBasicAuth(rm.username, rm.password)\n\t\t}\n\n\t\trm.tokens <- struct{}{}\n\t\trm.respwg.Add(1)\n\t\trm.respchan <- rm.worker(req)\n\t\t<-rm.tokens\n\t}(req)\n}", "func (h TestServerHandler) ServeHTTP(writer http.ResponseWriter, request *http.Request) {\n\twriter.WriteHeader(h.StatusCode)\n\twriter.Header().Add(\"Content-Type\", \"text/plain\")\n\t_, _ = writer.Write([]byte(h.Content))\n}", "func (c *Client) Run(ctx context.Context, req *Request, resp interface{}) error {\n\tselect {\n\tcase <-ctx.Done():\n\t\treturn ctx.Err()\n\tdefault:\n\t}\n\tif len(req.files) > 0 && !c.useMultipartForm {\n\t\treturn errors.New(\"cannot send files with PostFields option\")\n\t}\n\tif c.useMultipartForm {\n\t\treturn c.runWithPostFields(ctx, req, resp)\n\t}\n\treturn c.runWithJSON(ctx, req, resp)\n}", "func (r rigis) ServeHTTP(rw http.ResponseWriter, req *http.Request) {\n\n\t// if filter match then bad request\n\tif !r.executeFilter(req) {\n\t\tlogrus.WithFields(formatErrorLog(req)).Error(\"Request Forbidden.\")\n\t\tresponseError(rw, req, getErrorEntity(http.StatusForbidden))\n\t\treturn\n\t}\n\n\tfor i := 0; i < len(r.nodes); i++ {\n\t\t// if rule unmatch then skip this node\n\t\tif !r.nodes[i].executeRule(req) {\n\t\t\tcontinue\n\t\t}\n\n\t\t// if filter match then bad request\n\t\tif !r.nodes[i].executeFilter(req) {\n\t\t\tlogrus.WithFields(formatErrorLog(req)).Error(\"Request Forbidden.\")\n\t\t\tresponseError(rw, req, getErrorEntity(http.StatusForbidden))\n\t\t\treturn\n\t\t}\n\n\t\tr.nodes[i].serveHTTP(rw, req)\n\t\treturn\n\t}\n\n\t// not match all rule\n\tlogrus.WithFields(formatErrorLog(req)).Error(\"Contnts Not Found.\")\n\tresponseError(rw, req, getErrorEntity(http.StatusNotFound))\n}", "func (s *Server) Run() {\n\t// use default mux\n\thealth := healthcheck.NewHandler()\n\ts.router.Handle(\"/\", health)\n\ts.router.Handle(\"/metrics\", promhttp.Handler())\n\n\ts.router.HandleFunc(fmt.Sprintf(\"/v1/models/%s:predict\", s.options.ModelName), s.PredictHandler).Methods(\"POST\")\n\n\toperationName := nethttp.OperationNameFunc(func(r *http.Request) string {\n\t\treturn fmt.Sprintf(\"%s %s\", r.Method, r.URL.Path)\n\t})\n\n\taddr := fmt.Sprintf(\":%s\", s.options.Port)\n\tsrv := &http.Server{\n\t\tAddr: addr,\n\t\tHandler: nethttp.Middleware(opentracing.GlobalTracer(), s.router, operationName),\n\t\tWriteTimeout: s.options.HTTPServerTimeout,\n\t\tReadTimeout: s.options.HTTPServerTimeout,\n\t\tIdleTimeout: 2 * s.options.HTTPServerTimeout,\n\t}\n\n\tstopCh := setupSignalHandler()\n\terrCh := make(chan error, 1)\n\tgo func() {\n\t\ts.logger.Info(\"starting standard transformer at : \" + addr)\n\t\t// Don't forward ErrServerClosed as that indicates we're already shutting down.\n\t\tif err := srv.ListenAndServe(); err != nil && err != http.ErrServerClosed {\n\t\t\terrCh <- errors.Wrapf(err, \"server failed\")\n\t\t}\n\t\ts.logger.Info(\"server shut down successfully\")\n\t}()\n\n\t// Exit as soon as we see a shutdown signal or the server failed.\n\tselect {\n\tcase 
<-stopCh:\n\tcase err := <-errCh:\n\t\ts.logger.Error(fmt.Sprintf(\"failed to run HTTP server: %v\", err))\n\t}\n\n\ts.logger.Info(\"server shutting down...\")\n\n\tif err := srv.Shutdown(context.Background()); err != nil {\n\t\ts.logger.Error(fmt.Sprintf(\"failed to shutdown HTTP server: %v\", err))\n\t}\n}", "func (p *HTTPProxy) ServeHTTP(w http.ResponseWriter, req *http.Request, subject string) {\n\tlogrus.Debug(\"HTTPProxy.ServeHTTP()\")\n\tdone := make(chan bool)\n\tresBroker := NewAgentResponseBroker(w)\n\ttunnel := newTunnel(p.nc, subject, p.readTimeout, resBroker.handleResponse)\n\terr := tunnel.open()\n\tif err != nil {\n\t\tlogrus.WithError(err).Info(\"HTTPProxy.ServeHTTP() - failed to send request\")\n\t\thttp.Error(w, \"failed to tunnel\", http.StatusInternalServerError)\n\t\tdone <- true\n\t}\n\tdefer tunnel.close()\n\n\terr = tunnel.send(req)\n\tif err != nil {\n\t\thttp.Error(w, \"failed to send request\", http.StatusInternalServerError)\n\t\tlogrus.WithError(err).Debugf(\"HTTPProxy.tunnel.send err - %s\", err)\n\t\tdone <- true\n\t}\n\n\t// Wait for done or timeout.\n\tselect {\n\tcase <-tunnel.done:\n\t\tlogrus.Debug(\"HTTPProxy.ServeHTTP - tunnel done\")\n\tcase <-tunnel.mon.TimedOut():\n\t\tlogrus.Debug(\"HTTPProxy.ServeHTTP - connection timeout\")\n\tcase <-resBroker.done:\n\t\ttunnel.notifyClosingToAgent()\n\t\tlogrus.Debug(\"HTTPProxy.ServeHTTP - resBroker done\")\n\tcase <-req.Context().Done():\n\t\ttunnel.notifyClosingToAgent()\n\t\tlogrus.Debug(\"HTTPProxy.ServeHTTP - request context done\")\n\tcase <-done:\n\t\tlogrus.Debug(\"HTTPProxy.ServeHTTP - done\")\n\t}\n}", "func (a *TdmaServerAPI) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tvar pl lorawan.TdmaReqPayload\n\n\tb, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\ta.returnError(w, http.StatusInternalServerError, backend.Other, \"read body error\")\n\t\treturn\n\t}\n\n\terr = json.Unmarshal(b, &pl)\n\tif err != nil {\n\t\ta.returnError(w, http.StatusBadRequest, backend.Other, err.Error())\n\t\treturn\n\t}\n\n\tlog.WithFields(log.Fields{\n\t\t\"DevEUI\": pl.DevEUI,\n\t\t\"DevAddr\": pl.DevAddr,\n\t\t\"TxCycle\": pl.TxCycle,\n\t}).Info(\"ts: request received\")\n\n\t//TODO: for expanding\n\ta.handleTdmaJoinReq(w, b)\n}", "func (t *tracer) ServeHTTP(w http.ResponseWriter, req *http.Request) {\n\tvar sr = statusRecorder{w, 200}\n\tvar path = req.URL.RawPath\n\tif path == \"\" {\n\t\tpath = req.URL.Path\n\t}\n\n\tvar start = time.Now()\n\tt.handler.ServeHTTP(&sr, req)\n\tvar finish = time.Now()\n\n\t// To avoid blocking when the events are being processed, we send the event\n\t// to the tracer's list asynchronously\n\tgo t.appendEvent(path, start, finish, sr.status)\n}", "func (e *Engine) Run(port int64) {\n\thttp.HandleFunc(\"/\", handleRequest)\n\tstartHTTPServer(port)\n}", "func (a *Api) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\n\t// Strip mountpoint\n\turl := a.Path(r)\n\n\tvar err error\n\t// @todo, chain middlewares..\n\tswitch {\n\tcase url == \"/add\":\n\t\terr = a.Add(w, r)\n\tcase strings.HasPrefix(url, \"/task/\"):\n\t\terr = a.Status(w, r)\n\tdefault:\n\t\terr = a.List(w, r)\n\t}\n\n\tif err != nil {\n\t\ta.Err = err\n\t\tlog.Printf(\"error serving request %s\", err)\n\t\thttp.Error(w, \"Internal server error\", 500)\n\t\treturn\n\t}\n\n}", "func (r Rhole) ServeHTTP(rw http.ResponseWriter, req *http.Request) {\n\n\tfor i := 0; i < len(r.Dummies); i++ {\n\n\t\tif strings.HasPrefix(strings.ToUpper(req.RequestURI), strings.ToUpper(r.Dummies[i].Path)) {\n\t\t\t// output dump 
log\n\t\t\tlogrus.WithFields(r.formatDumpLog(req)).Info(r.Dummies[i].Name)\n\n\t\t\t// response dummy text\n\t\t\tResponseDummy(rw, req, r.Dummies[i])\n\t\t\treturn\n\t\t}\n\t}\n\n\t// output dump log\n\tlogrus.WithFields(r.formatDumpLog(req)).Info(r.Default.Name)\n\tResponseDummy(rw, req, r.Default)\n}", "func (f *Fibre) Run(opts HTTPOptions, files ...string) {\n\n\tvar err error\n\tvar s *graceful.Server\n\n\tw := f.logger.Writer()\n\tdefer w.Close()\n\n\tswitch v := opts.(type) {\n\tcase string:\n\t\ts = &graceful.Server{\n\t\t\tTimeout: f.wait,\n\t\t\tServer: &http.Server{\n\t\t\t\tAddr: v,\n\t\t\t\tHandler: f,\n\t\t\t\tIdleTimeout: f.itimeout,\n\t\t\t\tReadTimeout: f.rtimeout,\n\t\t\t\tWriteTimeout: f.wtimeout,\n\t\t\t\tErrorLog: log.New(w, \"\", 0),\n\t\t\t},\n\t\t}\n\tcase *http.Server:\n\t\ts = &graceful.Server{\n\t\t\tTimeout: f.wait,\n\t\t\tServer: v,\n\t\t}\n\t\ts.Server.Handler = f\n\tcase *graceful.Server:\n\t\ts = v\n\t\ts.Server.Handler = f\n\t}\n\n\tif len(files) != 2 {\n\t\terr = s.ListenAndServe()\n\t}\n\n\tif len(files) == 2 {\n\t\terr = s.ListenAndServeTLS(files[0], files[1])\n\t}\n\n\tif err != nil {\n\t\tf.Logger().Fatal(err)\n\t}\n\n}", "func (srv *Server) ServeHTTP(\n\tresp http.ResponseWriter,\n\treq *http.Request,\n) {\n\t// Reject incoming connections during shutdown, pretend the server is temporarily unavailable\n\tsrv.opsLock.Lock()\n\tif srv.shutdown {\n\t\tsrv.opsLock.Unlock()\n\t\thttp.Error(resp, \"Server shutting down\", http.StatusServiceUnavailable)\n\t\treturn\n\t}\n\tsrv.opsLock.Unlock()\n\n\tswitch req.Method {\n\tcase \"OPTIONS\":\n\t\tsrv.hooks.OnOptions(resp)\n\t\treturn\n\tcase \"WEBWIRE\":\n\t\tsrv.handleMetadata(resp)\n\t\treturn\n\t}\n\n\tif !srv.hooks.BeforeUpgrade(resp, req) {\n\t\treturn\n\t}\n\n\t// Establish connection\n\tconn, err := srv.connUpgrader.Upgrade(resp, req)\n\tif err != nil {\n\t\tsrv.errorLog.Print(\"Upgrade failed:\", err)\n\t\treturn\n\t}\n\n\t// Register connected client\n\tnewClient := newClientAgent(conn, req.Header.Get(\"User-Agent\"), srv)\n\n\tsrv.clientsLock.Lock()\n\tsrv.clients = append(srv.clients, newClient)\n\tsrv.clientsLock.Unlock()\n\n\t// Call hook on successful connection\n\tsrv.hooks.OnClientConnected(newClient)\n\n\tfor {\n\t\t// Await message\n\t\tmessage, err := conn.Read()\n\t\tif err != nil {\n\t\t\tif newClient.HasSession() {\n\t\t\t\t// Decrement number of connections for this clients session\n\t\t\t\tsrv.SessionRegistry.deregister(newClient)\n\t\t\t}\n\n\t\t\tif err.IsAbnormalCloseErr() {\n\t\t\t\tsrv.warnLog.Printf(\"Abnormal closure error: %s\", err)\n\t\t\t}\n\n\t\t\tnewClient.unlink()\n\t\t\tsrv.hooks.OnClientDisconnected(newClient)\n\t\t\treturn\n\t\t}\n\n\t\t// Parse message\n\t\tvar msg Message\n\t\tif err := msg.Parse(message); err != nil {\n\t\t\tsrv.errorLog.Println(\"Failed parsing message:\", err)\n\t\t\tbreak\n\t\t}\n\n\t\t// Prepare message\n\t\t// Reference the client associated with this message\n\t\tmsg.Client = newClient\n\n\t\tmsg.createReplyCallback(newClient, srv)\n\t\tmsg.createFailCallback(newClient, srv)\n\n\t\t// Handle message\n\t\tif err := srv.handleMessage(&msg); err != nil {\n\t\t\tsrv.errorLog.Printf(\"CRITICAL FAILURE: %s\", err)\n\t\t\tbreak\n\t\t}\n\t}\n}", "func (fs *FileServer) ServeHTTP(w http.ResponseWriter, r *http.Request) { //nolint:funlen,gocognit,gocyclo\n\tif !fs.methodIsAllowed(r.Method) {\n\t\tfs.handleError(w, r, http.StatusMethodNotAllowed)\n\n\t\treturn\n\t}\n\n\tif fs.Settings.RedirectIndexFileToRoot && len(fs.Settings.IndexFileName) > 0 {\n\t\t// 
redirect .../index.html to .../\n\t\tif strings.HasSuffix(r.URL.Path, \"/\"+fs.Settings.IndexFileName) {\n\t\t\thttp.Redirect(w, r, r.URL.Path[0:len(r.URL.Path)-len(fs.Settings.IndexFileName)], http.StatusMovedPermanently)\n\n\t\t\treturn\n\t\t}\n\t}\n\n\turlPath := r.URL.Path\n\n\t// add leading `/` (if required)\n\tif len(urlPath) == 0 || !strings.HasPrefix(urlPath, \"/\") {\n\t\turlPath = \"/\" + r.URL.Path\n\t}\n\n\t// if directory requested (or server root) - add index file name\n\tif len(fs.Settings.IndexFileName) > 0 && urlPath[len(urlPath)-1] == '/' {\n\t\turlPath += fs.Settings.IndexFileName\n\t}\n\n\t// prepare target file path\n\tfilePath := path.Join(fs.Settings.FilesRoot, filepath.FromSlash(path.Clean(urlPath)))\n\n\t// look for response in cache\n\tif fs.CacheAvailable() {\n\t\tif cached, cacheHit := fs.Cache.Get(filePath); cacheHit {\n\t\t\thttp.ServeContent(w, r, filepath.Base(filePath), cached.ModifiedTime, cached.Content)\n\n\t\t\treturn\n\t\t}\n\t}\n\n\t// check for file existence\n\tif stat, err := os.Stat(filePath); err == nil && stat.Mode().IsRegular() {\n\t\tif file, err := os.Open(filePath); err == nil {\n\t\t\tdefer file.Close()\n\n\t\t\tvar fileContent io.ReadSeeker\n\n\t\t\t// put file content into cache, if it is possible\n\t\t\tif fs.CacheAvailable() &&\n\t\t\t\tfs.Cache.Count() < fs.Settings.CacheMaxItems &&\n\t\t\t\tstat.Size() <= fs.Settings.CacheMaxFileSize {\n\t\t\t\tif data, err := ioutil.ReadAll(file); err == nil {\n\t\t\t\t\tfileContent = bytes.NewReader(data)\n\n\t\t\t\t\tfs.Cache.Set(filePath, fs.Settings.CacheTTL, &cache.Item{\n\t\t\t\t\t\tModifiedTime: stat.ModTime(),\n\t\t\t\t\t\tContent: fileContent,\n\t\t\t\t\t})\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif fileContent == nil {\n\t\t\t\tfileContent = file\n\t\t\t}\n\n\t\t\thttp.ServeContent(w, r, filepath.Base(filePath), stat.ModTime(), fileContent)\n\n\t\t\treturn\n\t\t}\n\n\t\tfs.handleError(w, r, http.StatusInternalServerError)\n\n\t\treturn\n\t}\n\n\tfs.handleError(w, r, http.StatusNotFound)\n}", "func (self *Proxy) ServeHTTP(wri http.ResponseWriter, req *http.Request) {\n\tvar i int\n\tvar err error\n\tvar transport *http.Transport\n\tvar scheme string\n\n\tpr := self.NewProxyRequest(wri, req)\n\n\tpr.Request.Header.Add(\"Host\", pr.Request.Host)\n\n\t// Running directors.\n\tfor i, _ = range self.Directors {\n\t\tself.Directors[i](pr)\n\t}\n\n\tpr.Request.Header.Add(\"Host\", pr.Request.Host)\n\n\t// Creating a request that will be sent to the destination server.\n\tout := new(http.Request)\n\n\tif req.TLS == nil {\n\t\ttransport = &http.Transport{}\n\t\tscheme = \"http\"\n\t} else {\n\t\ttransport = &http.Transport{\n\t\t\tTLSClientConfig: &tls.Config{InsecureSkipVerify: true},\n\t\t}\n\t\tscheme = \"https\"\n\t}\n\n\t*out = *pr.Request\n\tout.Proto = \"HTTP/1.1\"\n\tout.ProtoMajor = 1\n\tout.ProtoMinor = 1\n\tout.Close = false\n\n\tout.URL.Scheme = scheme\n\n\tout.URL.Host = pr.Request.Host\n\n\t// Proxying client request to destination server.\n\tpr.Response, err = transport.RoundTrip(out)\n\n\t// Waiting for an answer.\n\tif err != nil {\n\t\tlog.Printf(ErrProxyRequestFailed.Error(), err.Error())\n\t\treturn\n\t}\n\n\t// Running interceptors.\n\tfor i, _ = range self.Interceptors {\n\t\tself.Interceptors[i](pr)\n\t}\n\n\t// Copying response headers to the response we are going to send to the\n\t// client.\n\tcopyHeader(pr.ResponseWriter.Header(), pr.Response.Header)\n\n\t// Writing response status.\n\tpr.ResponseWriter.WriteHeader(pr.Response.StatusCode)\n\n\twclosers := make([]io.WriteCloser, 0, 
len(self.Writers))\n\n\t// Running writers.\n\tfor i, _ := range self.Writers {\n\t\twcloser, err := self.Writers[i](pr)\n\t\tif wcloser != nil {\n\t\t\twclosers = append(wclosers, wcloser)\n\t\t}\n\t\tif err != nil {\n\t\t\tlog.Printf(ErrWriterFailed.Error(), err.Error())\n\t\t}\n\t}\n\n\t// Loggers.\n\tfor i, _ = range self.Loggers {\n\t\tself.Loggers[i](pr)\n\t}\n\n\t// Writing response.\n\tif pr.Response.Body != nil {\n\t\twriters := make([]io.Writer, 0, len(wclosers)+1)\n\t\twriters = append(writers, pr.ResponseWriter)\n\t\tfor i, _ := range wclosers {\n\t\t\twriters = append(writers, wclosers[i])\n\t\t}\n\t\tio.Copy(io.MultiWriter(writers...), pr.Response.Body)\n\t}\n\n\t// Closing response.\n\tpr.Response.Body.Close()\n\n\t// Closing associated writers.\n\tfor i, _ := range wclosers {\n\t\twclosers[i].Close()\n\t}\n\n}", "func (a App) Run() error {\n\ta.log.Printf(\"config %+v\", a.params)\n\twg := &sync.WaitGroup{}\n\tqueue := make(chan string)\n\tresults := make(chan result)\n\n\twg.Add(1)\n\tgo func() {\n\t\tdefer close(queue)\n\t\ta.log.Printf(\"queue sender started\")\n\t\tfor _, url := range a.params.URLs {\n\t\t\ta.log.Printf(\"send to queue: %s\", url)\n\t\t\tqueue <- url\n\t\t}\n\t\twg.Done()\n\t}()\n\n\tfor i := 0; i < a.params.Parallel; i++ {\n\t\ti := i\n\t\twg.Add(1)\n\t\tgo func(queue <-chan string, results chan<- result, wg *sync.WaitGroup) {\n\t\t\ta.log.Printf(\"worker %d started\", i)\n\t\t\tfor job := range queue {\n\t\t\t\tif requestedURL, body, err := download(a.client, job); err != nil {\n\t\t\t\t\ta.log.Printf(\"downloaded with error: %s\", err)\n\t\t\t\t\tresults <- result{\n\t\t\t\t\t\terr: err,\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\ta.log.Printf(\"%s downloaded successfully\", requestedURL)\n\t\t\t\t\tresults <- result{\n\t\t\t\t\t\tbody: fmt.Sprintf(\"%x\", md5.Sum(body)),\n\t\t\t\t\t\turl: requestedURL,\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\twg.Done()\n\t\t\ta.log.Printf(\"worker done: %d\", i)\n\t\t}(queue, results, wg)\n\t}\n\n\tgo func() {\n\t\twg.Wait()\n\t\ta.log.Printf(\"close results\")\n\t\tclose(results)\n\t}()\n\n\tfor r := range results {\n\t\tif r.err != nil {\n\t\t\ta.log.Printf(\"error: %s\", r.err)\n\t\t} else {\n\t\t\tif _, err := fmt.Fprintf(a.w, \"%s %s\\n\", r.url, r.body); err != nil {\n\t\t\t\treturn fmt.Errorf(\"error writing results: %s\", err)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}", "func main() {\n\tcolor := os.Getenv(\"COLOR\")\n\tif color == \"\" {\n\t\tlog.Fatalf(\"no COLOR defined\")\n\t}\n\tport := os.Getenv(\"PORT\")\n\tif port == \"\" {\n\t\tlog.Fatalf(\"no PORT defined\")\n\t}\n\tlog.Printf(\"COLOR is: %v\", color)\n\tlog.Printf(\"PORT is: %v\", port)\n\tflakeRate := float32(0.0)\n\tflakeCode := 200\n\n\tmux := http.NewServeMux()\n\tmux.HandleFunc(\"/ping\", func(w http.ResponseWriter, r *http.Request) {})\n\n\tmux.HandleFunc(\"/\", func(w http.ResponseWriter, r *http.Request) {\n\t\tlog.Printf(\"Received request: %v\", r)\n\t\tif rand.Float32() < flakeRate {\n\t\t\thttp.Error(w, \"flaky server\", flakeCode)\n\t\t\treturn\n\t\t}\n\t\tfmt.Fprintf(w, \"%s\", color)\n\t})\n\n\tmux.HandleFunc(\"/setFlake\", func(w http.ResponseWriter, r *http.Request) {\n\t\tlog.Printf(\"Received request: %v\", r)\n\t\tquery := r.URL.Query()\n\t\trates, ok := query[\"rate\"]\n\t\tif !ok {\n\t\t\thttp.Error(w, \"rate must be specified\", 400)\n\t\t\tlog.Printf(\"Could not read rate parameter\")\n\t\t\treturn\n\t\t}\n\t\trate, err := strconv.ParseFloat(rates[0], 32)\n\t\tif err != nil {\n\t\t\thttp.Error(w, err.Error(), 
400)\n\t\t\tlog.Printf(\"Could not parse rate parameter: %v\", err)\n\t\t\treturn\n\t\t}\n\t\tif rate < 0.0 || rate > 1.0 {\n\t\t\thttp.Error(w, \"rate must be between 0.0 and 1.0\", 400)\n\t\t\tlog.Printf(\"Invalid rate parameter: %v\", rate)\n\t\t\treturn\n\t\t}\n\n\t\tcodes, ok := query[\"code\"]\n\t\tif !ok {\n\t\t\thttp.Error(w, \"code must be specified\", 400)\n\t\t\tlog.Printf(\"Could not read code parameter: %v\", err)\n\t\t\treturn\n\t\t}\n\n\t\tcode, err := strconv.ParseInt(codes[0], 10, 32)\n\t\tif err != nil {\n\t\t\thttp.Error(w, err.Error(), 400)\n\t\t\tlog.Printf(\"Could not parse code parameter: %v\", err)\n\t\t\treturn\n\t\t}\n\n\t\tfmt.Fprintf(w, \"rate: %g, code: %d\", flakeRate, flakeCode)\n\t\tflakeRate = float32(rate)\n\t\tflakeCode = int(code)\n\t})\n\t//\th2s := &http2.Server{}\n\t//\th1s := &http.Server{\n\t//\t\tAddr: \"0.0.0.0:\" + port,\n\t//\t\tHandler: h2c.NewHandler(mux, h2s),\n\t//\t}\n\t//\tlog.Fatal(h1s.ListenAndServe())\n\t//}\n\th2s := &http2.Server{}\n\th1s := &http.Server{\n\t\tHandler: h2c.NewHandler(mux, h2s),\n\t\tReadTimeout: 5 * time.Second,\n\t\tWriteTimeout: 5 * time.Second,\n\t}\n\tl, err := net.Listen(\"tcp6\", \":\"+port)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\terr = h1s.Serve(l)\n}", "func (gg *guessingGame) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\t// Ensure all responses are of content type application JSON.\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\n\t// Ensure the HTTP method is a POST.\n\tif r.Method != http.MethodPost {\n\t\tw.WriteHeader(http.StatusMethodNotAllowed)\n\t\tw.Write([]byte(`{\"message\":\"only HTTP POST methods are supported\"}`))\n\t\treturn\n\t}\n\n\t// Read guess from the request.\n\tvar g guess\n\terr := json.NewDecoder(r.Body).Decode(&g)\n\tdefer func() {\n\t\tif closeErr := r.Body.Close(); closeErr != nil {\n\t\t\tlog.Printf(\"failed to close response body: %s\", closeErr)\n\t\t}\n\t}()\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tw.Write([]byte(`{\"message\":\"failed to read JSON body\"}`))\n\t\treturn\n\t}\n\n\tstatus, body := gg.guess(g.Number)\n\n\t// Write the result of the guess to the client connection.\n\tw.WriteHeader(status)\n\tw.Write([]byte(body))\n}", "func (d Desync) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\n\t// nothing to send so return default response: 200 OK\n\tif r.URL.String() == \"/\" {\n\t\treturn\n\t}\n\n\tb, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\tlog.Printf(\"%v\\n\", err)\n\t}\n\tdefer r.Body.Close()\n\n\tm := &Message{r.URL.String()[1:], r.Method, r.Header, b, d.debug}\n\n\tif d.debug {\n\t\tlog.Printf(\"%T: %v\\n\", m, m)\n\t}\n\t// send Message to channel\n\td.q <- m\n}", "func (app *App) Run(httpTermination chan<- struct{}) {\n\tvar cancel context.CancelFunc\n\tapp.ctx, cancel = context.WithCancel(context.Background())\n\tdefer cancel()\n\n\thttpServerErr := make(chan error)\n\n\tserver, err := app.createHTTPServer()\n\tif err != nil {\n\t\tapp.logger.Fatal(\"could not create http server, %v\", err.Error())\n\t}\n\n\tgo func() {\n\t\tapp.logger.Printf(\"server is listening on %s\", server.Addr)\n\t\thttpServerErr <- server.ListenAndServe()\n\t}()\n\n\tsigint := make(chan os.Signal, 1)\n\tdefer close(sigint)\n\tsignal.Notify(sigint, os.Interrupt, syscall.SIGTERM)\n\n\tselect {\n\tcase <-app.ctx.Done():\n\t\tapp.logger.Printf(\"context cancellation %v \\n\", app.ctx.Err().Error())\n\tcase err := <-httpServerErr:\n\t\tapp.logger.Printf(\"could not run server: %s \\n\", err.Error())\n\tcase 
sig := <-sigint:\n\t\tapp.logger.Printf(\"signal received: %v \\n\", sig.String())\n\t}\n\n\tapp.logger.Println(\"HTTP server is gracefully shutting down, waiting for active connections to finish\")\n\tif err := server.Shutdown(app.ctx); err != nil {\n\t\t// Error from closing listeners, or context timeout:\n\t\tapp.logger.Fatalf(\"could not gracefully shutdown the server: %s\\n\", err)\n\t}\n\n\tlog.SetFlags(0)\n\tlog.Print(maydayBanner)\n\tlog.SetFlags(log.LstdFlags)\n\n\thttpTermination <- struct{}{}\n}", "func (fblh *frontendBackendLoggingHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) {\n\tstartTime := time.Now()\n\tinfoRw := &logInfoResponseWriter{rw: rw}\n\tinfoRwMap.Set(fblh.reqid, infoRw)\n\tfblh.handlerFunc(infoRw, req)\n\n\tusername := \"-\"\n\turl := *req.URL\n\tif url.User != nil {\n\t\tif name := url.User.Username(); name != \"\" {\n\t\t\tusername = name\n\t\t}\n\t}\n\n\tip, _, err := net.SplitHostPort(req.RemoteAddr)\n\tif err != nil {\n\t\tip = req.RemoteAddr\n\t}\n\n\thost, port, err := net.SplitHostPort(req.Host)\n\tif err != nil {\n\t\thost = req.Host\n\t\tport = \"80\"\n\t}\n\n\turi := url.RequestURI()\n\tif qmIndex := strings.Index(uri, \"?\"); qmIndex > 0 {\n\t\turi = uri[0:qmIndex]\n\t}\n\n\te := logEntryPool.Get().(*mdtpLogEntry)\n\tdefer logEntryPool.Put(e)\n\ttime.Since(startTime).Seconds()\n\n\te.BodyBytesSent = strconv.Itoa(infoRw.GetSize())\n\te.Connection = fblh.reqid\n\te.BytesSent = strconv.Itoa(infoRw.GetSize()) //todo - difference between this and body bytes sent\n\te.GzipRatio = \"-\" //todo calculate gzip ratio\n\te.HttpHost = req.Host\n\te.HttpReferrer = req.Referer()\n\te.HttpUserAgent = req.UserAgent()\n\te.HttpXRequestChain = req.Header.Get(\"X-Request-Chain\")\n\te.HttpXSessionId = req.Header.Get(\"X-Session-ID\")\n\te.HttpXRequestId = req.Header.Get(\"X-Request-ID\")\n\te.RemoteAddr = ip\n\te.HttpTrueClientIp = req.Header.Get(\"True-Client-IP\")\n\te.ProxyHost = infoRw.backend\n\te.RemoteUser = username\n\te.Request = fmt.Sprintf(\"%s %s %s\", req.Method, req.RequestURI, req.Proto)\n\te.RequestMethod = req.Method\n\te.RequestTime = strconv.FormatFloat(time.Since(startTime).Seconds(), 'f', 3, 64)\n\te.RequestLength = \"-\" //todo request length\n\te.SentHttpLocation = rw.Header().Get(\"Location\")\n\te.ServerName = host\n\te.ServerPort = port\n\te.Status = strconv.Itoa(infoRw.GetStatus())\n\te.TimeLocal = startTime.Format(\"02/Jan/2006:15:04:05 -0700\")\n\te.UpstreamAddr = \"-\" //todo get ip of actual backend used\n\te.UpstreamHttpProxyAgent = \"-\" //todo\n\te.UpstreamHttpServer = \"-\" //todo\n\te.UpstreamResponseLength = \"-\" //todo\n\te.UpstreamResponseTime = \"-\" //todo\n\te.UpstreamStatus = \"-\" //todo\n\te.HttpXForwardedFor = req.Header.Get(\"X-Forwarded-For\")\n\n\t//e.Username = username\n\t//e.Timestamp = startTime.Format(\"02/Jan/2006:15:04:05 -0700\")\n\t//e.Method = req.Method\n\t//e.URI = uri\n\t//e.Protocol = req.Proto\n\t//e.Status = infoRw.GetStatus()\n\t//e.Size = infoRw.GetSize()\n\t//e.Referer = req.Referer()\n\t//e.UserAgent = req.UserAgent()\n\t//e.RequestID = fblh.reqid\n\t//e.Frontend = strings.TrimPrefix(infoRw.GetFrontend(), \"frontend-\")\n\t//e.Backend = infoRw.GetBackend()\n\t//e.ElapsedMillis = time.Since(startTime).Nanoseconds() / 1000000\n\t//e.Host = req.Host\n\n\tif fblh.format == \"json\" {\n\t\tfblh.writeJSON(e)\n\t} else {\n\t\tfblh.writeText(e)\n\t}\n}", "func (p *Proxy) ServeHTTP(writer http.ResponseWriter, request *http.Request) {\n\tdefer request.Body.Close()\n\tpreProcessStartTime := 
time.Now()\n\t//This property holds the value of the \"Host\" header\n\thostHeader := request.Host\n\tif hostHeader == \"\" {\n\t\tlog.Printf(\"Can't get Host header %v\", request.Host)\n\t\twriter.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\t//check if we have that service\n\tif p.ServiceMap[hostHeader] == nil {\n\t\tlog.Printf(\"Can't find service for host %v\", hostHeader)\n\t\twriter.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\t//For faster processing, we will avoid copying the original request and we will just re-use it to send it\n\t//to the backend Host.\n\t//To do that, however, we have to zero out this field, since it is not allowed to be set\n\t//when sending it to http.Client\n\trequest.RequestURI = \"\"\n\n\t//When a request arrives, we will try to send it to a Host.\n\t//If we fail to send it and/or get a response, we will try the \"next\" one. What's \"next\" depends on the strategy.\n\t//For any given request we don't want to try any Host more than once.\n\ttestedHosts := map[int]bool{} //this the closest to a Set in Go\n\tvar service = p.ServiceMap[hostHeader]\n\tfor len(testedHosts) < len(service.Hosts) { //when the sizes are equal, we have tried all Hosts\n\t\t//obtain the index of the Host to which we are going to send the request\n\t\ti := <-service.NextHost\n\t\tif testedHosts[i] {\n\t\t\t//we've already tried this host, so try another one\n\t\t\tcontinue\n\t\t}\n\t\ttestedHosts[i] = true\n\t\ttarget := service.Hosts[i]\n\t\t//replace the host in the original request's URL\n\t\trequest.URL = CompileTargetURL(&target, request.URL)\n\t\tpreProcessStopTime := time.Now()\n\t\t//send the request to the backend service\n\t\tresponse, err := http.DefaultClient.Do(request)\n\t\tif err != nil {\n\t\t\t//if we don't get a response, try the next host\n\t\t\tlog.Printf(\"Error connecting to backend host %v:%v - %v\", target.Address, target.Port, err.Error())\n\t\t\tcontinue\n\t\t}\n\t\tgo preProcessHistogram.Observe(float64(preProcessStopTime.Sub(preProcessStartTime).Nanoseconds() / 1000))\n\t\tpostProcessStartTime := time.Now()\n\t\tbody, err := ioutil.ReadAll(response.Body)\n\t\tresponse.Body.Close()\n\t\tif err != nil {\n\t\t\tlog.Printf(\"can't read response body\")\n\t\t\twriter.WriteHeader(http.StatusBadGateway)\n\t\t\treturn\n\t\t}\n\t\t//send back the response's headers\n\t\tfor k, h := range response.Header {\n\t\t\tfor _, v := range h {\n\t\t\t\twriter.Header().Add(k, v)\n\t\t\t}\n\t\t}\n\t\t//send back the response's status code\n\t\twriter.WriteHeader(response.StatusCode)\n\t\t//send the response's body\n\t\twriter.Write(body)\n\t\tpostProcessStopTime := time.Now()\n\t\tgo postProcessHistogram.Observe(float64(postProcessStopTime.Sub(postProcessStartTime).Nanoseconds() / 1000))\n\t\treturn\n\t}\n\tlog.Printf(\"could not find live host\")\n\t//if we exit the loop, it means none of the backend hosts are reachable\n\twriter.WriteHeader(http.StatusBadGateway)\n\treturn\n}", "func (s httpServer) Run(h http.Handler) {\n\ts.srv.Handler = h\n\tgo s.srv.ListenAndServe()\n}", "func (mux *ServeMux) ServeHTTP(w ResponseWriter, r *Request)", "func (p *Proxy) Run(address string) error {\n\treturn http.ListenAndServe(address, p)\n}", "func (p *Proxy) Run(address string) error {\n\treturn http.ListenAndServe(address, p)\n}" ]
[ "0.7462603", "0.6350784", "0.6070211", "0.59780586", "0.5962405", "0.59415954", "0.5859626", "0.57781273", "0.5730015", "0.5721918", "0.5712981", "0.56630534", "0.56535286", "0.56080145", "0.5604587", "0.5603869", "0.5594449", "0.5575713", "0.55648303", "0.5561466", "0.5557102", "0.55521214", "0.55495805", "0.55474484", "0.5542035", "0.5537337", "0.5528499", "0.5517678", "0.55170107", "0.5512316", "0.55076706", "0.5481762", "0.5480655", "0.5475669", "0.54544324", "0.5427605", "0.5425464", "0.54198414", "0.5414119", "0.54054815", "0.5403813", "0.5401639", "0.53976935", "0.538965", "0.5377568", "0.5370593", "0.5360186", "0.5352853", "0.53402317", "0.5339992", "0.53276193", "0.5326426", "0.53115475", "0.5301213", "0.5300135", "0.5298983", "0.5293607", "0.5289402", "0.5285663", "0.52575815", "0.5247592", "0.5244009", "0.5240652", "0.5240222", "0.5240219", "0.52377623", "0.5237461", "0.52350974", "0.52332085", "0.5220543", "0.52131355", "0.52042055", "0.5190815", "0.51880693", "0.5187544", "0.5184007", "0.5178999", "0.5177454", "0.5172481", "0.5170823", "0.5170636", "0.51652944", "0.5161127", "0.5156023", "0.5145146", "0.5141777", "0.51401746", "0.51358986", "0.5132038", "0.51311505", "0.5129777", "0.51295346", "0.51230526", "0.5121414", "0.5119862", "0.51125884", "0.5109086", "0.5106449", "0.5098479", "0.5098479" ]
0.6533478
1
Start starts the webserver using the given port, and sets up handlers for:
1. Status page
2. Metrics
3. Static content
4. Endpoint for forcing a run
func (ws *WebServer) Start() error {
	if ws.server != nil {
		return fmt.Errorf("WebServer already running")
	}

	log.Logger("webserver").Info("Launching")

	templatePath := ws.TemplatePath
	if templatePath == "" {
		templatePath = defaultServerTemplatePath
	}
	template, err := createTemplate(templatePath)
	if err != nil {
		return err
	}

	// Wire up the handlers: status endpoints, the status page, the
	// force-run API endpoint, and static content.
	m := mux.NewRouter()
	addStatusEndpoints(m)
	statusPageHandler := &StatusPageHandler{
		ws.Authenticator,
		ws.Clock,
		ws.DiffURLFormat,
		ws.KubeClient,
		template,
		ws.StatusTimeout,
	}
	forceRunHandler := &ForceRunHandler{
		ws.Authenticator,
		ws.KubeClient,
		ws.RunQueue,
	}
	m.PathPrefix("/static/").Handler(http.StripPrefix("/static/", http.FileServer(http.Dir("static"))))
	m.PathPrefix("/api/v1/forceRun").Handler(forceRunHandler)
	m.PathPrefix("/").Handler(statusPageHandler)

	ws.server = &http.Server{
		Addr:     fmt.Sprintf(":%v", ws.ListenPort),
		Handler:  m,
		ErrorLog: log.Logger("http.Server").StandardLogger(nil),
	}

	// Serve in the background; http.ErrServerClosed is the normal
	// result of a graceful shutdown, so it is logged as info only.
	go func() {
		if err := ws.server.ListenAndServe(); err != nil {
			if !errors.Is(err, http.ErrServerClosed) {
				log.Logger("webserver").Error("Shutdown", "error", err)
			}
			log.Logger("webserver").Info("Shutdown")
		}
	}()
	return nil
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func StartWebServer(port int) {\n\tsetuphandlers()\n\tportstring := fmt.Sprintf(\":%d\", port)\n\tfmt.Println(\"Running on \", portstring)\n\tlog.Fatal(http.ListenAndServe(portstring, nil))\n}", "func (ms *MicroService) StartOnPort(port int) {\n\n\t// add health\n\tms.Handle(\"GET\", \"/health\", ms.Health)\n\n\t// start the web server\n\tfmt.Printf(\"Listening on %d....\\n\", port)\n\t\n\tif err := http.ListenAndServe(\":\" + strconv.Itoa(port), ms.muxx); err != nil {\n\t\tfmt.Println(\"error\")\n\t\tlog.Fatal(\"ListenAndServe:\", err)\n\t} else {\n\t\tfmt.Println(\"running\")\n\t}\n}", "func startServer(port string, handler http.Handler) {\n\terr := http.ListenAndServe(port, handler)\n\tif err != nil {\n\t\tlogger.Fatal(\"ListenAndServe: \", err)\n\t}\n}", "func Start(port string) {\n\ts := &http.Server{\n\t\tAddr: \":\" + port,\n\t\tHandler: &handler{},\n\t\tReadTimeout: 10 * time.Second,\n\t\tWriteTimeout: 10 * time.Second,\n\t\tMaxHeaderBytes: 1 << 20,\n\t}\n\n\tlog.Fatal(s.ListenAndServe())\n}", "func StartWebserver(port string) {\n\tlog.Info(\"Starting service at port: \" + port)\n\tr := routes.NewRouter()\n\tsrv := &http.Server{\n\t\tAddr: \":\" + port,\n\t\tHandler: r,\n\t\t// Good practice: enforce timeouts for servers you create!\n\t\tWriteTimeout: 15 * time.Second,\n\t\tReadTimeout: 15 * time.Second,\n\t}\n\n\tgo func() {\n\t\t// service connections\n\t\tif err := srv.ListenAndServe(); err != nil && err != http.ErrServerClosed {\n\t\t\tlog.Fatalf(\"listen: %s\\n\", err)\n\t\t}\n\t}()\n\n\t// gracefule shutdown\n\t// Wait for interrupt signal to gracefully shutdown the server with\n\t// a timeout of 5 seconds.\n\tquit := make(chan os.Signal)\n\tsignal.Notify(quit, os.Interrupt)\n\t<-quit\n\tlog.Info(\"Shutdown Server ...\")\n\n\tctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)\n\tdefer cancel()\n\tif err := srv.Shutdown(ctx); err != nil {\n\t\tlog.Fatal(\"Server Shutdown:\", err)\n\t}\n\tlog.Info(\"Server exiting\")\n}", "func (ws *WebServer) Start() {\n\tlog.Logger.Info(\"Launching webserver\")\n\tlastRun := &run.Result{}\n\n\ttemplate, err := sysutil.CreateTemplate(serverTemplatePath)\n\tif err != nil {\n\t\tws.Errors <- err\n\t\treturn\n\t}\n\n\tm := mux.NewRouter()\n\taddStatusEndpoints(m)\n\tstatusPageHandler := &StatusPageHandler{\n\t\ttemplate,\n\t\tlastRun,\n\t\tws.Clock,\n\t}\n\thttp.Handle(\"/\", statusPageHandler)\n\tm.PathPrefix(\"/static/\").Handler(http.StripPrefix(\"/static/\", http.FileServer(http.Dir(\"/static\"))))\n\tforceRunHandler := &ForceRunHandler{\n\t\tws.RunQueue,\n\t}\n\tm.PathPrefix(\"/api/v1/forceRun\").Handler(forceRunHandler)\n\tm.PathPrefix(\"/\").Handler(statusPageHandler)\n\n\tgo func() {\n\t\tfor result := range ws.RunResults {\n\t\t\t*lastRun = result\n\t\t}\n\t}()\n\n\terr = http.ListenAndServe(fmt.Sprintf(\":%v\", ws.ListenPort), m)\n\tws.Errors <- err\n}", "func StartWeb(hcfg Cfg, appcfg []Cfg) {\n\thostCfg = hcfg\n\tvar port = hcfg.Port\n\tc := cors.New(cors.Options{\n\t\tAllowedOrigins: []string{\"*\"},\n\t})\n\n\tlog.Println(\"Port .. 
\" + port)\n\trouter := http.NewServeMux()\n\trouter.Handle(\"/\"+hcfg.Project+\"/file/\", GetResource(hcfg))\n\tfor c := range appcfg {\n\t\trouter.Handle(\"/\"+appcfg[c].Project+\"/\", AppIndex(appcfg[c]))\n\t\trouter.Handle(\"/\"+appcfg[c].Project+\"/file/\", GetResource(appcfg[c]))\n\t\trouter.Handle(\"/\"+appcfg[c].Project+\"/dload\", Dload(appcfg[c]))\n\t}\n\trouter.Handle(\"/\", Index())\n\trouter.Handle(\"/config\", getConfig())\n\trouter.Handle(\"/validate\", Validate())\n\trouter.Handle(\"/transform\", Transform())\n\trouter.Handle(\"/verify\", DocVerify())\n\trouter.Handle(\"/rebuild\", Rebuild())\n\trouter.Handle(\"/rebuildall\", RebuildAll())\n\tflag.StringVar(&listenAddr, \"listen-addr\", port, \"server listen address\")\n\tflag.Parse()\n\tlogger := log.New(os.Stdout, \"http: \", log.LstdFlags)\n\tlogger.Println(\"Starting HTTP Server. .. \")\n\tnextRequestID := func() string {\n\t\treturn fmt.Sprintf(\"%d\", time.Now().UnixNano())\n\t}\n\tserver := &http.Server{\n\t\tAddr: listenAddr,\n\t\tHandler: tracing(nextRequestID)(logging(logger)(c.Handler(router))),\n\t\tErrorLog: logger,\n\t\tReadTimeout: 5 * time.Second,\n\t\tWriteTimeout: 10 * time.Second,\n\t\tIdleTimeout: 15 * time.Second,\n\t}\n\tdone := make(chan bool)\n\tquit := make(chan os.Signal, 1)\n\tsignal.Notify(quit, os.Interrupt)\n\tgo func() {\n\t\t<-quit\n\t\tlogger.Println(\"Server is shutting down...\")\n\t\tatomic.StoreInt32(&healthy, 0)\n\n\t\tctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)\n\t\tdefer cancel()\n\n\t\tserver.SetKeepAlivesEnabled(false)\n\t\tif err := server.Shutdown(ctx); err != nil {\n\t\t\tlogger.Fatalf(\"Could not gracefully shutdown the server: %v\\n\", err)\n\t\t}\n\t\tclose(done)\n\t}()\n\tlogger.Println(\"Server is ready to handle requests at\", listenAddr)\n\tatomic.StoreInt32(&healthy, 1)\n\tif err := server.ListenAndServe(); err != nil && err != http.ErrServerClosed {\n\t\tlogger.Fatalf(\"Could not listen on %s: %v\\n\", listenAddr, err)\n\t}\n\t<-done\n\tlogger.Println(\"Server stopped\")\n}", "func (e *Engine) Run(port int64) {\n\thttp.HandleFunc(\"/\", handleRequest)\n\tstartHTTPServer(port)\n}", "func ServerStart(port string) (string, error) {\n\n\t// List of view handlers\n\thandlerStrings = append(handlerStrings, \"/\", \"/blockchain/view/<ID>\", \"/garage/view/<ID>\", \"serviceevent/add/\", \"/vehicle/view/<ID>\")\n\n\thttp.HandleFunc(\"/\", defaultHandler) // Each call to \"/\" will invoke defaultHandler\n\thttp.HandleFunc(\"/blockchain/view/\", blockchainViewHandler)\n\thttp.HandleFunc(\"/garage/view/\", garageViewHandler)\n\thttp.HandleFunc(\"/serviceevent/add/\", writeServiceEventHandler)\n\thttp.HandleFunc(\"/vehicle/view/\", vehicleViewHandler)\n\n\t//log.Fatal(http.ListenAndServe(\"localhost:\"+port, nil))\n\treturn \"Started on: \" + port, http.ListenAndServe(\"localhost:\"+port, nil)\n\n}", "func Start(port string) {\n\tgo startHTTPServer(port)\n}", "func Start(port int32) {\n\tvar (\n\t\taddress string\n\t\t//\tclientFS = http.Dir(\"/webclient\")\n\t\terr error\n\t)\n\taddress = fmt.Sprintf(\":%d\", port)\n\thttp.Handle(\"/client\", http.StripPrefix(\"/client\", http.FileServer(http.Dir(\"./client\"))))\n\tlog.Infof(\"Starting webserver on port %d\", port)\n\terr = http.ListenAndServe(address, nil)\n\tlog.Fatal(err)\n}", "func StartWebServer(port string) {\n\tlogrus.Infof(\"Starting Web Server Port[%v] \\n\", port)\n\n\t// init routes\n\tr := NewRouter()\n\thttp.Handle(\"/\", r)\n\n\terr := http.ListenAndServe(\":\" + port, nil)\n\tif err != 
nil {\n\t\tlogrus.Printf(\"Error starting server %v\", err.Error())\n\t}\n\n}", "func (s *Rest) Run(port int) {\n\tlog.Printf(\"[INFO] activate rest server on port %d\", port)\n\n\tif len(s.Authenticator.Admins) > 0 {\n\t\tlog.Printf(\"[DEBUG] admins %+v\", s.Authenticator.Admins)\n\t}\n\n\trouter := s.routes()\n\n\ts.lock.Lock()\n\ts.httpServer = &http.Server{\n\t\tAddr: fmt.Sprintf(\":%d\", port),\n\t\tHandler: router,\n\t\tReadHeaderTimeout: 5 * time.Second,\n\t\tWriteTimeout: 5 * time.Second,\n\t\tIdleTimeout: 30 * time.Second,\n\t}\n\ts.lock.Unlock()\n\n\terr := s.httpServer.ListenAndServe()\n\tlog.Printf(\"[WARN] http server terminated, %s\", err)\n}", "func Start(port string) error {\n\tlogger := log.New(os.Stdout, \"\", 0)\n\n\tr := chi.NewRouter()\n\tr.Get(\"/\", hello)\n\tr.Get(\"/healthz/live\", live)\n\tr.Get(\"/healthz/ready\", ready)\n\n\th := &http.Server{Addr: \":\" + port, Handler: r}\n\n\t// Wait for interrupt signal to gracefully shutdown the server with\n\t// a timeout of 10 seconds.\n\tquit := make(chan os.Signal)\n\tsignal.Notify(quit, os.Interrupt, os.Kill, syscall.SIGTERM)\n\tgo func() {\n\t\t<-quit\n\t\tlogger.Println(\"\\nShutting down the server...\")\n\t\tctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)\n\t\tdefer cancel()\n\t\tif err := h.Shutdown(ctx); err != nil {\n\t\t\tlogger.Println(\"Error when stopping the server: \" + err.Error())\n\t\t} else {\n\t\t\tlogger.Println(\"Server gracefully stopped\")\n\t\t}\n\t}()\n\n\tlogger.Printf(\"Listening on http://0.0.0.0:%s\\n\", port)\n\tif err := h.ListenAndServe(); err != nil {\n\t\tlogger.Println(\"Listening returns error: \" + err.Error())\n\t\treturn err\n\t}\n\n\tlogger.Println(\"\\nServer stopped\")\n\treturn nil\n}", "func (cfg *ConfigServer) Start(port int) {\n\tportString := fmt.Sprintf(\":%d\", port)\n\thttp.ListenAndServe(portString, nil)\n}", "func Start() {\n\twebServer.Engine.Run(\":\" + strconv.Itoa(cfg.Read().App.WebServerPort))\n}", "func Start(portNumber int, configFile string) {\n\tconfig := GetConfigFromJSON(configFile)\n\thttp.HandleFunc(\"/\", MainHandler(config))\n\thttp.ListenAndServe(\":\"+strconv.Itoa(portNumber), nil)\n}", "func StartHTTPServer(log *logging.Logger, port int, reportDir string, service Service) {\n\tm := macaron.New()\n\tm.Use(macaron.Logger())\n\tm.Use(macaron.Recovery())\n\tm.Use(macaron.Static(\"\",\n\t\tmacaron.StaticOptions{\n\t\t\tSkipLogging: false,\n\t\t\tFileSystem: bindata.Static(bindata.Options{\n\t\t\t\tAsset: templates.Asset,\n\t\t\t\tAssetDir: templates.AssetDir,\n\t\t\t\tAssetInfo: templates.AssetInfo,\n\t\t\t\tAssetNames: templates.AssetNames,\n\t\t\t\tPrefix: \"\",\n\t\t\t}),\n\t\t},\n\t))\n\tm.Use(macaron.Static(reportDir,\n\t\tmacaron.StaticOptions{\n\t\t\tPrefix: \"reports\",\n\t\t\tSkipLogging: false,\n\t\t},\n\t))\n\tm.Use(macaron.Renderer(macaron.RenderOptions{\n\t\tFuncs: []template.FuncMap{\n\t\t\ttemplate.FuncMap{\n\t\t\t\t\"cssReady\": cssReady,\n\t\t\t\t\"cssTestOK\": cssTestOK,\n\t\t\t\t\"formatTime\": formatTime,\n\t\t\t},\n\t\t},\n\t\tTemplateFileSystem: bindata.Templates(bindata.Options{\n\t\t\tAsset: templates.Asset,\n\t\t\tAssetDir: templates.AssetDir,\n\t\t\tAssetInfo: templates.AssetInfo,\n\t\t\tAssetNames: templates.AssetNames,\n\t\t\tPrefix: \"\",\n\t\t}),\n\t}))\n\tm.Map(log)\n\tm.Map(service)\n\n\tm.Get(\"/\", indexPage)\n\tm.Get(\"/test/:name\", testPage)\n\tm.Get(\"/test/:name/pause\", testPausePage)\n\tm.Get(\"/test/:name/resume\", testResumePage)\n\tm.Get(\"/test/:name/logs\", 
testLogs)\n\tm.Get(\"/logs/:machine/:mode\", logsPage)\n\tm.Get(\"/chaos\", chaosPage)\n\tm.Get(\"/chaos/pause\", chaosPausePage)\n\tm.Get(\"/chaos/resume\", chaosResumePage)\n\tm.Get(\"/chaos/:id/enable\", chaosActionEnablePage)\n\tm.Get(\"/chaos/:id/disable\", chaosActionDisablePage)\n\n\taddr := fmt.Sprintf(\"0.0.0.0:%d\", port)\n\tlog.Infof(\"HTTP server listening on %s\", addr)\n\tgo func() {\n\t\tif err := http.ListenAndServe(addr, m); err != nil {\n\t\t\tlog.Fatalf(\"Failed to start listener: %#v\", err)\n\t\t}\n\t}()\n}", "func start(port string) {\n\thandlers := map[string]handler{\n\t\t\"/\": func(w http.ResponseWriter, r *http.Request) {\n\t\t\tfmt.Fprint(w, \"healthy\")\n\t\t},\n\t}\n\terr := open(port, handlers)\n\tpanic(err)\n}", "func (w *Webserver) Start() error {\n\n\t// listenAndServe the server\n\tgo func() {\n\t\tw.logger.Infof(\"Http server listening at %d!\", w.config.Port)\n\t\terr := w.listenAndServe()\n\t\tif err != nil && err != http.ErrServerClosed {\n\t\t\tw.logger.Errorw(fmt.Sprintf(\"webserver listening at port [%v] stopped\", w.config.Port), \"error\", err.Error())\n\t\t}\n\t}()\n\n\treturn nil\n}", "func StartWebserver() {\n\tlog.Fatal(http.ListenAndServe(\":8080\", nil))\n}", "func Run(http_handler http.Handler, https_handler http.Handler) {\n\n\tvar server Server\n\tvar port int\n\tvar error error\n\n\tserver.Hostname = os.Getenv(\"HOSTNAME\")\n\tserver.UseHTTP = true\n\tserver.UseHTTPS = false\n\t\n\tport, error = strconv.Atoi(os.Getenv(\"HTTP_PORT\"))\n\tif error != nil {\n\t\tlog.Println(\"Config file does not specify a listener to start\")\n\t}\n\n\tserver.HTTPPort = port\n\tif server.HTTPPort == 0 {\n\t\tserver.HTTPPort = 8000\n\t}\n\n\tport, error = strconv.Atoi(os.Getenv(\"HTTPS_PORT\"))\n\tif error != nil {\n\t\tlog.Println(\"Config file does not specify a listener to start\")\n\t}\n\n\tserver.HTTPSPort = port\n\tif server.HTTPSPort == 0 {\n\t\tserver.HTTPSPort = 8443\n\t}\n\tserver.CertFile = os.Getenv(\"SSL_CERTIFICATE_FILE\")\n\tserver.KeyFile = os.Getenv(\"SSL_KEY_FILE\")\n\n\tif server.UseHTTP && server.UseHTTPS {\n\t\tgo func() {\n\t\t\tstart_HTTPS(https_handler, server)\n\t\t}()\n\n\t\tstart_HTTP(http_handler, server)\n\t} else if server.UseHTTP {\n\t\tstart_HTTP(http_handler, server)\n\t} else if server.UseHTTPS {\n\t\tstart_HTTPS(https_handler, server)\n\t} else {\n\t\tlog.Println(\"Config file does not specify a listener to start\")\n\t}\n}", "func (s *Server) Run() {\n\tlog.Printf(\"[INFO] activate rest server on port %v\", s.Port)\n\tlog.Fatal(http.ListenAndServe(fmt.Sprintf(\"%v:%v\", s.address, s.Port), s.routes()))\n}", "func (s *Rest) Run(httpPort int) {\n\tlog.Printf(\"[INFO] activate rest HTTP server on port %d\", httpPort)\n\n\trouter := s.routes()\n\n\ts.lock.Lock()\n\ts.httpServer = &http.Server{\n\t\tAddr: fmt.Sprintf(\":%d\", httpPort),\n\t\tHandler: router,\n\t\tReadHeaderTimeout: 5 * time.Second,\n\t\tWriteTimeout: 5 * time.Second,\n\t\tIdleTimeout: 30 * time.Second,\n\t}\n\n\ts.lock.Unlock()\n\n\terr := s.httpServer.ListenAndServe()\n\n\tlog.Printf(\"[WARN] http server terminated, %s\", err)\n}", "func startWebserver() {\n\tip := \"localhost:1025\"\n\n\thttp.Handle(\"/static/\", http.StripPrefix(\"/static/\", http.FileServer(http.Dir(\"./static\"))))\n\t//router.GET(\"/\", IndexHandler)\n\thttp.HandleFunc(\"/\", IndexHandler)\n\thttp.HandleFunc(\"/quiz\", QuizHandler)\n\thttp.HandleFunc(\"/result\", ResultHandler)\n\thttp.HandleFunc(\"/squish\", SquishHandler)\n\thttp.HandleFunc(\"/rowCollapse\", 
RowCollapseHandler)\n\thttp.HandleFunc(\"/scroll\", ScrollHandler)\n\thttp.HandleFunc(\"/click\", ClickHandler)\n\n\tfmt.Println(\"running on \" + ip)\n\tlog.Fatal(http.ListenAndServe(ip, nil))\n}", "func (server *HTTPRouterServer) Start(port int) error {\n\tif server.srv != nil {\n\t\tserver.Stop()\n\t}\n\terr := server.startREST()\n\tif err != nil {\n\t\treturn err\n\t}\n\tserver.srv = &http.Server{Addr: \":\" + strconv.Itoa(port), Handler: server.router}\n\treturn server.srv.ListenAndServe()\n}", "func Start(port int) {\n\tlog.Printf(\"Listening on port %v\", port)\n\n\tportStr := fmt.Sprintf(\":%v\", port)\n\tlog.Fatal(http.ListenAndServe(portStr, router))\n}", "func startServer(port int, url string) {\n\t/*\n\t\tChecks the port and url variables values\n\t*/\n\tif port <= 0 || len(url) == 0 {\n\t\tpanic(\"invalid port or url\")\n\t}\n\n\t/*\n\t\tDefines and prints the variable fullURL\n\t*/\n\tfullURL := fmt.Sprintf(\"%s:%d\", url, port)\n\tfmt.Printf(\"starting server on %s\\n\", fullURL)\n\n\t/*\n\t\tDefines a Router\n\t*/\n\trm := mux.NewRouter()\n\n\t/*\n\t\tinitial splash screen\n\t*/\n\n\t/*\n\t\tDefines routes in the API\n\t*/\n\trm.HandleFunc(\"/\", Root).Methods(\"GET\")\n\trm.HandleFunc(\"/getjson\", GetJSONResponse).Methods(\"GET\")\n\n\t/*\n\t\tStarts the API\n\t\tSet the port\n\t*/\n\thttp.ListenAndServe(fullURL, &MyServer{rm})\n\n}", "func (s *Service) Run(port string) {\n\ts.log.Println(\"Starting...\")\n\tr := s.prepareRouter()\n\n\tif err := http.ListenAndServe(port, r); err != nil {\n\t\tpanic(err)\n\t}\n}", "func Start(r chi.Router, lg *logrus.Logger, cfg *Config) {\n\ts := &http.Server{\n\t\tAddr: cfg.Port,\n\t\tReadTimeout: time.Duration(cfg.ReadTimeoutSeconds) * time.Second,\n\t\tWriteTimeout: time.Duration(cfg.WriteTimeoutSeconds) * time.Second,\n\t\tHandler: r,\n\t}\n\n\tgo func() {\n\t\tlg.Infof(\"service started from port: %v\", cfg.Port)\n\t\tif err := s.ListenAndServe(); err != nil {\n\t\t\tlg.Info(\"Shutting down the server\")\n\t\t}\n\t}()\n\n\t// Wait for interrupt signal to gracefully shutdown the server with\n\t// a timeout of 10 seconds.\n\tquit := make(chan os.Signal)\n\tsignal.Notify(quit, os.Interrupt)\n\t<-quit\n\tlg.Info(\"server stoped\")\n\tctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)\n\tdefer cancel()\n\tif err := s.Shutdown(ctx); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n}", "func (s *Server) Run(addr string) {\n\tfmt.Println(\"Listening to port 8080\")\n\tlog.Fatal(http.ListenAndServe(addr, s.Router))\n}", "func Start(addr string) {\n\tf = NewServer()\n\thttp.HandleFunc(\"/bayeux\", serveWs)\n\thttp.HandleFunc(\"/\", serveOther)\n\n\t// serve static assets workaround\n\t//http.Handle(\"/file/\", http.StripPrefix(\"/file\", http.FileServer(http.Dir(\"/Users/paul/go/src/github.com/pcrawfor/fayego/runner\"))))\n\n\terr := http.ListenAndServe(addr, nil)\n\tif err != nil {\n\t\tfmt.Println(\"Fatal error \", err.Error())\n\t\tos.Exit(1)\n\t}\n}", "func (web *WebServer) Start() {\n\tlog.Println(http.ListenAndServe(web.listen, web.router))\n}", "func StartWebServer(addr url.URL, readTimeout, writeTimeout int, handler http.Handler) *Server {\n stopc := make(chan struct{})\n srv := &Server{\n addrURL: addr,\n httpServer: &http.Server{\n Addr: addr.Host,\n Handler: handler,\n ReadTimeout: time.Duration(readTimeout) * time.Second,\n WriteTimeout: time.Duration(writeTimeout) * time.Second,\n },\n stopc: stopc,\n donec: make(chan struct{}),\n }\n listener, err := net.Listen(\"tcp\", addr.Host)\n if err != nil {\n 
Error(err.Error())\n }\n go func() {\n defer func() {\n if err := recover(); err != nil {\n Warn(\n \"shutting down server with err \",\n Field(\"error\", fmt.Sprintf(`(%v)`, err)),\n )\n os.Exit(0)\n }\n close(srv.donec)\n }()\n if err := srv.httpServer.Serve(listener); err != nil && err != http.ErrServerClosed {\n Fatal(\n \"shutting down server with err \",\n Field(\"error\", err),\n )\n }\n }()\n return srv\n}", "func createAndStartServer() {\n\thttp.HandleFunc(\"/\", HomeHandler)\n\thttp.HandleFunc(\"/getShortLink\", onGetShortLink)\n\thttp.HandleFunc(\"/getRedirectLink\", onGetRedirectLink)\n\thttp.HandleFunc(\"/getVisits\", onGetVisits)\n\thttp.HandleFunc(\"/registerNewKey\", onRegisterNewKey)\n\thttp.ListenAndServe(os.Getenv(\"APP_URL\"), nil) // getting env var for port\n}", "func main() {\n\tservice.StartWebServer(\"8081\")\n}", "func (s *Server) Run(port uint16) error {\n s.client = &http.Client{\n CheckRedirect: func(req *http.Request, via []*http.Request) error {\n return http.ErrUseLastResponse\n },\n Timeout: 10 * time.Second,\n }\n addr := fmt.Sprintf(\":%d\", port)\n return http.ListenAndServe(addr, s)\n}", "func (s *Server) Start(port string, wTimeout, rTimeout, idleTimeout time.Duration) error {\n\thttp.Handle(\"/\", applyMiddlewares(http.HandlerFunc(s.Router), noPanicMiddleware(s.Log), corsMiddleware(\"*\")))\n\n\tsrv := http.Server{\n\t\tAddr: fmt.Sprintf(\":%s\", port),\n\t\tWriteTimeout: wTimeout,\n\t\tReadTimeout: rTimeout,\n\t\tIdleTimeout: idleTimeout,\n\t}\n\n\treturn srv.ListenAndServe()\n}", "func startWebserver(input string) {\n ip := \"130.240.170.62:1025\"\n\t//router := httprouter.New()\n\thttp.Handle(\"/static/\", http.StripPrefix(\"/static/\", http.FileServer(http.Dir(\"./static\"))))\n\t//router.GET(\"/\", IndexHandler)\n\thttp.HandleFunc(\"/\", IndexHandler)\n\thttp.HandleFunc(\"/about\", AboutHandler)\n\thttp.HandleFunc(\"/catmagic\", CatMagicHandler)\n http.HandleFunc(\"/toplist/rate\", ToplistRateHandler)\n http.HandleFunc(\"/toplist/comment\", ToplistCommentHandler)\n http.HandleFunc(\"/toplist/favorite\", ToplistFavoriteHandler)\n http.HandleFunc(\"/toplist/latest\", LatestPhotosHandler)\n\t//http.HandleFunc(\"/toplist\", TopListHandler)\n\thttp.HandleFunc(\"/photo/\", PhotoHandler)\n\thttp.HandleFunc(\"/login\", LoginHandler)\n http.HandleFunc(\"/mypage/\", MyPageHandler)\n\n\t//var input int\n\t//fmt.Scan(&input)\n\tif input == \"1\" {\n\t\tfmt.Println(\"running on\", ip)\n\t\tlog.Fatal(http.ListenAndServe(ip, nil))\n\t} else {\n\t\tfmt.Println(\"running on localhost:1025\")\n\t\tlog.Fatal(http.ListenAndServe(\"localhost:1025\", nil))\n\n\t}\n}", "func (s * Service)Start(port string) {\n\tr := chi.NewRouter()\n\n\tr.Use(middleware.Logger)\n\tr.Use(middleware.Timeout(5 * time.Second))\n\n\trpc := rpc.RPC{\n\t\tApp: &app.App{},\n\t}\n\tif len(port) == 0 {\n\t\t// default port 3000\n\t\tport = \"3000\"\n\t}\n\tr.Post(\"/generate_pricing\", rpc.GeneratePricing)\n\tr.Get(\"/generate_pricing\", rpc.GeneratePricingConfig)\n\ts.ListenAndServe(\":\"+port, r)\n}", "func (srv Web) Start() error {\n\tfmt.Printf(\"Starting service on port %s\\n\", srv.Settings.Port)\n\treturn http.ListenAndServe(srv.Settings.Port, srv.Router())\n}", "func (s *server) Run(addr string) error {\n\treturn http.ListenAndServe(addr, s.handler)\n}", "func (s *Server) Run() error {\n\t// start fetcher, reporter and doc generator in goroutines\n\tgo s.fetcher.Run()\n\tgo s.reporter.Run()\n\tgo s.docGenerator.Run()\n\n\t// start webserver\n\tlistenAddress := s.listenAddress\n\tif 
listenAddress == \"\" {\n\t\tlistenAddress = DefaultAddress\n\t}\n\n\tr := mux.NewRouter()\n\n\t// register ping api\n\tr.HandleFunc(\"/_ping\", pingHandler).Methods(\"GET\")\n\n\t// github webhook API\n\tr.HandleFunc(\"/events\", s.gitHubEventHandler).Methods(\"POST\")\n\n\t// travisCI webhook API\n\tr.HandleFunc(\"/ci_notifications\", s.ciNotificationHandler).Methods(\"POST\")\n\n\tlogrus.Infof(\"start http server on address %s\", listenAddress)\n\treturn http.ListenAndServe(listenAddress, r)\n}", "func Run(params *ContextParams) {\n\tr := createRouter(params)\n\n\tendless.DefaultHammerTime = 10 * time.Second\n\tendless.DefaultReadTimeOut = 295 * time.Second\n\tif err := endless.ListenAndServe(\":8080\", r); err != nil {\n\t\tlog.Infof(\"Server stopped: %s\", err)\n\t}\n}", "func Start(cfg Config) {\n\tr := mux.NewRouter()\n\n\tep := endpoints{\n\t\tVersion: 1,\n\t}\n\n\t// Health Check\n\tr.HandleFunc(\"/api/health/\", ep.GetHealth)\n\n\tsrv := &http.Server{\n\t\tHandler: r,\n\t\tAddr: fmt.Sprintf(\"%s:%d\", cfg.IP, cfg.Port),\n\t\t// Good practice: enforce timeouts for servers you create!\n\t\tWriteTimeout: 15 * time.Second,\n\t\tReadTimeout: 15 * time.Second,\n\t}\n\n\tlog.Fatal(srv.ListenAndServe())\n}", "func WebStartUp() {\n\thttp.HandleFunc(\"/\", servePage)\n\thttp.ListenAndServe(\":8080\", nil)\n}", "func runServer() {\n\t// listen and serve on 0.0.0.0:8080 (for windows \"localhost:8080\")\n\tlog.Fatalln(router.Run(fmt.Sprintf(\":%s\", env.AppPort)))\n}", "func (s *Server) Start() {\n\tlog.Println(\"Web server started at \" + s.configurationService.Address())\n\tlog.Fatal(http.ListenAndServe(s.configurationService.Address(), s.router()))\n}", "func (s *WebServer) Run(addr string) error {\n\tinitHandlers(s)\n\texpvar.Publish(\"Goroutines\", expvar.Func(func() interface{} {\n\t\treturn runtime.NumGoroutine()\n\t}))\n\n\thttp.Handle(\"/prom\", s.hub.Metrics.getHandler())\n\n\tsock, err := net.Listen(\"tcp\", addr)\n\tif err != nil {\n\t\treturn err\n\t}\n\tgo func() {\n\t\tfmt.Println(\"HTTP now available at\", addr)\n\t\tlog.Fatal(http.Serve(sock, nil))\n\t}()\n\treturn nil\n}", "func (server *Server) Start() {\n\trouter := mux.NewRouter()\n\n\trouter.HandleFunc(\"/\", errorHandler(server.root)).Methods(\"GET\")\n\trouter.HandleFunc(\"/stock\", errorHandler(server.listInventorySupplyHandlerFunc)).Methods(\"GET\")\n\trouter.HandleFunc(\"/sentViaAmazon\", errorHandler(server.createFulfillmentHandlerFunc)).Methods(\"POST\")\n\n\thttp.Handle(\"/\", router)\n\n\tfmt.Println(\"listening on\", server.Port)\n\thttp.ListenAndServe(fmt.Sprintf(\":%d\", server.Port), nil)\n}", "func (s *Server) Run(ctx context.Context) error {\n\th := HealthHandler{\n\t\tservices: map[string]Healthier{\n\t\t\t\"ec2\": s.collectors.EC2,\n\t\t\t\"asg\": s.collectors.ASG,\n\t\t\t\"spot\": s.collectors.Spot,\n\t\t\t\"nodes\": s.collectors.Node,\n\t\t\t\"pods\": s.collectors.Pod,\n\n\t\t\t\"mainloop\": s.mainloop,\n\t\t},\n\t}\n\n\trouter := httprouter.New()\n\trouter.GET(\"/\", s.handleStatus)\n\trouter.GET(\"/-/ready\", webutil.HandleHealth)\n\trouter.Handler(\"GET\", \"/-/healthy\", h)\n\trouter.Handler(\"GET\", \"/metrics\", promhttp.Handler())\n\n\treturn webutil.ListenAndServerWithContext(\n\t\tctx, \":8080\", router)\n}", "func RunServer(host string, port int) {\n\thandleWebsocket()\n\thandlePublicFiles()\n\n\tlogger.Println(fmt.Sprintf(\"server started at http://:%d/\", port))\n\tlog.Fatal(http.ListenAndServe(fmt.Sprintf(\":%d\", port), nil))\n}", "func serveSetup(c config.Server) {\n\tservices, 
modulePaths := module.Amalgamate(freeModules)\n\tserviceHandler := module.Handle(services)\n\n\tlog.Printf(\"Initializing %d modules and %d services\", len(modulePaths), len(services))\n\n\thttp.HandleFunc(\"/ws\", socket.Handle(c.HTTP, serviceHandler))\n\thttp.HandleFunc(\"/\", web.Handle(c.HTTP, modulePaths, nil))\n\n\tport := host.FirstAvailablePort(80, 8000, 3000)\n\tif port == 0 {\n\t\tlog.Fatal(\"Unable to find bindable port\")\n\t}\n\taddr := fmt.Sprintf(\":%d\", port)\n\n\tif !*flagSilent {\n\t\turl := *flagLocalURL\n\t\tgo func() {\n\t\t\ttime.Sleep(time.Second * 2)\n\t\t\tlog.Printf(\"Launching browser for setup or demo\")\n\t\t\thost.Start(\"http://\" + url + addr + \"/setup/\")\n\t\t}()\n\t}\n\n\tlog.Printf(\"Server starting on port %d\", port)\n\tweb.ExitIfError(http.ListenAndServe(addr, nil))\n}", "func RunServer(port int) {}", "func (e *Engine) Run(port string) error {\n\te.server = &http.Server{\n\t\tAddr: port,\n\t\tHandler: e,\n\t}\n\treturn e.server.ListenAndServe()\n}", "func (s *server) Run() error {\n\ts.logger.Info(\"starting http server\", logger.String(\"addr\", s.server.Addr))\n\ts.server.Handler = s.gin\n\t// Open listener.\n\ttrackedListener, err := conntrack.NewTrackedListener(\"tcp\", s.addr, s.r)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn s.server.Serve(trackedListener)\n}", "func (web Web) Run() {\n\tlog.Println(\"Starting webserver\")\n\n\t//Serve static files\n\tfs := http.FileServer(http.Dir(\"static/voipathon\"))\n\thttp.Handle(\"/\", fs)\n\n\tfsTestClient := http.FileServer(http.Dir(\"static/testclient\"))\n\thttp.Handle(\"/testclient/\", http.StripPrefix(\"/testclient/\", fsTestClient))\n\n\thttp.HandleFunc(\"/ws\", web.registerClient)\n\tlog.Println(\"Waiting for connections\")\n\tlog.Fatal(http.ListenAndServe(\":4242\", nil))\n}", "func start_HTTP(handler http.Handler, server Server) {\n\tfmt.Println(time.Now().Format(\"2006-01-02 03:04:05 PM\"), \"Running HTTP \"+get_http_address(server))\n\n\t// Start the HTTP listener\n\tlog.Fatal(http.ListenAndServe(get_http_address(server), handler))\n}", "func Run(h http.Handler) {\n\tsrv := createServer(h)\n\tgo gracefullyShutDownOnSignal(srv, context.Background())\n\tif err := srv.ListenAndServe(); err != http.ErrServerClosed {\n\t\tlog.Fatalf(\"Unable to to start server: %v\", err)\n\t}\n}", "func Run(port string) {\n\n\tr := NewRouter()\n\tr.Use(authorizeMiddleware)\n\tr.Use(loggingMiddleware)\n\tr.Use(jsonBodyParser)\n\thttp.Handle(\"/\", r)\n\tlog.Println(\"Starting HTTP service for admin on PORT: \", port)\n\terr := http.ListenAndServe(\":\"+port, nil)\n\n\tif err != nil {\n\t\tlog.Println(\"Failed to start HTTP service for adminService, PORT: \", port)\n\t\tlog.Println(\"Error: \", err.Error())\n\t}\n}", "func startHTTPListener() {\n\thttp.ListenAndServe(\":\"+GetConfig().Port, nil)\n}", "func (c *Config) Start() error {\n\t//c.mux = http.NewServeMux()\n\ts := newServer(c)\n\ts.indexTemplate = template.Must(template.ParseFiles(c.indexTemplatePath))\n\ts.callTemplate = template.Must(template.New(\"callDetails.html\").Funcs(template.FuncMap{\n\t\t\"displaySendCounts\": func(cd []counts.CommDataT, leadRank int, callID int) string {\n\t\t\tfor _, data := range cd {\n\t\t\t\tif data.LeadRank == leadRank {\n\t\t\t\t\treturn strings.Join(cd[leadRank].CallData[callID].SendData.RawCounts, \"<br />\")\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn \"Call not found\"\n\t\t},\n\t\t\"displayRecvCounts\": func(cd []counts.CommDataT, leadRank int, callID int) string {\n\t\t\tfor _, data := range cd {\n\t\t\t\tif 
data.LeadRank == leadRank {\n\t\t\t\t\treturn strings.Join(cd[leadRank].CallData[callID].RecvData.RawCounts, \"<br />\")\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn \"Call not found\"\n\t\t},\n\t\t\"displayCallPlot\": func(leadRank int, callID int) string {\n\t\t\treturn fmt.Sprintf(\"profiler_rank%d_call%d.png\", leadRank, callID)\n\t\t}}).ParseFiles(c.callTemplatePath))\n\ts.callsTemplate = template.Must(template.ParseFiles(c.callsTemplatePath))\n\ts.patternsTemplate = template.Must(template.ParseFiles(c.patternsTemplatePath))\n\ts.heatmapTemplate = template.Must(template.New(\"heatmapDetails.html\").Funcs(template.FuncMap{\n\t\t\"displayHeatmap\": func(patternID int) string {\n\t\t\treturn fmt.Sprintf(\"%d_task3.png\", patternID)\n\t\t}}).ParseFiles(c.heatmapTemplatePath))\n\ts.heatmapsTemplate = template.Must(template.ParseFiles(c.heatmapsTemplatePath))\n\ts.stopTemplate = template.Must(template.ParseFiles(c.stopTemplatePath))\n\n\tc.srv = &http.Server{\n\t\tAddr: fmt.Sprintf(\":%d\", c.Port),\n\t\tHandler: s,\n\t}\n\n\tgo func(c *Config) {\n\t\tdefer c.wg.Done()\n\t\tc.srv.ListenAndServe()\n\t\tfmt.Println(\"HTTP server is now terminated\")\n\t}(c)\n\n\treturn nil\n}", "func StartApplicatin() {\n\tmapUrls()\n\trouter.Run(\":8080\")\n}", "func Run() {\n\trouter := getRouter()\n\ts := &http.Server{\n\t\tAddr: \"0.0.0.0:8080\",\n\t\tHandler: router,\n\t\tReadTimeout: 10 * time.Second,\n\t\tWriteTimeout: 10 * time.Second,\n\t\tMaxHeaderBytes: 1 << 20,\n\t}\n\ts.ListenAndServe()\n}", "func (h *Handler) Run() {\n\tlog.Printf(\"Listening on %s\", h.Cfg.Server.Address)\n\tserver := &http.Server{\n\t\tHandler: getRouter(),\n\t\tAddr: h.Cfg.Server.Address,\n\t}\n\th.listenErrCh <- server.ListenAndServe()\n}", "func Run(httpHandlers http.Handler) (s servers) {\n\ts.Host = config.Config.Server.Host\n\ts.Port = config.Config.Server.Port\n\tlog.Info(\"Starting Server On :\", \"address\", s.Host, \"port\", s.Port)\n\tstartServer(s, httpHandlers)\n\treturn\n}", "func (display *WebDisplay) LaunchWebServer() {\n\n\thttp.HandleFunc(\"/\", htmlPageHandler)\n\thttp.HandleFunc(\"/image/\", func(w http.ResponseWriter, r *http.Request) { display.imageHandler(w, r) })\n\n\tlog.Print(\"Server listening on 8080\")\n\tlog.Fatal(http.ListenAndServe(\":8080\", nil))\n}", "func StartServer(port int) {\n\twsHandler := clientWebsocketHandler{upgrader: defaultUpgrader}\n\n\trouter := mux.NewRouter()\n\trouter.Handle(\"/client_ws\", wsHandler)\n\trouter.Handle(\"/d/{downloadId}\", downloadHandler{})\n\n\taddr := fmt.Sprintf(\":%d\", port)\n\thttp.ListenAndServe(addr, router)\n}", "func StartServer(addr *string) {\n\thttp.Handle(\"/metrics\", prometheus.Handler())\n\thttp.HandleFunc(\"/\", func(w http.ResponseWriter, r *http.Request) {\n\t\tw.Write([]byte(`<html>\n <head><title>LXC Exporter</title></head>\n <body>\n <h1>LXC Exporter</h1>\n <p><a href=\"/metrics\">Metrics</a></p>\n </body>\n </html>`))\n\t})\n\n\tif err := http.ListenAndServe(*addr, nil); err != nil {\n\t\tlog.Fatalf(\"Error starting HTTP server: %s\", err)\n\t}\n}", "func StartHTTPServer(healthz http.HandlerFunc, port int, mux *http.ServeMux) error {\n\thttpSrv, err := buildServer(healthz, port, mux)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Unable to prepare server prior to serving: %s\", err.Error())\n\t}\n\tlis, err := buildListener(port)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Unable to prepare net.listener prior to serving: %s\", err.Error())\n\t}\n\treturn httpSrv.Serve(*lis)\n}", "func startHTTP(handlers http.Handler, s Server) 
{\n\tfmt.Println(time.Now().Format(\"2006-01-02 03:04:05 PM\"), \"Running HTTP \"+httpAddress(s))\n\n\t// Start the HTTP listener\n\tlog.Fatal(http.ListenAndServe(httpAddress(s), handlers))\n}", "func startServer() {\n\t// index file\n\thttp.HandleFunc(\"/\", func(w http.ResponseWriter, r *http.Request) {\n\t\thttp.Redirect(w, r, \"/static/\", http.StatusFound)\n\t}) //设置访问的路由\n\n\t// static file\n\thttp.HandleFunc(\"/static/\", func(w http.ResponseWriter, r *http.Request) {\n\t\thttp.ServeFile(w, r, r.URL.Path[1:])\n\t})\n\n\t// other logic handlers\n\thttp.HandleFunc(\"/rank\", rank)\n\thttp.HandleFunc(\"/top\", top)\n\t//\thttp.HandleFunc(\"/update\", update)\n\n\terr := http.ListenAndServe(\":9090\", nil) //设置监听的端口\n\tif err != nil {\n\t\tlog.Fatal(\"ListenAndServe: \", err)\n\t}\n}", "func (a *App) Run(port string) {\n\tlog.Fatal(http.ListenAndServe(fmt.Sprintf(\":%s\", port), a.Router))\n}", "func StartServer() {\n\thandlePesquisa()\n\n\tlog.Info.Println(\"WebServer started...\")\n\thttp.ListenAndServe(\":8080\", httpLogger.WriteLog(http.DefaultServeMux, os.Stdout))\n}", "func (a *App) Run(httpPort string) {\n\n\tlog.Printf(\"Server running on port %s\\n\", httpPort)\n\n\t// Setup server\n\tserver := &http.Server{\n\t\tHandler: a.Router,\n\t\tAddr: \":\" + httpPort,\n\t\tWriteTimeout: 15 * time.Second,\n\t\tReadTimeout: 15 * time.Second,\n\t}\n\tlog.Fatal(server.ListenAndServe())\n}", "func (a *App) Run(port string) {\n\thttp.Handle(\"/\", a.Router)\n\n\tlog.Printf(\"Listening on %s\", port)\n\n\tlog.Fatal(http.ListenAndServe(fmt.Sprintf(\":%s\", port), nil))\n}", "func Run(port int) {\n\n\trunStartupHooks()\n\n}", "func StartWebServer(pubSub *pubsub.PubSub) {\n\t// setup web server\n\te := echo.New()\n\te.HideBanner = true\n\te.Use(middleware.Logger())\n\n\t// disable CORS on the web server if desired\n\tdisableCORS = viper.GetBool(\"server_settings.disablecors\")\n\tif disableCORS {\n\t\tlogger.Warn(\"Running in disabled CORS mode. This is very dangerous! Be careful!\")\n\t\te.Use(middleware.CORSWithConfig(middleware.CORSConfig{\n\t\t\tAllowOrigins: []string{\"*\"},\n\t\t\tAllowHeaders: []string{echo.HeaderOrigin, echo.HeaderContentType, echo.HeaderAccept},\n\t\t}))\n\t}\n\n\tc, _ := handlers.NewContainer()\n\n\t// GetLogstationName - Get Logstation Name\n\te.GET(\"/settings/logstation-name\", c.GetLogstationName)\n\n\t// GetSettingsSyntax - Get Syntax Colors\n\te.GET(\"/settings/syntax\", c.GetSettingsSyntax)\n\n\t// package up the built web files and serve them to the clients\n\tfsys, err := fs.Sub(webServerFiles, \"web/dist\")\n\tif err != nil {\n\t\tpanic(fmt.Errorf(\"error loading the web files into the server. 
error msg: %s\", err))\n\t}\n\tfileHandler := http.FileServer(http.FS(fsys))\n\te.GET(\"/*\", echo.WrapHandler(fileHandler))\n\n\t// pass message broker channel into websocket handler\n\twsHandlerChan := func(c echo.Context) error {\n\t\treturn WebSocketHandler(c, pubSub)\n\t}\n\te.GET(\"/ws\", wsHandlerChan)\n\n\t// start the web server\n\te.Logger.Fatal(e.Start(viper.GetString(\"server_settings.webserveraddress\") + \":\" + viper.GetString(\"server_settings.webserverport\")))\n}", "func (a HTTPServer) Start() {\n\tapi := a.router.PathPrefix(\"/v1\").Subrouter()\n\n\tapi.Use(middleware.NewCorrelationID().Execute)\n\n\tapi.Handle(\"/accounts\", a.createAccountHandler()).Methods(http.MethodPost)\n\tapi.Handle(\"/accounts/{account_id}\", a.findAccountByIDHandler()).Methods(http.MethodGet)\n\n\tapi.Handle(\"/transactions\", a.createTransactionHandler()).Methods(http.MethodPost)\n\n\t//api.Handle(\"/cashout\", a.createCashoutHandler()).Methods(http.MethodPost)\n\t//api.Handle(\"/cashin\", a.createTransactionHandler()).Methods(http.MethodPost)\n\t//api.Handle(\"/peer-too-peer\", a.createTransactionHandler()).Methods(http.MethodPost)\n\n\tapi.HandleFunc(\"/health\", healthCheck).Methods(http.MethodGet)\n\n\tserver := &http.Server{\n\t\tReadTimeout: 15 * time.Second,\n\t\tWriteTimeout: 15 * time.Second,\n\t\tAddr: fmt.Sprintf(\":%s\", os.Getenv(\"APP_PORT\")),\n\t\tHandler: a.router,\n\t}\n\n\tstop := make(chan os.Signal, 1)\n\tsignal.Notify(stop, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)\n\n\tgo func() {\n\t\ta.logger.Println(\"Starting HTTP Server in port:\", os.Getenv(\"APP_PORT\"))\n\t\ta.logger.Fatal(server.ListenAndServe())\n\t}()\n\n\t<-stop\n\n\tctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)\n\tdefer func() {\n\t\tcancel()\n\t}()\n\n\tif err := server.Shutdown(ctx); err != nil {\n\t\ta.logger.Fatal(\"Server Shutdown Failed\")\n\t}\n\n\ta.logger.Println(\"Service down\")\n}", "func (s *Server) Start() error {\n\ts.RegisterHTTPHandlers()\n\tlog.Print(fmt.Sprintf(\"Listening HTTP on: %s\", s.url))\n\n\thandler := CORSWrap(s.router)\n\treturn http.ListenAndServe(s.url, handler)\n}", "func (g *RESTFrontend) Run(port int) {\n\tlistener, err := net.Listen(\"tcp\", fmt.Sprintf(\":%d\", port))\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tg.Listener = listener\n\tg.RunWithListener(listener)\n}", "func WebServer(){\n\thttp.HandleFunc(\"/\", handler)\n\thttp.HandleFunc(\"/count\",counter)\n\thttp.HandleFunc(\"/draw\",functions.Draw)\n\tlog.Fatal(http.ListenAndServe(\"localhost:8000\",nil))\n}", "func Start(port int, logLevel LogLevel) {\n\t// setup log level\n\tswitch logLevel {\n\tcase LogLevelError:\n\t\ttracelog.Start(tracelog.LevelError)\n\tcase LogLevelInfo:\n\t\ttracelog.Start(tracelog.LevelInfo)\n\tdefault:\n\t\ttracelog.Start(tracelog.LevelTrace)\n\t}\n\n\t// define routes\n\thttp.HandleFunc(\"/echo\", serveEcho)\n\thttp.HandleFunc(\"/api\", serveAPI)\n\n\t// turn off the legacy support\n\tmsgpackHandle.WriteExt = true\n\tmsgpackHandle.RawToString = true\n\n\t// start the server\n\ttracelog.Info(\"pzconnect\", \"start\", \"starting websocket server on port %d\", port)\n\terr := http.ListenAndServe(fmt.Sprintf(\":%d\", port), nil)\n\tlog.Fatal(\"ListenAndServe: \", err)\n\n\ttracelog.Stop()\n}", "func (s httpServer) Run(h http.Handler) {\n\ts.srv.Handler = h\n\tgo s.srv.ListenAndServe()\n}", "func (s *Server) Run() error {\n\tmux := s.createServeMux()\n\tlog.WithField(\"address\", \"http://\"+s.hostPort).Info(\"Starting\")\n\treturn 
http.ListenAndServe(s.hostPort, mux)\n}", "func (s *Server) Run() error {\n\tmux := s.createServeMux()\n\tlog.WithField(\"address\", \"http://\"+s.hostPort).Info(\"Starting\")\n\treturn http.ListenAndServe(s.hostPort, mux)\n}", "func (s *server) start(addr string) {\n\t// Set http handlers\n\thttp.HandleFunc(\"/\", s.wsHandler)\n\n\t// Start server\n\tif err := http.ListenAndServe(addr, nil); err != nil {\n\t\ts.log.Fatal(\"error starting http server\", zap.Error(err))\n\t}\n}", "func Start(ctx context.Context) {\n\tmux := &http.ServeMux{}\n\tmux.Handle(healthPath, &defaultHandler)\n\n\tserver := &http.Server{\n\t\tAddr: fmt.Sprintf(\":%d\", healthPort),\n\t\tHandler: mux,\n\t}\n\n\terrCh := make(chan error)\n\n\tgo func() {\n\t\terrCh <- server.ListenAndServe()\n\t}()\n\n\thandleServerError := func(err error) {\n\t\tif err != http.ErrServerClosed {\n\t\t\tlogging.FromContext(ctx).Errorw(\"Error during runtime of health server\", zap.Error(err))\n\t\t}\n\t}\n\n\tselect {\n\tcase <-ctx.Done():\n\t\tctx, cancel := context.WithTimeout(context.Background(), gracefulHandlerShutdown)\n\t\tdefer cancel()\n\n\t\tif err := server.Shutdown(ctx); err != nil {\n\t\t\tlogging.FromContext(ctx).Errorw(\"Error during shutdown of health server\", zap.Error(err))\n\t\t}\n\n\t\thandleServerError(<-errCh)\n\n\tcase err := <-errCh:\n\t\thandleServerError(err)\n\t}\n}", "func (s *Server) Start() error {\n\t// return if already started\n\tif s.listener != nil {\n\t\treturn nil\n\t}\n\n\thttp.HandleFunc(\"/health\", func(w http.ResponseWriter, r *http.Request) {\n\t\tfmt.Fprintln(w, \"OK\")\n\t})\n\n\thttp.Handle(\"/\", registry.New(&registry.Config{\n\t\tIPFSHost: s.ipfsHost,\n\t\tIPFSGateway: s.ipfsGateway,\n\t\tCIDResolvers: s.cidResolvers,\n\t\tCIDStorePath: s.cidStorePath,\n\t}))\n\n\tvar err error\n\ts.listener, err = net.Listen(\"tcp\", s.host)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ts.Debugf(\"[registry/server] listening on %s\", s.listener.Addr())\n\tif s.tlsKeyPath != \"\" && s.tlsCertPath != \"\" {\n\t\treturn http.ServeTLS(s.listener, nil, s.tlsCertPath, s.tlsKeyPath)\n\t}\n\n\treturn http.Serve(s.listener, nil)\n}", "func (s *SideTwistHandler) StartHandler(restAddress string, configEntry config.HandlerConfigEntry) error {\n\tlistenAddress, err := config.GetHostPortString(configEntry)\n\tif err != nil {\n\t\treturn err\n\t}\n\ts.listenAddress = listenAddress\n\tlogger.Info(\"Starting SideTwist Handler\")\n\n\t// make sure we know the REST API address\n\ts.restAPIaddress = restAddress\n\t\n\t// make sure we can access the HTML template page for responses\n\ttemplateData, err := ioutil.ReadFile(s.templatePath)\n\tif err != nil {\n\t\treturn err\n\t}\n\ts.htmlTemplate = string(templateData)\n\n\t// initialize URL router\n\turlRouter := mux.NewRouter()\n\n\ts.server = &http.Server{\n\t\tAddr: s.listenAddress,\n\t\tWriteTimeout: time.Second * 15,\n\t\tReadTimeout: time.Second * 15,\n\t\tIdleTimeout: time.Second * 60,\n\t\tHandler: urlRouter,\n\t}\n\n\t// bind HTTP routes to their functions\n\turlRouter.HandleFunc(\"/search/{identifier}\", s.handleBeacon).Methods(\"GET\")\n\turlRouter.HandleFunc(\"/search/{identifier}\", s.handleResponse).Methods(\"POST\")\n\turlRouter.HandleFunc(\"/getFile/{filename}\", s.downloadFile).Methods(\"GET\")\n\turlRouter.HandleFunc(\"/logo.png\", fetchLogo).Methods(\"GET\")\n\t\n\t// start handler in goroutine so it doesn't block\n\tgo func() {\n\t\terr := s.server.ListenAndServe()\n\t\tif err != nil && err.Error() != \"http: Server closed\" 
{\n\t\t\tlogger.Error(err)\n\t\t}\n\t}()\n\treturn nil\n}", "func (h *HTTPApi) Start(router *httprouter.Router) {\n\trouter.POST(\"/v1/join\", h.Join)\n\trouter.POST(\"/v1/leave\", h.Leave)\n\n\t// TODO: options to enable/disable (or scope to just localhost)\n\trouter.GET(\"/v1/debug/pprof/\", wrapHandler(http.HandlerFunc(pprof.Index)))\n\trouter.GET(\"/v1/debug/pprof/cmdline\", wrapHandler(http.HandlerFunc(pprof.Cmdline)))\n\trouter.GET(\"/v1/debug/pprof/profile\", wrapHandler(http.HandlerFunc(pprof.Profile)))\n\trouter.GET(\"/v1/debug/pprof/symbol\", wrapHandler(http.HandlerFunc(pprof.Symbol)))\n\trouter.GET(\"/v1/debug/pprof/trace\", wrapHandler(http.HandlerFunc(pprof.Trace)))\n}", "func (s *Server) Start() error {\n\tif s.HostPortHTTP == \"\" {\n\t\ts.HostPortHTTP = \":\" + common.DefaultServerPortHTTP\n\t}\n\n\ts.eHandler = endtoend.NewHandler(s.AgentHostPort, s.SamplingServerURL)\n\n\tmux := http.NewServeMux()\n\tmux.HandleFunc(\"/\", func(w http.ResponseWriter, r *http.Request) { return }) // health check\n\tmux.HandleFunc(\"/start_trace\", func(w http.ResponseWriter, r *http.Request) {\n\t\ts.handleJSON(w, r, func() interface{} {\n\t\t\treturn tracetest.NewStartTraceRequest()\n\t\t}, func(ctx context.Context, req interface{}) (interface{}, error) {\n\t\t\treturn s.doStartTrace(req.(*tracetest.StartTraceRequest))\n\t\t})\n\t})\n\tmux.HandleFunc(\"/join_trace\", func(w http.ResponseWriter, r *http.Request) {\n\t\ts.handleJSON(w, r, func() interface{} {\n\t\t\treturn tracetest.NewJoinTraceRequest()\n\t\t}, func(ctx context.Context, req interface{}) (interface{}, error) {\n\t\t\treturn s.doJoinTrace(ctx, req.(*tracetest.JoinTraceRequest))\n\t\t})\n\t})\n\tmux.HandleFunc(\"/create_traces\", s.eHandler.GenerateTraces)\n\n\tlistener, err := net.Listen(\"tcp\", s.HostPortHTTP)\n\tif err != nil {\n\t\treturn err\n\t}\n\ts.listener = listener\n\ts.HostPortHTTP = listener.Addr().String()\n\n\tvar started sync.WaitGroup\n\tstarted.Add(1)\n\tgo func() {\n\t\tstarted.Done()\n\t\thttp.Serve(listener, mux)\n\t}()\n\tstarted.Wait()\n\tlog.Printf(\"Started http server at %s\\n\", s.HostPortHTTP)\n\treturn nil\n}", "func (s *Server) Start() {\n\tlog.Println(\"Starting webhook receiver on port 8080...\")\n\terr := http.ListenAndServe(\":8080\", nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"Couldn't start server: %s\", err)\n\t}\n}", "func StartListening() {\n\thttp.HandleFunc(\"/health\", GenerateHandler(\"^/health$\", HealthHandler))\n\thttp.HandleFunc(\"/static/\", GenerateHandler(\"^/(static/(js/|css/|media/)[a-zA-Z0-9._]*)$\", FileHandler))\n\thttp.HandleFunc(\"/audits/\", GenerateHandler(\"^/(static/[a-zA-Z0-9._-]*)$\", FileHandler))\n\thttp.HandleFunc(\"/api/\", GenerateHandler(\"^/api/(get/(all|inventory|host))$\", APIHandler))\n\thttp.HandleFunc(\"/\", GenerateHandler(\"^/(.*)$\", FileHandler))\n\ta := fmt.Sprintf(\"%s:%s\", config.Host, config.Port)\n\tlogger.Infof(\"Start listening \\\"%s\\\"...\", a)\n\tlogger.Fatale(http.ListenAndServe(a, nil), \"Server crashed !\")\n}", "func main() {\n\thttp.HandleFunc(\"/status\", getStatus)\n\thttp.HandleFunc(\"/\", home)\n\thttp.ListenAndServe(\"127.0.0.1:8081\", nil) //ip and port or :8081 is enough\n\n}", "func startHTTPServer(ch chan<- bool) {\n\tserver := http.Server{\n\t\tAddr: \":80\",\n\t}\n\tlog.Println(\"HTTP server started (listening on port 80).\")\n\tlog.Println(\"HTTP server stopped with error:\", server.ListenAndServe())\n\tch <- true\n}", "func (config Config) RunHTTPServer() {\n\t// Set up a channel to listen to for interrupt 
signals\n\tvar runChan = make(chan os.Signal, 1)\n\n\t// Set up a context to allow for graceful server shutdowns in the event\n\t// of an OS interrupt (defers the cancel just in case)\n\tctx, cancel := context.WithTimeout(\n\t\tcontext.Background(),\n\t\tconfig.PilotLight.Server.Timeout.Server,\n\t)\n\tdefer cancel()\n\n\t// Create install-config.yaml file\n\tPreflightSetup(config)\n\n\t// Define server options\n\tserver := &http.Server{\n\t\tAddr: config.PilotLight.Server.Host + \":\" + config.PilotLight.Server.Port,\n\t\tHandler: NewRouter(config.PilotLight.Server.Path),\n\t\tReadTimeout: config.PilotLight.Server.Timeout.Read * time.Second,\n\t\tWriteTimeout: config.PilotLight.Server.Timeout.Write * time.Second,\n\t\tIdleTimeout: config.PilotLight.Server.Timeout.Idle * time.Second,\n\t}\n\n\t// Only listen on IPV4\n\tl, err := net.Listen(\"tcp4\", config.PilotLight.Server.Host+\":\"+config.PilotLight.Server.Port)\n\tcheck(err)\n\n\t// Handle ctrl+c/ctrl+x interrupt\n\tsignal.Notify(runChan, os.Interrupt, syscall.SIGTSTP)\n\n\t// Alert the user that the server is starting\n\tlog.Printf(\"Server is starting on %s\\n\", server.Addr)\n\n\t// Run the server on a new goroutine\n\tgo func() {\n\t\t//if err := server.ListenAndServe(); err != nil {\n\t\tif err := server.Serve(l); err != nil {\n\t\t\tif err == http.ErrServerClosed {\n\t\t\t\t// Normal interrupt operation, ignore\n\t\t\t} else {\n\t\t\t\tlog.Fatalf(\"Server failed to start due to err: %v\", err)\n\t\t\t}\n\t\t}\n\t}()\n\n\t// Block on this channel listeninf for those previously defined syscalls assign\n\t// to variable so we can let the user know why the server is shutting down\n\tinterrupt := <-runChan\n\n\t// If we get one of the pre-prescribed syscalls, gracefully terminate the server\n\t// while alerting the user\n\tlog.Printf(\"Server is shutting down due to %+v\\n\", interrupt)\n\tif err := server.Shutdown(ctx); err != nil {\n\t\tlog.Fatalf(\"Server was unable to gracefully shutdown due to err: %+v\", err)\n\t}\n}", "func (server *testHTTPServerImpl) Start() {\n\tbinding := fmt.Sprintf(\":%d\", server.GetPort())\n\tsrv := &http.Server{\n\t\tAddr: binding,\n\t\tHandler: server.router,\n\t\tReadHeaderTimeout: 5 * time.Second,\n\t}\n\tgo func() {\n\t\trootFolder, err := GetRootFolder()\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Failed to get root folder of project: %s\", err)\n\t\t\treturn\n\t\t}\n\t\tcertFile := fmt.Sprintf(\"%s/testutils/test-server.pem\", rootFolder)\n\t\tkeyFile := fmt.Sprintf(\"%s/testutils/test-server.key\", rootFolder)\n\t\tif err = srv.ListenAndServeTLS(certFile, keyFile); !errors.Is(err, http.ErrServerClosed) {\n\t\t\tlog.Fatalf(\"Failed to start http server using binding %s: %s\", binding, err)\n\t\t}\n\n\t}()\n\tserver.httpServer = srv\n\n\tserver.waitForServerAlive()\n}", "func (s *Server) Start() {\n\tserver := http.Server{\n\t\tAddr: s.Port,\n\t\tHandler: handlers.LoggingHandler(s.Logger, s.Router),\n\t}\n\n\tfmt.Println(\"Running\")\n\tserver.ListenAndServe()\n}", "func main() {\n\n\tfmt.Println(\"Starting Restful services...\")\n\tfmt.Println(\"Using port:8080\")\n\thandleRequests()\n}" ]
[ "0.7877176", "0.76225066", "0.75079435", "0.74484295", "0.74228203", "0.73930115", "0.7374604", "0.737105", "0.7370332", "0.73519176", "0.7309832", "0.7303029", "0.7263866", "0.7234094", "0.7230882", "0.7226243", "0.7224325", "0.71582234", "0.71568817", "0.7156688", "0.71443933", "0.7044315", "0.7038623", "0.70214176", "0.7017917", "0.70120484", "0.69753903", "0.69616014", "0.6917773", "0.6892485", "0.68562096", "0.6850322", "0.68480325", "0.6838174", "0.68151945", "0.6793913", "0.6787188", "0.67832595", "0.6774717", "0.67745185", "0.6747507", "0.6746433", "0.67426056", "0.67387474", "0.673056", "0.6729572", "0.6723364", "0.6713709", "0.6711407", "0.6704399", "0.6704346", "0.6699648", "0.669607", "0.6691097", "0.6674273", "0.66728026", "0.66704035", "0.6670004", "0.66697925", "0.6668802", "0.66657126", "0.66649586", "0.6651958", "0.665033", "0.66422546", "0.66351247", "0.6620667", "0.6613385", "0.6611009", "0.660682", "0.6603583", "0.6585675", "0.6581234", "0.6561008", "0.6557288", "0.6552983", "0.6549444", "0.65467846", "0.6534439", "0.6525303", "0.6520714", "0.65206474", "0.6515228", "0.6515164", "0.65138716", "0.65138716", "0.6510796", "0.65078783", "0.6506983", "0.6505323", "0.6496178", "0.6495222", "0.64932454", "0.649025", "0.6490221", "0.6489123", "0.64843774", "0.64793956", "0.64780563", "0.6477067" ]
0.70987064
21
Shutdown gracefully shuts the webserver down.
func (ws *WebServer) Shutdown() error {
	err := ws.server.Shutdown(context.Background())
	ws.server = nil
	return err
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (sw *SimpleWebServer) Shutdown(ctx context.Context) error {\n\tif !sw.running {\n\t\treturn fmt.Errorf(\"not started\")\n\t}\n\tsw.running = false\n\treturn sw.Server.Shutdown(ctx)\n}", "func (p *Proxy) Shutdown() {\n\tlog.Info(\"Shutting down server gracefully\")\n\tclose(p.shutdown)\n\tgraceful.Shutdown()\n\tp.gRPCStop()\n}", "func (sc *serverConfig) Shutdown() {\n\tlog.Println(\"shutting down http server...\")\n\tsc.Shutdown()\n}", "func Shutdown() {\n\tlog.Info().Msg(\"Shutting down HTTP server gracefully...\")\n\terr := E.Shutdown(nil)\n\tif err != nil {\n\t\tlog.Error().Msgf(\"Failed to shutdown HTTP server gracefully: %s\", err.Error())\n\t}\n}", "func Shutdown() {\n\tclose(shutdown)\n}", "func (i *Instance) Shutdown() {\n\t// Shutdown all dependencies\n\ti.Service.Shutdown()\n\n\t// Shutdown HTTP server\n\tctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)\n\tdefer cancel()\n\terr := i.httpServer.Shutdown(ctx)\n\tif err != nil {\n\t\tlogrus.WithError(err).Error(\"Failed to shutdown HTTP server gracefully\")\n\t\tos.Exit(1)\n\t}\n\n\tlogrus.Info(\"Shutdown HTTP server...\")\n\tos.Exit(0)\n}", "func (p *Proxy) Shutdown(ctx context.Context) error {\n\tif err := p.server.Shutdown(ctx); err != nil {\n\t\tp.logger.Error(\"Error shutting down HTTP server!\", zap.Error(err))\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (w *Webserver) Stop() error {\n\tw.logger.Infof(\"gracefully shutting down http server at %d...\", w.config.Port)\n\n\terr := w.Server.Shutdown(context.Background())\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tclose(w.jobs)\n\treturn nil\n}", "func (s *callbackServer) Shutdown() {\n\tif err := s.server.Shutdown(context.Background()); err != nil {\n\t\tlog.Printf(\"HTTP server Shutdown error: %v\", err)\n\t}\n}", "func Shutdown() {\n\tdefaultDaemon.Shutdown()\n}", "func (s *HTTPServer) Shutdown(ctx context.Context) error {\n\tglog.Infof(\"http server shutdown\")\n\treturn s.https.Shutdown(ctx)\n}", "func (s *Server) Shutdown() {\n\tclose(stop)\n}", "func (s *Server) Shutdown() error {\n\terr := s.httpListener.Shutdown(context.Background())\n\treturn err\n}", "func (mng *Manager) Shutdown(ctx context.Context) {\n\tif err := mng.srv.Shutdown(ctx); err != nil {\n\t\tmng.lgr.Warn(\"Error stopping HTTP router \" + err.Error())\n\t}\n}", "func (s *Server) ShutDown(ctx context.Context) error {\n\treturn s.HTTPServer.Shutdown(ctx)\n}", "func (ui *GUI) Shutdown() {\n\tctx, cl := context.WithTimeout(ui.cfg.Ctx, time.Second*5)\n\tdefer cl()\n\tif err := ui.server.Shutdown(ctx); err != nil {\n\t\tlog.Error(err)\n\t}\n}", "func Shutdown() {\n\tlog.Println(\"http server is shutting down...\")\n\n\tctx, cancel := context.WithTimeout(context.Background(), shutdownTimeout)\n\tdefer cancel()\n\n\tif err := server.Shutdown(ctx); err != nil {\n\t\tlog.Fatalf(\"Could not gracefully shutdown http server: %v\\n\", err)\n\t}\n\t// wait for the go routine to clean up\n\twg.Wait()\n}", "func (s *MuxServer) Shutdown(ctx context.Context) {\n\ts.HTTPServer.Shutdown(ctx)\n\ts.GRPCServer.Shutdown(ctx)\n}", "func _shutdown(w http.ResponseWriter, r *http.Request) {\n\tfmt.Println(\"Shutdown!\")\n\t//http.Shutdown(nil)\n}", "func (wk *Worker) Shutdown(_ *struct{}, _ *struct{}) error {\n\tserverless.Debug(\"Worker shutdown %s\\n\", wk.address)\n\tclose(wk.shutdown)\n\twk.l.Close()\n\treturn nil\n}", "func (s *Rest) Shutdown() {\n\tlog.Print(\"[WARN] shutdown rest server\")\n\tctx, cancel := context.WithTimeout(context.Background(), time.Second)\n\tdefer 
cancel()\n\ts.lock.Lock()\n\tif err := s.httpServer.Shutdown(ctx); err != nil {\n\t\tlog.Printf(\"[DEBUG] rest shutdown error, %s\", err)\n\t}\n\tlog.Print(\"[DEBUG] shutdown rest server completed\")\n\ts.lock.Unlock()\n}", "func (w *Web) Stop() error {\n\tw.L(\"Stopping web server on %s:%s\", w.Address, w.Port)\n\tctx, cancel := context.WithTimeout(context.Background(), nonZeroDuration(w.Timeouts.Shutdown, time.Second*30))\n\tdefer cancel()\n\terr := w.Shutdown(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tw.running = false\n\treturn nil\n}", "func (t *TLSServer) Shutdown(ctx context.Context) error {\n\terrC := make(chan error, 2)\n\tgo func() {\n\t\terrC <- t.httpServer.Shutdown(ctx)\n\t}()\n\tgo func() {\n\t\tt.grpcServer.server.GracefulStop()\n\t\terrC <- nil\n\t}()\n\terrors := []error{}\n\tfor i := 0; i < 2; i++ {\n\t\terrors = append(errors, <-errC)\n\t}\n\treturn trace.NewAggregate(errors...)\n}", "func shutDown(ctx context.Context, logger *log.Logger, srv *http.Server) {\n\tquit := make(chan os.Signal, 1)\n\tsignal.Notify(quit, os.Interrupt)\n\t<-quit\n\n\tlogger.Info(\"msg\", \"Shutting down HTTP/REST gateway server...\")\n\n\tctx, cancel := context.WithTimeout(ctx, 5*time.Second)\n\tdefer cancel()\n\n\tif err := srv.Shutdown(ctx); err != nil {\n\t\tlogger.Error(\"err\", fmt.Sprintf(\"Shutdown HTTP/REST gateway server: %s\", err.Error()))\n\t}\n\n\tlogger.Info(\"msg\", \"Shutdown done HTTP/REST gateway server\")\n}", "func (s *Server) Shutdown() {\n\t// TODO(aditya) shut down workers and socket readers\n\ts.logger.Info(\"Shutting down server gracefully\")\n\tclose(s.shutdown)\n\tif s.FlushOnShutdown {\n\t\tctx, cancel := context.WithTimeout(context.Background(), s.Interval)\n\t\ts.Flush(ctx)\n\t\tcancel()\n\t}\n\tgraceful.Shutdown()\n\tfor _, source := range s.sources {\n\t\tsource.source.Stop()\n\t}\n\n\t// Close the gRPC connection for forwarding\n\tif s.grpcForwardConn != nil {\n\t\ts.grpcForwardConn.Close()\n\t}\n}", "func Shutdown() error {\n\tif server != nil {\n\t\treturn server.Shutdown(context.Background())\n\t}\n\n\tserver = nil\n\treturn nil\n}", "func (c *Client) Shutdown() error {\n\tif _, err := c.httpPost(\"system/shutdown\", \"\"); err != nil {\n\t\treturn maskAny(err)\n\t}\n\treturn nil\n}", "func (a *Application) Shutdown() {\n\t// Publish `OnPreShutdown` event\n\ta.EventStore().sortAndPublishSync(&Event{Name: EventOnPreShutdown})\n\n\tctx, cancel := context.WithTimeout(context.Background(), a.settings.ShutdownGraceTimeout)\n\tdefer cancel()\n\n\ta.Log().Warn(\"aah go server graceful shutdown triggered with timeout of \", a.settings.ShutdownGraceTimeStr)\n\tif err := a.server.Shutdown(ctx); err != nil && err != http.ErrServerClosed {\n\t\ta.Log().Error(err)\n\t}\n\ta.shutdownRedirectServer()\n\ta.Log().Info(\"aah go server shutdown successfully\")\n\n\t// Publish `OnPostShutdown` event\n\ta.EventStore().sortAndPublishSync(&Event{Name: EventOnPostShutdown})\n}", "func (s *WiringServer) Shutdown() {\n\ts.cacheServer.hs.Close()\n}", "func (a *App) Shutdown() {\n\ta.shutdown <- syscall.SIGTERM\n}", "func (dd *DefaultDriver) Shutdown(ctx context.Context) error {\n\treturn dd.Server.Shutdown(ctx)\n}", "func (s *Rest) Shutdown() {\n\tlog.Print(\"[WARN] shutdown rest server\")\n\tctx, cancel := context.WithTimeout(context.Background(), time.Second)\n\n\tdefer cancel()\n\n\ts.lock.Lock()\n\n\tif s.httpServer != nil {\n\t\tif err := s.httpServer.Shutdown(ctx); err != nil {\n\t\t\tlog.Printf(\"[DEBUG] rest shutdown error, %s\", err)\n\t\t}\n\t}\n\n\tlog.Print(\"[DEBUG] 
shutdown rest server completed\")\n\n\ts.lock.Unlock()\n}", "func (srv *Server) Shutdown(ctx context.Context) error {\n\tif srv.Config.Stats != nil {\n\t\tif err := srv.Config.Stats.Shutdown(ctx); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tif err := srv.ssh.Shutdown(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn srv.http.Shutdown(ctx)\n}", "func (s *Server) Shutdown() error {\n\ts.api.ServerShutdown()\n\treturn nil\n}", "func (s Server) Shutdown() {\n\ts.logger.Error(s.server.Shutdown(context.Background()))\n}", "func (e *Engine) Shutdown() error {\n\treturn e.server.Shutdown(context.TODO())\n}", "func (app *App) Shutdown() {\n\tlog.Println(\"Releasing server resources...\")\n\n\t// shutdown server\n\terr := app.server.Close()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t// close database connection\n\terr = app.database.Close()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tlog.Println(\"database: Connection closed\")\n}", "func (hmr *receiver) Shutdown(ctx context.Context) error {\n\tclose(hmr.done)\n\treturn hmr.closeScrapers(ctx)\n}", "func (mw *JWTMiddleware) Shutdown(_ context.Context) error {\n\treturn nil\n}", "func (httpAPI *HTTPAPIService) Shutdown(ctx context.Context) error {\n\tlog.Println(\"shutting down HTTP API service...\")\n\tif err := httpAPI.e.Shutdown(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (a *API) Shutdown(ctx context.Context) error {\n\tif err := a.Server.Shutdown(ctx); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (s *Server) Shutdown() {\n\ts.quit <- syscall.SIGINT\n}", "func (s *Server) Shutdown() {\n\tlog.Println(\"shutting down reflector server...\")\n\ts.grp.StopAndWait()\n\tlog.Println(\"reflector server stopped\")\n}", "func (sc *controller) Shutdown(ctx context.Context) error {\n\tsc.stopScraping()\n\n\t// wait until scraping ticker has terminated\n\tif sc.initialized {\n\t\t<-sc.terminated\n\t}\n\n\tvar errs []error\n\tfor _, scraper := range sc.resourceMetricScrapers {\n\t\tif err := scraper.Shutdown(ctx); err != nil {\n\t\t\terrs = append(errs, err)\n\t\t}\n\t}\n\treturn consumererror.CombineErrors(errs)\n}", "func (s *SrvSession) Shutdown(ctx context.Context) {\n\tif s.srv != nil {\n\t\tif err := s.srv.Shutdown(ctx); err != nil && err != http.ErrServerClosed {\n\t\t\tlog.Warningf(\"Shutdown for [%v] %v\", s.listenAddr, err)\n\t\t}\n\t}\n}", "func (o *OmahaServer) Shutdown(ctx context.Context) {\n\to.server.Shutdown(ctx)\n\tclose(o.shuttingDown)\n}", "func (s *Server) Shutdown(ctx context.Context) {\n\ts.router.Shutdown(ctx)\n}", "func (k *KeKahu) Shutdown() (err error) {\n\tinfo(\"shutting down the kekahu service\")\n\n\t// Shutdown the server\n\tif err = k.server.Shutdown(); err != nil {\n\t\tk.echan <- err\n\t}\n\n\t// Notify the run method we're done\n\t// NOTE: do this last or the cleanup proceedure won't be done.\n\tk.done <- true\n\treturn nil\n}", "func Shutdown() {\n\tstdClient.Close()\n}", "func (sr *sapmReceiver) Shutdown(context.Context) error {\n\tif sr.server == nil {\n\t\treturn nil\n\t}\n\terr := sr.server.Close()\n\tsr.shutdownWG.Wait()\n\treturn err\n}", "func (s *Server) Shutdown() error {\n\ts.server.GracefulStop()\n\treturn nil\n}", "func (s *Server) Shutdown(ctx context.Context) error {\n\twg := &sync.WaitGroup{}\n\tc := make(chan struct{})\n\n\tgo func() {\n\t\tdefer close(c)\n\t\ts.GracefulStop()\n\t\twg.Wait()\n\t}()\n\n\tselect {\n\tcase <-c:\n\t\treturn nil\n\tcase <-ctx.Done():\n\t\treturn ctx.Err()\n\t}\n}", "func (s *Server) Shutdown() {\n\ts.log.Info(\"shutting down server\", 
zap.Int(\"peers\", s.PeerCount()))\n\ts.transport.Close()\n\ts.discovery.Close()\n\ts.consensus.Shutdown()\n\tfor _, p := range s.getPeers(nil) {\n\t\tp.Disconnect(errServerShutdown)\n\t}\n\ts.bQueue.discard()\n\ts.bSyncQueue.discard()\n\tif s.StateRootCfg.Enabled {\n\t\ts.stateRoot.Shutdown()\n\t}\n\tif s.oracle != nil {\n\t\ts.oracle.Shutdown()\n\t}\n\tif s.notaryModule != nil {\n\t\ts.notaryModule.Stop()\n\t}\n\tif s.chain.P2PSigExtensionsEnabled() {\n\t\ts.notaryRequestPool.StopSubscriptions()\n\t}\n\tclose(s.quit)\n}", "func (s *Server) Shutdown() error {\n\ts.ctxCancel()\n\treturn nil\n}", "func shutdownWebServers(ctx context.Context, signal ...string) {\n\tserverProcessStatus.Set(adminActionShuttingDown)\n\tif len(signal) > 0 {\n\t\tglog.Printf(ctx, \"%d: server shutting down by signal: %s\", gproc.Pid(), signal[0])\n\t\tforceCloseWebServers(ctx)\n\t\tallDoneChan <- struct{}{}\n\t} else {\n\t\tglog.Printf(ctx, \"%d: server shutting down by api\", gproc.Pid())\n\t\tgtimer.SetTimeout(ctx, time.Second, func(ctx context.Context) {\n\t\t\tforceCloseWebServers(ctx)\n\t\t\tallDoneChan <- struct{}{}\n\t\t})\n\t}\n}", "func (a *API) Shutdown() error {\n\treturn a.server.Shutdown(context.Background())\n}", "func (a *App) Shutdown() {\n\ta.Trace(\"lego.shutdown\", \"Gracefully shutting down...\")\n\ta.disco.Leave(a.appCtx)\n\tif !a.Drain() {\n\t\ta.Trace(\"lego.shutdown.abort\", \"Server already draining\")\n\t\treturn\n\t}\n\ta.close()\n}", "func (s *PingServer) Shutdown() {\n\ts.srv.GracefulStop()\n}", "func (s *T) Shutdown() {\n\tif s.srv != nil {\n\t\ts.srv.Close()\n\t}\n}", "func (zr *zipkinReceiver) Shutdown(context.Context) error {\n\tvar err error\n\tif zr.server != nil {\n\t\terr = zr.server.Close()\n\t}\n\tzr.shutdownWG.Wait()\n\treturn err\n}", "func (s *Server) Shutdown() (err error) {\n\tlog.Info().Msg(\"gracefully shutting down\")\n\ts.srv.GracefulStop()\n\tif err = s.trisa.Shutdown(); err != nil {\n\t\tlog.Error().Err(err).Msg(\"could not shutdown trisa server\")\n\t\treturn err\n\t}\n\tlog.Debug().Msg(\"successful shutdown\")\n\treturn nil\n}", "func (d *Driver) Shutdown(ctx context.Context) error {\n\treturn d.Server.Shutdown(ctx)\n}", "func (s *Server) Shutdown(ctx context.Context) error {\n\t// logInfo(\"%v %v Shutdown...\", s.Handler.LogTag(), s.Listener.Addr())\n\tdefer logInfo(\"%v %v Shutdown\", s.Handler.LogTag(), s.Listener.Addr())\n\ts.running = false\n\ts.Listener.Close()\n\tselect {\n\tcase <-s.chStop:\n\tcase <-ctx.Done():\n\t\treturn ErrTimeout\n\t}\n\treturn nil\n}", "func (s *Server) Shutdown() error {\n\treturn s.Provider().Stop()\n}", "func (a *App) Stop() {\n\t// Create a context to attempt a graceful 5 second shutdown.\n\tconst timeout = 5 * time.Second\n\tctx, cancel := context.WithTimeout(context.Background(), timeout)\n\tdefer cancel()\n\n\t// Attempt the graceful shutdown by closing the listener and\n\t// completing all inflight requests.\n\tif err := a.server.Shutdown(ctx); err != nil {\n\t\ta.logger.Printf(\"Could not stop server gracefully: %v\", err)\n\t\ta.logger.Printf(\"Initiating hard shutdown\")\n\t\tif err := a.server.Close(); err != nil {\n\t\t\ta.logger.Printf(\"Could not stop http server: %v\", err)\n\t\t}\n\t}\n}", "func (sc *ServerConn) Shutdown() {\n\tsc.cancel()\n}", "func (w *Worker) Shutdown() {\n\tw.Stopped = true\n\tclose(w.stopChan)\n}", "func (srv *Server) Shutdown(ctx context.Context, wg *sync.WaitGroup) error {\n\tdefer wg.Done()\n\treturn srv.Server.Shutdown(ctx)\n}", "func (s *Server) Shutdown() 
{\n\ts.candidate.endCampaign()\n\n\t// Shutdown the RPC listener.\n\tif s.rpcListener != nil {\n\t\tlogging.Info(\"core/server: shutting down RPC server at %v\", s.rpcListener.Addr())\n\t\ts.rpcListener.Close()\n\t}\n\n\tclose(s.shutdownChan)\n}", "func Shutdown() {\n\tlm().shutdown()\n}", "func (f *Fastglue) Shutdown(s *fasthttp.Server, shutdownComplete chan error) {\n\tshutdownComplete <- f.Server.Shutdown()\n}", "func (s *server) shutdown(ctx context.Context) error {\n\treturn s.server.Shutdown(ctx)\n}", "func (s *tcpServerBase) Shutdown() {\n\tlog.WithField(\"name\", s.name).Debug(\"shutting down\")\n\tclose(s.quit)\n\t_ = s.listener.Close()\n}", "func (s *Server) Shutdown() {\n\tlog.Println(\"Shutting down\")\n\ts.shutdown()\n}", "func (s *Server) Shutdown(graceful bool) {\n\ts.yorkieServiceCancel()\n\n\tif graceful {\n\t\ts.grpcServer.GracefulStop()\n\t} else {\n\t\ts.grpcServer.Stop()\n\t}\n}", "func (h *Hookbot) Shutdown() {\n\tclose(h.shutdown)\n\th.wg.Wait()\n}", "func Shutdown() {\n\tglobalNotifier.Shutdown()\n}", "func (r *router) Close() {\n\tlog.Info(\"Got OS shutdown signal, shutting down NSE webhook injector gracefully...\")\n\n\tif err := r.server.Shutdown(context.Background()); err != nil {\n\t\tlog.WithError(err).Error(\"Failed to shutting down the webhook server\")\n\t}\n}", "func (s *Server) Shutdown(ctx context.Context) error {\n\treturn s.service.Stop(ctx)\n}", "func (as *AdminServer) Shutdown() error {\n\tctx, cancel := context.WithTimeout(context.Background(), time.Second*10)\n\tdefer cancel()\n\treturn as.server.Shutdown(ctx)\n}", "func (r *RuntimeImpl) Shutdown() {\n\tr.logger.Info(\"\\n\\n * Starting graceful shutdown\")\n\tr.logger.Info(\" * Waiting goroutines stop\")\n\tif r.slackWriter != nil {\n\t\tmessage, attachments := buildSlackShutdownMessage(r.dashboardTitle, false)\n\t\tr.slackWriter.PostNow(message, attachments)\n\t}\n\tr.syncManager.Stop()\n\tif r.impListener != nil {\n\t\tr.impListener.Stop(true)\n\t}\n\tr.appMonitor.Stop()\n\tr.servicesMonitor.Stop()\n\n\tr.logger.Info(\" * Shutdown complete - see you soon!\")\n\tr.blocker <- struct{}{}\n}", "func (r *Raft) Shutdown(ctx context.Context) error {\n\tr.doClose(ErrServerClosed)\n\tselect {\n\tcase <-ctx.Done():\n\t\treturn ctx.Err()\n\tcase <-r.closed:\n\t\treturn nil\n\t}\n}", "func shutdownWebServers() error {\n\t// Do the shutdowns in parallel for efficiency\n\tvar wg sync.WaitGroup\n\tvar shutdownErr error\n\n\tfor _, s := range webServers {\n\t\tserver := s\n\t\tif server != nil {\n\t\t\twg.Add(1)\n\t\t\tgo func() {\n\t\t\t\t// Decrement the counter when the goroutine completes.\n\t\t\t\tdefer wg.Done()\n\n\t\t\t\tctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)\n\t\t\t\tdefer cancel()\n\n\t\t\t\tlog.Println(\"About to shutdown exporter server\")\n\n\t\t\t\tif err := server.Shutdown(ctx); err != nil {\n\t\t\t\t\tshutdownErr = err\n\t\t\t\t}\n\t\t\t\tlog.Println(\"Exporter server has shutdown\")\n\t\t\t}()\n\t\t}\n\t}\n\t// Wait for all shutdowns to complete\n\twg.Wait()\n\n\tif shutdownErr != nil {\n\t\tlog.Println(\"Shutdown error for exporter server\", shutdownErr)\n\t\treturn shutdownErr\n\t}\n\n\t// Now shutdown the main server\n\tctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)\n\tdefer cancel()\n\n\tlog.Println(\"About to shutdown main server\")\n\n\tif err := mainServer.Shutdown(ctx); err != nil {\n\t\tlog.Println(\"Shutdown error for main server\", err)\n\t\t// Ignore Error. 
We keep getting one but if we ignore things still work...\n\t\tlog.Println(\"Ignoring error and continuing\")\n\t\t// return err\n\t}\n\n\treturn nil\n}", "func (s *testDoQServer) Shutdown() {\n\t_ = s.listener.Close()\n}", "func (s *Server) Shutdown(ctx context.Context) error {\n\treturn s.e.Shutdown(ctx)\n}", "func (a *App) Shutdown(code int) {\n\tos.Exit(code)\n}", "func Shutdown() {\n\t// Currently nothing to do\n}", "func Shutdown() {\n\tlog.Infof(\"Bot shutting down..\")\n\n\tif len(portfolio.Portfolio.Addresses) != 0 {\n\t\tbot.config.Portfolio = portfolio.Portfolio\n\t}\n\n\tlog.Infof(\"Exiting.\")\n\tos.Exit(0)\n}", "func (s *Server) Shutdown(ctx context.Context) error {\n\tif s.closing == nil {\n\t\t// Nothing to do\n\t\treturn nil\n\t}\n\tclose(s.closing)\n\n\t// Stops listening.\n\terr := s.closeListener()\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\t// Forces closing of all actives connections.\n\t\t\ts.close()\n\t\t\treturn ctx.Err()\n\t\tcase <-s.closed:\n\t\t\treturn nil\n\t\t}\n\t}\n}", "func (s *ServerImpl) Shutdown() {\n\tprocs := FindListeningProcesses(s.startPort, s.endPort)\n\tfor _, proc := range procs {\n\t\tif p, err := os.FindProcess(proc.Pid); err == nil {\n\t\t\tp.Kill()\n\t\t}\n\t}\n\tos.Exit(0)\n}", "func (factory *Factory) Shutdown() {\n\tfactory.shutdownInProgress = true\n\t// If the cleanup flag is present don't do any cleanup\n\tif !factory.options.cleanup {\n\t\treturn\n\t}\n\n\t// Wait 15 seconds before running all shutdown handlers to ensure everything can catch up.\n\ttime.Sleep(15 * time.Second)\n\terr := factory.CleanupChaosMeshExperiments()\n\tif err != nil {\n\t\treturn\n\t}\n\n\tfactory.invariantShutdownHooks.InvokeShutdownHandlers()\n\tfactory.shutdownHooks.InvokeShutdownHandlers()\n}", "func (s *ServerV6) Shutdown() {\n\tselect {\n\tcase s.errs <- nil:\n\tdefault:\n\t}\n}", "func (self *WinLogWatcher) Shutdown() {\n\tclose(self.shutdown)\n\tfor channel, _ := range self.watches {\n\t\tself.RemoveSubscription(channel)\n\t}\n\tCloseEventHandle(uint64(self.renderContext))\n\tclose(self.errChan)\n\tclose(self.eventChan)\n}", "func (gateway *Gateway) Shutdown(ctx context.Context) error {\n\tif gateway.Server == nil {\n\t\treturn nil\n\t}\n\n\tgateway.Server.GracefulStop()\n\treturn nil\n}", "func (this *ReceiverHolder) Shutdown() error {\n\tfmt.Println(\"Shutting down Server...please wait.\")\n\tfmt.Println(\"######################################\")\n\tthis.receiver.Stop()\n\tfmt.Println(\"######################################\")\n\treturn nil\n}", "func (gate *Gate) Shutdown() {\n\tctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)\n\tdefer cancel()\n\n\tif err := gate.srv.Shutdown(ctx); err != nil {\n\t\tpanic(\"Server Shutdown Error : \" + err.Error())\n\t}\n\tfmt.Println(\"Server Exit\")\n}", "func (s *DefaultServer) Shutdown() error {\n\terr := s.cmd.Wait()\n\tif err != nil {\n\t\treturn s.cmd.Process.Kill()\n\t}\n\n\treturn err\n}", "func Shutdown() (ex error) {\n\tif server.cmd != nil {\n\t\tserver.cmd.Process.Signal(os.Interrupt)\n\t\terr := server.cmd.Wait()\n\t\tserver.cmd = nil\n\n\t\tvar eerr *exec.ExitError\n\t\tif errors.As(err, &eerr) && eerr.ExitCode() == 2 {\n\t\t\treturn nil\n\t\t}\n\t\treturn err\n\t}\n\treturn nil\n}", "func (c *LspConn) Shutdown(ctx context.Context) error {\n\treturn c.Call(ctx, \"shutdown\", nil, nil)\n}", "func (s *Server) Shutdown(ctx context.Context) error {\n\treturn s.callbackServer.Shutdown(ctx)\n}" ]
[ "0.7640571", "0.7272516", "0.7171907", "0.7166998", "0.7069365", "0.70634085", "0.69620883", "0.69575524", "0.69446343", "0.6941636", "0.6906725", "0.6897182", "0.6874807", "0.6861194", "0.684571", "0.6753442", "0.6734881", "0.67202586", "0.6720189", "0.6679935", "0.66797376", "0.6674736", "0.66726226", "0.6670953", "0.66545606", "0.66333294", "0.66307473", "0.6620747", "0.6614834", "0.66146874", "0.6604905", "0.65998316", "0.65814805", "0.65690273", "0.65677", "0.6565337", "0.6559486", "0.65545446", "0.6541352", "0.6510134", "0.65024585", "0.6494187", "0.64743865", "0.64686507", "0.6466162", "0.64650774", "0.64637375", "0.6456962", "0.6446893", "0.6434645", "0.6431994", "0.64318955", "0.6423513", "0.64231527", "0.6422527", "0.64131093", "0.6410047", "0.64050555", "0.64014167", "0.63927835", "0.637845", "0.6376504", "0.6369166", "0.6369064", "0.63525724", "0.6352059", "0.6346934", "0.63377047", "0.6336201", "0.6322375", "0.6321385", "0.6321254", "0.63189334", "0.631801", "0.6311464", "0.62881345", "0.62873423", "0.6284722", "0.6283003", "0.6273449", "0.6251829", "0.6251375", "0.6243525", "0.6237993", "0.6236186", "0.6232055", "0.6222863", "0.62203205", "0.6205791", "0.6203686", "0.62005234", "0.61983114", "0.6197578", "0.619712", "0.619657", "0.6192141", "0.61910623", "0.61872166", "0.617093", "0.6164099" ]
0.78245765
0
defaultConfig returns the default configs
func defaultConfig() *config {
	return &config{
		Drop: false,
		RGBA: rgba{
			R: 255,
			G: 255,
			B: 255,
			A: 1,
		},
		Export: export{
			Format:  "jpeg",
			Quality: 85,
		},
	}
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func defaultConfig() interface{} {\n\treturn &config{\n\t\tPools: make(pools),\n\t\tConfDirPath: \"/etc/cmk\",\n\t}\n}", "func defaultConfig() *config {\n\treturn &config{}\n}", "func defaultConfig() interface{} {\n\treturn &conf{\n\t\tPools: make(map[string]poolConfig),\n\t\tLabelNode: false,\n\t\tTaintNode: false,\n\t}\n}", "func defaultConfig() Config {\n\treturn Config{\n\t\tConfFileOptions: defaultFileOptions(),\n\t}\n}", "func defaultConfig() *config {\n\treturn &config{\n\t\tOperations: operations{\n\t\t\tResize: resize{\n\t\t\t\tRaw: *resizeDefaults(),\n\t\t\t},\n\t\t\tFlip: flip{\n\t\t\t\tRaw: *flipDefaults(),\n\t\t\t},\n\t\t\tBlur: blur{\n\t\t\t\tRaw: *blurDefaults(),\n\t\t\t},\n\t\t\tRotate: rotate{\n\t\t\t\tRaw: *rotateDefaults(),\n\t\t\t},\n\t\t\tCrop: crop{\n\t\t\t\tRaw: *cropDefaults(),\n\t\t\t},\n\t\t\tLabel: label{\n\t\t\t\tRaw: *labelDefaults(),\n\t\t\t},\n\t\t},\n\t\tExport: export{\n\t\t\tRaw: *exportDefaults(),\n\t\t},\n\t}\n}", "func Default() *Config {\n\treturn &defaultConfig\n}", "func DefaultConfig() Config {\n\treturn Config{\n\t\tBaseConfig: defaultBaseConfig(),\n\t\tP2P: p2pConfig.DefaultConfig(),\n\t\tAPI: apiConfig.DefaultConfig(),\n\t\tCONSENSUS: consensusConfig.DefaultConfig(),\n\t\tHARE: hareConfig.DefaultConfig(),\n\t\tTIME: timeConfig.DefaultConfig(),\n\t}\n}", "func configDefault(config ...Config) Config {\n\t// Return default config if nothing provided\n\tif len(config) < 1 {\n\t\treturn ConfigDefault\n\t}\n\n\t// Override default config\n\tcfg := config[0]\n\n\t// Set default values\n\tif cfg.Host == \"\" {\n\t\tcfg.Host = ConfigDefault.Host\n\t}\n\tif cfg.Port <= 0 {\n\t\tcfg.Port = ConfigDefault.Port\n\t}\n\tif cfg.Database == \"\" {\n\t\tcfg.Database = ConfigDefault.Database\n\t}\n\tif cfg.Table == \"\" {\n\t\tcfg.Table = ConfigDefault.Table\n\t}\n\tif int(cfg.GCInterval.Seconds()) <= 0 {\n\t\tcfg.GCInterval = ConfigDefault.GCInterval\n\t}\n\treturn cfg\n}", "func configDefault(config ...Config) Config {\n\t// Return default config if nothing provided\n\tif len(config) < 1 {\n\t\treturn ConfigDefault\n\t}\n\n\t// Override default config\n\tcfg := config[0]\n\n\tif cfg.EnableStackTrace && cfg.StackTraceHandler == nil {\n\t\tcfg.StackTraceHandler = defaultStackTraceHandler\n\t}\n\n\treturn cfg\n}", "func DefaultConfig() *Config {\n\treturn &Config{\n\t\tAppName: pkg.Name(),\n\t\tLogPath: \"/tmp/log\",\n\t\tFlowRules: make([]*flow.Rule, 0),\n\t}\n}", "func defaultConfig() *ApplicationConfig {\n\tconf := &ApplicationConfig{\n\t\tLogLevel: \"info\",\n\t\tHideArchived: true,\n\t\tDefaultMetadata: []string{\"Author\", \"Published At\", \"Language\", \"Ipfs\", \"Class\", \"Title\"},\n\t\tAutoComplete: true,\n\t\tAutoCompleteMaxResults: 20,\n\t\tEnableFullTextSearch: true,\n\t\tColors: defaultColors(),\n\t\tShortcuts: defaultShortcuts(),\n\t}\n\treturn conf\n}", "func Default() *AAAConfig {\n\treturn defaultStbConfig\n}", "func (t *xfconfTable) getDefaultConfig() (map[string]map[string]interface{}, error) {\n\tresults := make(map[string]map[string]interface{}, 0)\n\n\tdefaultDirs := getDefaultXfconfDirs()\n\tfor _, dir := range defaultDirs {\n\t\tdefaultConfig, err := t.getConfigFromDirectory(dir)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"error getting config from default directory %s: %w\", dir, err)\n\t\t}\n\t\tmaps.Copy(results, defaultConfig)\n\t}\n\n\treturn results, nil\n}", "func DefaultConfig() Config {\n\treturn Config{\n\t\t// Dependencies.\n\t\tK8sClient: nil,\n\t\tLogger: nil,\n\t}\n}", "func DefaultConfig() *Config {\n\treturn 
&Config{\n\t\tNats: \"nats://192.168.168.195:4222\",\n\t\tKafka: \"\",\n\t\tQscNames: DefaultQscConfig(),\n\t}\n}", "func (f *factory) DefaultConfig() interface{} {\n\treturn f.newDefaultCfg()\n}", "func DefaultConfig() Config {\n\treturn Config{\n\t\t// Dependencies.\n\t\tResource: nil,\n\n\t\t// Settings.\n\t\tName: \"\",\n\t}\n}", "func DefaultConfig() *Config {\n\treturn &Config{\n\t\tPort: 5000,\n\t\tHapHome: \"/HOME/hapadm\",\n\t\tClusterID: \"default-name\",\n\t\tSudo: true,\n\t}\n}", "func DefaultConfig() Config {\n\treturn Config{\n\t\t// Dependencies.\n\t\tK8sClient: nil,\n\t\tLogger: nil,\n\t\tVaultClient: nil,\n\t}\n}", "func GetDefaultConfig() *Config {\n\tconfig := Config{\n\t\tFormatting: \"MarkDown\",\n\t\tLogLevel: \"Info\",\n\t\tPath: \"descriptions.yaml\",\n\t\tWhiteList: \"whitelist.json\",\n\t}\n\treturn &config\n}", "func DefaultConfig() Config {\n\tnewConfig := Config{\n\t\t// Dependencies.\n\t\tFactoryCollection: factory.MustNewCollection(),\n\t\tLog: log.New(log.DefaultConfig()),\n\t\tStorageCollection: storage.MustNewCollection(),\n\t}\n\n\treturn newConfig\n}", "func defaultConfig() *config {\n\treturn &config{\n\t\tPermission: 0777,\n\t}\n}", "func DefaultConfig() (*Config, format.PropKeyResolver) {\n\tconfig := &Config{}\n\tpkr := format.NewPropKeyResolver(config)\n\t_ = pkr.SetDefaultProps(config)\n\treturn config, pkr\n}", "func DefaultConfig() *Config {\n\treturn &Config{\n\t\tSFCRenderer: defaultSFCRenderer,\n\t}\n}", "func DefaultConfig() *config.Config {\n\treturn &config.Config{\n\t\tDebug: config.Debug{\n\t\t\tAddr: \"127.0.0.1:9174\",\n\t\t},\n\t\tService: config.Service{\n\t\t\tName: \"notifications\",\n\t\t},\n\t\tNotifications: config.Notifications{\n\t\t\tSMTP: config.SMTP{\n\t\t\t\tHost: \"127.0.0.1\",\n\t\t\t\tPort: \"1025\",\n\t\t\t\tSender: \"noreply@example.com\",\n\t\t\t},\n\t\t\tEvents: config.Events{\n\t\t\t\tEndpoint: \"127.0.0.1:9233\",\n\t\t\t\tCluster: \"ocis-cluster\",\n\t\t\t\tConsumerGroup: \"notifications\",\n\t\t\t},\n\t\t\tRevaGateway: \"127.0.0.1:9142\",\n\t\t},\n\t}\n}", "func DefaultConfig() *Config {\n\treturn &Config{\n\t\tZapConfig: zap.NewProductionConfig(),\n\t}\n}", "func createDefaultConfig() component.Config {\n\treturn &Config{}\n}", "func createDefaultConfig() component.Config {\n\treturn &Config{}\n}", "func configDefault(config ...Config) Config {\n\t// Return default config if nothing provided\n\tif len(config) < 1 {\n\t\treturn ConfigDefault\n\t}\n\n\t// Override default config\n\tcfg := config[0]\n\n\t// Set default values\n\n\tif cfg.Next == nil {\n\t\tcfg.Next = ConfigDefault.Next\n\t}\n\n\tif cfg.Lifetime.Nanoseconds() == 0 {\n\t\tcfg.Lifetime = ConfigDefault.Lifetime\n\t}\n\n\tif cfg.KeyHeader == \"\" {\n\t\tcfg.KeyHeader = ConfigDefault.KeyHeader\n\t}\n\tif cfg.KeyHeaderValidate == nil {\n\t\tcfg.KeyHeaderValidate = ConfigDefault.KeyHeaderValidate\n\t}\n\n\tif cfg.KeepResponseHeaders != nil && len(cfg.KeepResponseHeaders) == 0 {\n\t\tcfg.KeepResponseHeaders = ConfigDefault.KeepResponseHeaders\n\t}\n\n\tif cfg.Lock == nil {\n\t\tcfg.Lock = NewMemoryLock()\n\t}\n\n\tif cfg.Storage == nil {\n\t\tcfg.Storage = memory.New(memory.Config{\n\t\t\tGCInterval: cfg.Lifetime / 2, // Half the lifetime interval\n\t\t})\n\t}\n\n\treturn cfg\n}", "func DefaultConfig() Config {\n\treturn Config{\n\t\t// Dependencies.\n\t\tBackOff: nil,\n\t\tFramework: nil,\n\t\tInformer: nil,\n\t\tLogger: nil,\n\t\tTPR: nil,\n\t}\n}", "func NewDefaultConfig() *Config {\n\treturn &Config{\n\t\tArtifactsDir: DefaultKataArtifactsDir,\n\t}\n}", 
"func DefaultConfig() Config {\n\treturn Config{\n\t\tBlockSize: 64,\n\t\tNumReplicas: 2,\n\t\tNumTapestry: 2,\n\t\tZkAddr: \"localhost:2181\",\n\t}\n}", "func DefaultConfig() *Config {\n\treturn &Config{\n\t\tRedisURI: \"redis://127.0.0.1:6379\",\n\t\tGCP: &GCPConfig{\n\t\t\tProjectID: \"\",\n\t\t\tServiceAccountFile: \"\",\n\t\t},\n\t}\n}", "func DefaultConfig() Configuration {\n\tvar cfg Configuration\n\tSetDefaults(&cfg)\n\treturn cfg\n}", "func DefaultConfig() *Config { return &Config{BaseConfig{MinFees: defaultMinimumFees}} }", "func DefaultConfig() *Config {\n\tc := &Config{}\n\tif _, err := toml.Decode(defaultConfig, c); err != nil {\n\t\tpanic(err)\n\t}\n\tif err := c.Validate(); err != nil {\n\t\tpanic(err)\n\t}\n\treturn c\n}", "func DefaultConfig() Config {\n\treturn Config{\n\t\t// Dependencies.\n\t\tFileSystem: nil,\n\t\tK8sClient: nil,\n\t\tLogger: nil,\n\n\t\t// Settings.\n\t\tFlag: nil,\n\t\tViper: nil,\n\t}\n}", "func DefaultConfig() Config {\n\tnewConfig := Config{\n\t\t// Dependencies.\n\t\tFactoryCollection: factory.MustNewCollection(),\n\t\tLog: log.New(log.DefaultConfig()),\n\t\tStorageCollection: storage.MustNewCollection(),\n\n\t\t// Settings.\n\t\tMaxSignals: 5,\n\t}\n\n\treturn newConfig\n}", "func DefaultConfig() Config {\n\treturn MemoryConstrainedDefaults()\n}", "func DefaultConfig() *Config {\n\tconfig := new(Config)\n\tconfig.URL = MktmpioURL\n\treturn config\n}", "func DefaultConfig() *Config {\n\treturn &Config{\n\t\tinjectString: defaultInject,\n\t\tconfigString: defaultConfig,\n\t\tappProfile: newAppProfile(),\n\t\tactivateES: false,\n\t}\n}", "func DefaultConfig() Config {\n\treturn Config{\n\t\tPUCT: 1.0,\n\t}\n}", "func DefaultConfig() Config {\n\treturn Config{\n\t\tPort: 3010,\n\t\tEnv: \"dev\",\n\t\tDatabase: DefaultPostgresConfig(),\n\t}\n}", "func DefaultConfig() Config {\n\treturn Config{\n\t\t// Dependencies.\n\t\tConfigurers: nil,\n\t\tFileSystem: afero.NewMemMapFs(),\n\t\tLogger: nil,\n\n\t\t// Settings.\n\t\tHelmBinaryPath: \"\",\n\t\tOrganisation: \"\",\n\t\tPassword: \"\",\n\t\tRegistry: \"\",\n\t\tUsername: \"\",\n\t}\n}", "func DefaultConfig() Config {\n\tvar config Config\n\tconfig.DB.DSN = DefaultDSN\n\treturn config\n}", "func defaultConfigs() lint.Configs {\n\treturn lint.Configs{}\n}", "func createDefaultConfig() component.Config {\n\treturn &Config{\n\t\tScraperControllerSettings: scraperhelper.ScraperControllerSettings{\n\t\t\tCollectionInterval: defaultCollectionInterval,\n\t\t\tTimeout: defaultTimeout,\n\t\t},\n\t\tEndpoint: defaultEndpoint,\n\t\tVersion: defaultVersion,\n\t\tCommunity: defaultCommunity,\n\t\tSecurityLevel: defaultSecurityLevel,\n\t\tAuthType: defaultAuthType,\n\t\tPrivacyType: defaultPrivacyType,\n\t}\n}", "func DefaultConfig() *Config {\n\treturn &Config{\n\t\tAddress: \"127.0.0.1\",\n\t\tPort: 5700,\n\t\tSyslogFacility: \"SYSLOG\",\n\t\tLogLevel: \"INFO\",\n\t\tConsulDatacenter: \"dc1\",\n\t\tConsulPort: 8500,\n\t\tParameters: make(map[string]string),\n\t\tDeclarations: essentials.NewDeclarationsConfig(),\n\t}\n}", "func DefaultConfig() Config {\n\treturn Config{\n\t\t// Dependencies.\n\t\tLogger: nil,\n\n\t\t// Settings.\n\t\tBridgeName: \"\",\n\t}\n}", "func DefaultConfig() *Config {\n\treturn &Config{\n\t\tLogLevel: \"debug\",\n\t\tLogFormat: \"text\",\n\n\t\tDatabaseDriver: \"boltdb\",\n\t\tDatabasePath: \"db/eremetic.db\",\n\n\t\tName: \"Eremetic\",\n\t\tUser: \"root\",\n\t\tCheckpoint: true,\n\t\tFailoverTimeout: 2592000.0,\n\t\tQueueSize: 100,\n\t}\n}", "func Default() *Config {\n\treturn 
&Config{\n\t\tEnv: &Env{Region: region, Zone: zone, DeployEnv: deployEnv, Host: host},\n\t\tDiscovery: &naming.Config{Region: region, Zone: zone, Env: deployEnv, Host: host},\n\t}\n}", "func ConfigDefault() Config {\n\tc := Config{\n\t\tCache: true,\n\t\tCacheRefresh: 3,\n\t\tCachePath: \"./.hpy\",\n\t\tMotd: true,\n\t\tMotdPath: \"/tmp/hpy.json\",\n\t\tLogging: true,\n\t\tLoggingPath: \"/var/log/hpy.log\",\n\t\tIgnoreLogging: false,\n\t\tDebug: false,\n\t}\n\treturn c\n}", "func DefaultConfig() *Config {\n\treturn &Config{\n\t\tLager: lager.NewLogLager(nil),\n\t\tPool: new(gob.Pool),\n\t}\n}", "func DefaultConfig() *Config {\n\tcfg := &Config{\n\t\tuserHost: \"sms.yunpian.com\",\n\t\tsignHost: \"sms.yunpian.com\",\n\t\ttplHost: \"sms.yunpian.com\",\n\t\tsmsHost: \"sms.yunpian.com\",\n\t\tvoiceHost: \"voice.yunpian.com\",\n\t\tflowHost: \"flow.yunpian.com\",\n\t}\n\treturn cfg.WithUseSSL(true).WithHTTPClient(defaultHTTPClient())\n}", "func DefaultConfig() Config {\n\treturn Config{\n\t\tHeartbeat: defaultHeartbeat,\n\t\tLocale: defaultLocale,\n\t\tDefaultLoggerLevel: zerolog.ErrorLevel,\n\t}\n}", "func DefaultConfig() *Config {\n\treturn &Config{\n\t\tShowOurLogs: true,\n\t\tDeliverLogs: defaultDeliver,\n\t\tBackoffCap: defaultBackoffCap,\n\t\tBackoffGranularity: defaultBackoffGranularity,\n\t\tTickInterval: defaultTickInterval,\n\t}\n}", "func DefaultConfig() Config {\n\treturn Config{make(map[string]string), DefaultBasicAuthRealm, 0, nil}\n}", "func DefaultConfig() Config {\n\treturn Config{\n\t\t// Dependencies.\n\t\tLogger: nil,\n\n\t\t// Settings.\n\t\tFlag: nil,\n\t\tViper: nil,\n\n\t\tDescription: \"\",\n\t\tGitCommit: \"\",\n\t\tName: \"\",\n\t\tSource: \"\",\n\t}\n}", "func DefaultConfig() *Config {\n\treturn &Config{\n\t\tBaseConfig: DefaultBaseConfig(),\n\t\tEth: DefaultEthConfig(),\n\t\tHuron: DefaultHuronConfig(),\n\t\tRaft: DefaultRaftConfig(),\n\t}\n}", "func DefaultConfiguration() Configuration {\n\treturn Configuration{\n\t\t\tDescribeCaller: true,\n\t}\n}", "func DefaultConfig() *Config {\n\treturn &Config{\n\t\tLogLevel: \"debug\",\n\t\tLogFormat: \"text\",\n\n\t\tDatabase: DatabaseConfig{\n\t\t\tHost: \"127.0.0.1\",\n\t\t\tPort: 3306,\n\t\t\tName: \"fusion\",\n\t\t\tUser: \"fusion\",\n\t\t\tPassword: \"password\",\n\t\t},\n\t}\n}", "func DefaultConfig() *Config {\n\treturn &Config{\n\t\tApiAddress: \"http://api.bosh-lite.com\",\n\t\tUsername: \"admin\",\n\t\tPassword: \"admin\",\n\t\tToken: \"\",\n\t\tSkipSslValidation: false,\n\t\tHttpClient: http.DefaultClient,\n\t\tUserAgent: \"SM-CF-client/1.0\",\n\t}\n}", "func DefaultConfig(storageType string, factories config.Factories) *configmodels.Config {\n\texporters := createExporters(storageType, factories)\n\ttypes := []string{}\n\tfor _, v := range exporters {\n\t\ttypes = append(types, v.Type())\n\t}\n\treturn &configmodels.Config{\n\t\tReceivers: createReceivers(factories),\n\t\tExporters: exporters,\n\t\tProcessors: createProcessors(factories),\n\t\tService: configmodels.Service{\n\t\t\tPipelines: map[string]*configmodels.Pipeline{\n\t\t\t\t\"traces\": {\n\t\t\t\t\tInputType: configmodels.TracesDataType,\n\t\t\t\t\tReceivers: []string{\"jaeger\"},\n\t\t\t\t\tExporters: types,\n\t\t\t\t\tProcessors: []string{\"batch\"},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}", "func DefaultConfig(hostname string) *Config {\n\treturn &Config{\n\t\thostname,\n\t\t8, // 8 vnodes\n\t\tsha1.New, // SHA1\n\t\ttime.Duration(5 * time.Second),\n\t\ttime.Duration(15 * time.Second),\n\t\t8, // 8 successors\n\t\tnil, // No delegate\n\t\t160, // 
160bit hash function\n\t\t\"\",\n\t}\n}", "func DefaultBuzzerConfig(dummy bool) ([]BuzzerConfig, error) {\n\tbuzzerDefined := []BuzzerConfig{}\n\n\tbuzzerType := \"ActiveBuzzer\"\n\tif dummy {\n\t\tbuzzerType = \"DummyBuzzer\"\n\t}\n\n\tname := \"Main Buzzer\"\n\tbuzzerDefined = append(buzzerDefined, BuzzerConfig{\n\t\tName: name,\n\t\tType: buzzerType,\n\t\tProperties: []PropertyConfig{\n\t\t\t{Name: \"Name\", Type: \"string\", Hidden: false, Value: name, Comment: \"Buzzer Name\", Choice: \"\"},\n\t\t},\n\t})\n\n\treturn buzzerDefined, nil\n}", "func DefaultConfig() Config {\n\treturn Config{\n\t\t// Dependencies.\n\t\tValueModifiers: nil,\n\n\t\t// Settings.\n\t\tIgnoreFields: nil,\n\t\tSelectFields: nil,\n\t}\n}", "func configDefault(config ...Config) Config {\n\t// Return default config if nothing provided\n\tif len(config) < 1 {\n\t\treturn ConfigDefault\n\t}\n\n\t// Override default config\n\tcfg := config[0]\n\n\t// Set default values\n\tif cfg.Next == nil {\n\t\tcfg.Next = ConfigDefault.Next\n\t}\n\tif cfg.Authorizer == nil {\n\t\tcfg.Authorizer = func(userRole string) bool {\n\t\t\treturn cfg.Role == userRole\n\t\t}\n\t}\n\tif cfg.Unauthorized == nil {\n\t\tcfg.Unauthorized = func(c *fiber.Ctx) error {\n\t\t\tc.Set(fiber.HeaderWWWAuthenticate, \"Role realm=\"+cfg.Realm)\n\t\t\treturn c.SendStatus(fiber.StatusUnauthorized)\n\t\t}\n\t}\n\tif cfg.Role == \"\" {\n\t\tcfg.Role = ConfigDefault.Role\n\t}\n\tif cfg.UserCtxName == \"\" {\n\t\tcfg.UserCtxName = ConfigDefault.UserCtxName\n\t}\n\treturn cfg\n}", "func NewDefaultConfig() Config {\n\treturn Config{\n\t\tName: \"avo\",\n\t\tPkg: pkg(),\n\t}\n}", "func DefaultConfig() Config {\n\treturn Config{\n\t\tBlockchainInfo: types.DefaultBlockchainInfo(),\n\n\t\tAPIPassword: \"\",\n\n\t\tAPIaddr: \"localhost:23110\",\n\t\tRPCaddr: \":23112\",\n\t\tAllowAPIBind: false,\n\n\t\tNoBootstrap: false,\n\t\tRequiredUserAgent: RivineUserAgent,\n\t\tAuthenticateAPI: false,\n\n\t\tProfile: false,\n\t\tProfileDir: \"profiles\",\n\t\tRootPersistentDir: \"\",\n\t\tVerboseLogging: false,\n\n\t\tBootstrapPeers: nil,\n\n\t\tDebugConsensusDB: \"\",\n\t}\n}", "func DefaultConfig() Config {\n\treturn Config{\n\t\tDir: DefaultConfDir,\n\t\tTimeout: xtime.Duration(\"1s\"),\n\t\tEnable: false,\n\t\tMysql: ConfDataSourceMysql{\n\t\t\tEnable: false,\n\t\t\tDsn: \"127.0.0.1:6379\",\n\t\t},\n\t\tEtcd: ConfDataSourceEtcd{\n\t\t\tEnable: false,\n\t\t\tSecure: false,\n\t\t\tEndPoints: []string{\"127.0.0.1:2379\"},\n\t\t},\n\t}\n}", "func DefaultConfig() *Config {\n\treturn &Config{\n\t\tBaseConfig: BaseConfig{\n\t\t\tMinGasPrices: defaultMinGasPrices,\n\t\t\tQueryGasLimit: 0,\n\t\t\tInterBlockCache: true,\n\t\t\tPruning: pruningtypes.PruningOptionDefault,\n\t\t\tPruningKeepRecent: \"0\",\n\t\t\tPruningInterval: \"0\",\n\t\t\tMinRetainBlocks: 0,\n\t\t\tIndexEvents: make([]string, 0),\n\t\t\tIAVLCacheSize: 781250,\n\t\t\tIAVLDisableFastNode: false,\n\t\t\tAppDBBackend: \"\",\n\t\t},\n\t\tTelemetry: telemetry.Config{\n\t\t\tEnabled: false,\n\t\t\tGlobalLabels: [][]string{},\n\t\t},\n\t\tAPI: APIConfig{\n\t\t\tEnable: false,\n\t\t\tSwagger: false,\n\t\t\tAddress: DefaultAPIAddress,\n\t\t\tMaxOpenConnections: 1000,\n\t\t\tRPCReadTimeout: 10,\n\t\t\tRPCMaxBodyBytes: 1000000,\n\t\t},\n\t\tGRPC: GRPCConfig{\n\t\t\tEnable: true,\n\t\t\tAddress: DefaultGRPCAddress,\n\t\t\tMaxRecvMsgSize: DefaultGRPCMaxRecvMsgSize,\n\t\t\tMaxSendMsgSize: DefaultGRPCMaxSendMsgSize,\n\t\t},\n\t\tGRPCWeb: GRPCWebConfig{\n\t\t\tEnable: true,\n\t\t},\n\t\tStateSync: 
StateSyncConfig{\n\t\t\tSnapshotInterval: 0,\n\t\t\tSnapshotKeepRecent: 2,\n\t\t},\n\t\tStreaming: StreamingConfig{\n\t\t\tABCI: ABCIListenerConfig{\n\t\t\t\tKeys: []string{},\n\t\t\t\tStopNodeOnErr: true,\n\t\t\t},\n\t\t},\n\t\tMempool: MempoolConfig{\n\t\t\tMaxTxs: 5_000,\n\t\t},\n\t}\n}", "func DefaultConfig() *Config {\n\treturn &Config{\n\t\tDatabase: \"payments\",\n\t\tCollection: \"payments\",\n\t\tExpiryTimeInMinutes: \"90\",\n\t\tRefundBatchSize: 20,\n\t}\n}", "func DefaultConfig() *Config {\n\treturn &Config{\n\t\tLogLevel: \"INFO\",\n\t\tBindAddr: \"127.0.0.1\",\n\t\tPorts: &Ports{\n\t\t\tHTTP: 4646,\n\t\t\tRPC: 4647,\n\t\t\tSerf: 4648,\n\t\t},\n\t\tAddresses: &Addresses{},\n\t\tServer: &ServerConfig{\n\t\t\tEnabled: false,\n\t\t},\n\t}\n}", "func configDefault(config ...Config) Config {\n\t// Return default config if nothing provided\n\tif len(config) < 1 {\n\t\treturn ConfigDefault\n\t}\n\n\t// Override default config\n\tcfg := config[0]\n\n\t// Set default values\n\tif int(cfg.Duration.Seconds()) > 0 {\n\t\tlog.Warn(\"[LIMITER] Duration is deprecated, please use Expiration\")\n\t\tcfg.Expiration = cfg.Duration\n\t}\n\tif cfg.Key != nil {\n\t\tlog.Warn(\"[LIMITER] Key is deprecated, please us KeyGenerator\")\n\t\tcfg.KeyGenerator = cfg.Key\n\t}\n\tif cfg.Store != nil {\n\t\tlog.Warn(\"[LIMITER] Store is deprecated, please use Storage\")\n\t\tcfg.Storage = cfg.Store\n\t}\n\tif cfg.Next == nil {\n\t\tcfg.Next = ConfigDefault.Next\n\t}\n\tif cfg.Max <= 0 {\n\t\tcfg.Max = ConfigDefault.Max\n\t}\n\tif int(cfg.Expiration.Seconds()) <= 0 {\n\t\tcfg.Expiration = ConfigDefault.Expiration\n\t}\n\tif cfg.KeyGenerator == nil {\n\t\tcfg.KeyGenerator = ConfigDefault.KeyGenerator\n\t}\n\tif cfg.LimitReached == nil {\n\t\tcfg.LimitReached = ConfigDefault.LimitReached\n\t}\n\tif cfg.LimiterMiddleware == nil {\n\t\tcfg.LimiterMiddleware = ConfigDefault.LimiterMiddleware\n\t}\n\treturn cfg\n}", "func NewDefault() *Config {\n\tvv := defaultConfig\n\treturn &vv\n}", "func NewDefault() *Config {\n\tvv := defaultConfig\n\treturn &vv\n}", "func Default() *Config {\n\tconf := &Config{\n\t\tProtocol: \"tcp\",\n\t\tAddr: \"0.0.0.0:25565\",\n\t\tHosts: []HostConfig{\n\t\t\t{\n\t\t\t\tName: \"Server-1\",\n\t\t\t\tAddr: \"localhost:25580\",\n\t\t\t},\n\t\t},\n\t\tLogConfig: LogConfig{\n\t\t\tLogConnections: true,\n\t\t\tLogDisconnect: false,\n\t\t},\n\t\tHealthCheckTime: 5,\n\t\tUDPTimeout: 3000,\n\t\tSaveConfigOnClose: false,\n\t\tInterfaces: []string{},\n\t}\n\tconf.fillFlags()\n\treturn conf\n}", "func DefaultConfig() Config {\n\treturn Config{\n\t\t// Dependencies.\n\t\tHTTPClient: nil,\n\t\tLogger: nil,\n\n\t\t// Settings.\n\t\tFlag: nil,\n\t\tViper: nil,\n\t}\n}", "func DefaultConfig() *Config {\n\tconfig := &Config{\n\t\tOwnerProcName: \"\",\n\t\tOwnerReleaseInterval: 1 * time.Second,\n\t\tOwnerReleaseTimeout: 5 * time.Minute,\n\t\tSourcePattern: \"/tmp/rotate/source\",\n\t\tTempStorage: \"/tmp/rotate/tmp\",\n\t\tArchiveStorage: \"/tmp/rotate/archive\",\n\t\tFinalizeCommands: []string{},\n\t}\n\treturn config\n}", "func DefaultConfig() Config {\n\treturn Config{\n\t\tBuffered: true,\n\t\tDepth: 10,\n\t}\n}", "func loadDefault() (*Config, error) {\n\tcfg := Config{}\n\n\t// App\n\tcfg.App.ServerPort = 8080\n\tcfg.App.LogLevel = LogLevel.Debug\n\n\t// Providers\n\t// Provider 1\n\tamazon := ProviderConfig{Name: \"amazon\"}\n\t// Provider 2\n\tsendgrid := ProviderConfig{Name: \"sendgrid\"}\n\n\t// Mail\n\tcfg.Mailer.Providers[0] = amazon\n\tcfg.Mailer.Providers[1] = sendgrid\n\n\treturn &cfg, nil\n}", 
"func NewDefaultConfig() *Config {\n\treturn &Config{\n\t\tIgnoreNamespaces: []string{\"kube-system\", \"kube-public\"},\n\t}\n}", "func DefaultConfig() *Config {\n\treturn &Config{\n\t\tSMTPBindAddr: \"0.0.0.0:1025\",\n\t\tHTTPBindAddr: \"0.0.0.0:8025\",\n\t\tHostname: \"mailhog.example\",\n\t\tMongoURI: \"127.0.0.1:27017\",\n\t\tMongoDatabase: \"mailhog\",\n\t\tPostgresURI: \"postgres://127.0.0.1:5432/mailhog\",\n\t\tMongoColl: \"messages\",\n\t\tStorageType: \"memory\",\n\t\tMessageChan: make(chan *data.Message),\n\t\tOutgoingSMTP: make(map[string]*OutgoingSMTP),\n\t}\n}", "func DefaultConfig() *AppConfig {\r\n\treturn &AppConfig{\r\n\t\tEngine: EngineSDL,\r\n\t\tGraphics: gfx.DefaultConfig(),\r\n\t}\r\n}", "func configDefault(config ...Config) Config {\n\t// Return default config if nothing provided\n\tif len(config) < 1 {\n\t\treturn ConfigDefault\n\t}\n\n\t// Override default config\n\tcfg := config[0]\n\n\t// Set default values\n\tif cfg.XSSProtection == \"\" {\n\t\tcfg.XSSProtection = ConfigDefault.XSSProtection\n\t}\n\n\tif cfg.ContentTypeNosniff == \"\" {\n\t\tcfg.ContentTypeNosniff = ConfigDefault.ContentTypeNosniff\n\t}\n\n\tif cfg.XFrameOptions == \"\" {\n\t\tcfg.XFrameOptions = ConfigDefault.XFrameOptions\n\t}\n\n\tif cfg.ReferrerPolicy == \"\" {\n\t\tcfg.ReferrerPolicy = ConfigDefault.ReferrerPolicy\n\t}\n\n\tif cfg.CrossOriginEmbedderPolicy == \"\" {\n\t\tcfg.CrossOriginEmbedderPolicy = ConfigDefault.CrossOriginEmbedderPolicy\n\t}\n\n\tif cfg.CrossOriginOpenerPolicy == \"\" {\n\t\tcfg.CrossOriginOpenerPolicy = ConfigDefault.CrossOriginOpenerPolicy\n\t}\n\n\tif cfg.CrossOriginResourcePolicy == \"\" {\n\t\tcfg.CrossOriginResourcePolicy = ConfigDefault.CrossOriginResourcePolicy\n\t}\n\n\tif cfg.OriginAgentCluster == \"\" {\n\t\tcfg.OriginAgentCluster = ConfigDefault.OriginAgentCluster\n\t}\n\n\tif cfg.XDNSPrefetchControl == \"\" {\n\t\tcfg.XDNSPrefetchControl = ConfigDefault.XDNSPrefetchControl\n\t}\n\n\tif cfg.XDownloadOptions == \"\" {\n\t\tcfg.XDownloadOptions = ConfigDefault.XDownloadOptions\n\t}\n\n\tif cfg.XPermittedCrossDomain == \"\" {\n\t\tcfg.XPermittedCrossDomain = ConfigDefault.XPermittedCrossDomain\n\t}\n\n\treturn cfg\n}", "func DefaultConfig() *Config {\n\treturn &Config{\n\t\tContractQueryGasLimit: DefaultContractQueryGasLimit,\n\t\tContractDebugMode: DefaultContractDebugMode,\n\t\tWriteVMMemoryCacheSize: DefaultWriteVMMemoryCacheSize,\n\t\tReadVMMemoryCacheSize: DefaultReadVMMemoryCacheSize,\n\t\tNumReadVMs: DefaultNumReadVM,\n\t}\n}", "func defaultBaseConfig() BaseConfig {\n\treturn BaseConfig{\n\t\tHomeDir: defaultHomeDir,\n\t\tDataDir: defaultDataDir,\n\t\tConfigFile: defaultConfigFileName,\n\t\tLogDir: defaultLogDir,\n\t\tAccountDir: defaultAccountDir,\n\t\tTestMode: defaultTestMode,\n\t\tCollectMetrics: false,\n\t\tMetricsPort: 1010,\n\t\tOracleServer: \"http://localhost:3030\",\n\t\tOracleServerWorldId: 0,\n\t\tGenesisTime: time.Now().Format(time.RFC3339),\n\t\tLayerDurationSec: 10,\n\t}\n}", "func DefaultConfig() Config {\n\treturn Config{\n\t\tAddr: DefaultAddr,\n\t\tDirPath: DefaultDirPath,\n\t\tBlockSize: DefaultBlockSize,\n\t\tRwMethod: storage.FileIO,\n\t\tIdxMode: KeyValueMemMode,\n\t\tMaxKeySize: DefaultMaxKeySize,\n\t\tMaxValueSize: DefaultMaxValueSize,\n\t\tSync: false,\n\t\tReclaimThreshold: DefaultReclaimThreshold,\n\t\tSingleReclaimThreshold: DefaultSingleReclaimThreshold,\n\t}\n}", "func NewDefaultConfig() *Config {\n\treturn &Config{\n\t\tDefaultNamespace: \"default\",\n\t\tFileName: \"stdin\",\n\t\tTargetKubernetesVersion: 
\"master\",\n\t}\n}", "func DefaultConfig() Config {\n\treturn Config{\n\t\tNetwork: DefaultNetwork,\n\t\tRPCListen: \"localhost:11010\",\n\t\tRESTListen: \"localhost:8081\",\n\t\tServer: &loopServerConfig{\n\t\t\tNoTLS: false,\n\t\t},\n\t\tLoopDir: LoopDirBase,\n\t\tConfigFile: defaultConfigFile,\n\t\tDataDir: LoopDirBase,\n\t\tDatabaseBackend: DatabaseBackendSqlite,\n\t\tSqlite: &loopdb.SqliteConfig{\n\t\t\tDatabaseFileName: defaultSqliteDatabasePath,\n\t\t},\n\t\tLogDir: defaultLogDir,\n\t\tMaxLogFiles: defaultMaxLogFiles,\n\t\tMaxLogFileSize: defaultMaxLogFileSize,\n\t\tDebugLevel: defaultLogLevel,\n\t\tTLSCertPath: DefaultTLSCertPath,\n\t\tTLSKeyPath: DefaultTLSKeyPath,\n\t\tTLSValidity: DefaultAutogenValidity,\n\t\tMacaroonPath: DefaultMacaroonPath,\n\t\tMaxLSATCost: lsat.DefaultMaxCostSats,\n\t\tMaxLSATFee: lsat.DefaultMaxRoutingFeeSats,\n\t\tLoopOutMaxParts: defaultLoopOutMaxParts,\n\t\tTotalPaymentTimeout: defaultTotalPaymentTimeout,\n\t\tMaxPaymentRetries: defaultMaxPaymentRetries,\n\t\tEnableExperimental: false,\n\t\tLnd: &lndConfig{\n\t\t\tHost: \"localhost:10009\",\n\t\t\tMacaroonPath: DefaultLndMacaroonPath,\n\t\t},\n\t}\n}", "func getDefaultConfiguration(t *testing.T) *configuration.Registry {\n\tconfig, err := configuration.New(\"\")\n\trequire.NoError(t, err)\n\treturn config\n}", "func DefaultDevConfig() *Config {\n\tcfg := &Config{\n\t\tuserHost: \"test-api.yunpian.com\",\n\t\tsignHost: \"test-api.yunpian.com\",\n\t\ttplHost: \"test-api.yunpian.com\",\n\t\tsmsHost: \"test-api.yunpian.com\",\n\t\tvoiceHost: \"test-api.yunpian.com\",\n\t\tflowHost: \"test-api.yunpian.com\",\n\t}\n\treturn cfg.WithUseSSL(true).WithHTTPClient(defaultHTTPClient())\n}", "func DefaultConfig() Config {\n\tencoding := cosmoscmd.MakeEncodingConfig(app.ModuleBasics)\n\n\treturn Config{\n\t\tCodec: encoding.Marshaler,\n\t\tTxConfig: encoding.TxConfig,\n\t\tLegacyAmino: encoding.Amino,\n\t\tInterfaceRegistry: encoding.InterfaceRegistry,\n\t\tAccountRetriever: dclauthtypes.AccountRetriever{},\n\t\tAppConstructor: func(val Validator) servertypes.Application {\n\t\t\treturn app.New(\n\t\t\t\tval.Ctx.Logger, tmdb.NewMemDB(), nil, true, map[int64]bool{}, val.Ctx.Config.RootDir, 0,\n\t\t\t\tencoding,\n\t\t\t\tsimapp.EmptyAppOptions{},\n\t\t\t\tbaseapp.SetPruning(storetypes.NewPruningOptionsFromString(val.AppConfig.Pruning)),\n\t\t\t\tbaseapp.SetMinGasPrices(val.AppConfig.MinGasPrices),\n\t\t\t)\n\t\t},\n\t\tGenesisState: app.ModuleBasics.DefaultGenesis(encoding.Marshaler),\n\t\tTimeoutCommit: 2 * time.Second,\n\t\tChainID: \"chain-\" + tmrand.NewRand().Str(6),\n\t\tNumValidators: 1,\n\t\tBondDenom: sdk.DefaultBondDenom,\n\t\tMinGasPrices: fmt.Sprintf(\"0.000006%s\", sdk.DefaultBondDenom),\n\t\tAccountTokens: sdk.TokensFromConsensusPower(1000, sdk.DefaultPowerReduction),\n\t\tStakingTokens: sdk.TokensFromConsensusPower(500, sdk.DefaultPowerReduction),\n\t\tBondedTokens: sdk.TokensFromConsensusPower(100, sdk.DefaultPowerReduction),\n\t\tPruningStrategy: storetypes.PruningOptionNothing,\n\t\tCleanupDir: true,\n\t\tSigningAlgo: string(hd.Secp256k1Type),\n\t\tKeyringOptions: []keyring.Option{},\n\t}\n}", "func Default() *Config {\n\trootDir := \"/\"\n\tswitch runtime.GOOS {\n\tcase \"windows\":\n\t\trootDir = `C:\\`\n\t}\n\treturn &Config{\n\t\tFile: File{\n\t\t\tProxyNetwork: \"unix\",\n\t\t\tProxyAddress: filepath.Join(client.Namespace(), \"acme-lsp.rpc\"),\n\t\t\tAcmeNetwork: \"unix\",\n\t\t\tAcmeAddress: filepath.Join(client.Namespace(), \"acme\"),\n\t\t\tWorkspaceDirectories: nil,\n\t\t\tRootDirectory: 
rootDir,\n\t\t\tFormatOnPut: true,\n\t\t\tCodeActionsOnPut: []protocol.CodeActionKind{\n\t\t\t\tprotocol.SourceOrganizeImports,\n\t\t\t},\n\t\t\tServers: nil,\n\t\t\tFilenameHandlers: nil,\n\t\t},\n\t}\n}", "func DefaultConfig() *Config {\n\treturn &Config{\n\t\tGlobal: GlobalConfig{\n\t\t\tTempDir: \"/tmp/apigee-istio\",\n\t\t\tKeepAliveMaxConnectionAge: 10 * time.Minute,\n\t\t\tAPIAddress: \":5000\",\n\t\t\tMetricsAddress: \":5001\",\n\t\t},\n\t\tTenant: TenantConfig{\n\t\t\tClientTimeout: 30 * time.Second,\n\t\t\tInternalJWTDuration: 10 * time.Minute,\n\t\t\tInternalJWTRefresh: 30 * time.Second,\n\t\t},\n\t\tProducts: ProductsConfig{\n\t\t\tRefreshRate: 2 * time.Minute,\n\t\t},\n\t\tAnalytics: AnalyticsConfig{\n\t\t\tFileLimit: 1024,\n\t\t\tSendChannelSize: 10,\n\t\t\tCollectionInterval: 2 * time.Minute,\n\t\t},\n\t\tAuth: AuthConfig{\n\t\t\tAPIKeyCacheDuration: 30 * time.Minute,\n\t\t\tAPIKeyHeader: \"x-api-key\",\n\t\t\tTargetHeader: \":authority\",\n\t\t\tRejectUnauthorized: false,\n\t\t\tJWTProviderKey: \"apigee\",\n\t\t},\n\t}\n}", "func DefaultConfig() Config {\n\treturn Config{\n\t\tObservability: DefaultObservability(),\n\t\tWorkerHasher: DefaultHasher,\n\t\tWorkerCount: 1,\n\t\tMaxItemRetries: 10,\n\t\tWorkerQueueSize: 2000,\n\t\tLeaderElectionEnabled: true,\n\t\tDelayResolution: time.Millisecond * 250,\n\t\tDelayQueueSize: 1000,\n\t\tMaxReconcileTime: time.Second * 10,\n\t}\n}", "func NewDefaultConfig() *Config {\n\tconf := &Config{\n\t\tUnicastConfig: NewDefaultUnicastConfig(),\n\t\tExtensionConfig: NewDefaultExtensionConfig(),\n\t}\n\treturn conf\n}", "func DefaultConfConfig() cmd.ConfConfig {\n\treturn cmd.ConfConfig{\n\t\tDir: \".textile\",\n\t\tName: \"config\",\n\t\tType: \"yaml\",\n\t\tEnvPrefix: \"BUCK\",\n\t}\n}", "func createDefaultConfig() component.Config {\n\treturn &Config{\n\t\tProtocols: Protocols{\n\t\t\tGRPC: &configgrpc.GRPCServerSettings{\n\t\t\t\tNetAddr: confignet.NetAddr{\n\t\t\t\t\tEndpoint: defaultGRPCEndpoint,\n\t\t\t\t\tTransport: \"tcp\",\n\t\t\t\t},\n\t\t\t\t// We almost write 0 bytes, so no need to tune WriteBufferSize.\n\t\t\t\tReadBufferSize: 512 * 1024,\n\t\t\t},\n\t\t\tHTTP: &HTTPConfig{\n\t\t\t\tHTTPServerSettings: &confighttp.HTTPServerSettings{\n\t\t\t\t\tEndpoint: defaultHTTPEndpoint,\n\t\t\t\t},\n\t\t\t\tTracesURLPath: defaultTracesURLPath,\n\t\t\t\tMetricsURLPath: defaultMetricsURLPath,\n\t\t\t\tLogsURLPath: defaultLogsURLPath,\n\t\t\t},\n\t\t},\n\t}\n}", "func defaultConfig() *config.Config {\n\treturn &config.Config{\n\t\tTargetAnnotation: \"bio.terra.testing/snapshot-policy\",\n\t\tGoogleProject: \"fake-project\",\n\t\tRegion: \"us-central1\",\n\t}\n}" ]
[ "0.8458187", "0.8257558", "0.8144465", "0.79126495", "0.7902076", "0.7703992", "0.7674732", "0.76744676", "0.7656039", "0.75664085", "0.7556187", "0.75450176", "0.75397223", "0.7538148", "0.7524036", "0.7485726", "0.74851745", "0.74809283", "0.74773145", "0.74746037", "0.7470708", "0.7470188", "0.74662757", "0.7460732", "0.74480164", "0.7436953", "0.74221426", "0.74221426", "0.7401036", "0.7384656", "0.73744893", "0.73699725", "0.7366931", "0.7365971", "0.73632663", "0.73306197", "0.7321086", "0.73190254", "0.7313521", "0.7311758", "0.7294846", "0.7286542", "0.7280138", "0.72770584", "0.7276869", "0.7273896", "0.7271725", "0.72574425", "0.7256212", "0.7255557", "0.7253383", "0.72435015", "0.7241475", "0.72397655", "0.72365165", "0.7221908", "0.7215687", "0.7210101", "0.7209821", "0.72083616", "0.72009575", "0.71871114", "0.7176066", "0.717183", "0.71650296", "0.715773", "0.71390855", "0.71313953", "0.71261626", "0.7125163", "0.71250135", "0.7120181", "0.7118398", "0.7113262", "0.71122605", "0.71122605", "0.7109051", "0.7100818", "0.7097853", "0.7097532", "0.70974874", "0.7085428", "0.7069273", "0.7046708", "0.70455897", "0.7042559", "0.7036823", "0.70319307", "0.70138556", "0.7008446", "0.7005578", "0.69928426", "0.69922453", "0.6991373", "0.69862306", "0.6985139", "0.6983285", "0.6983201", "0.69806147", "0.69780767" ]
0.7397149
29
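The negatives in the record closed by the rank value above all follow one Go idiom: a zero-argument DefaultConfig-style constructor that returns a struct pre-filled with safe values so callers override only what they need. A minimal, self-contained sketch of that idiom follows; the Config fields and the default values are illustrative assumptions, not taken from any of the libraries quoted in the negatives.

    package main

    import (
        "fmt"
        "time"
    )

    // Config holds the tunable settings of a hypothetical service.
    type Config struct {
        ListenAddr string
        Timeout    time.Duration
        MaxRetries int
    }

    // DefaultConfig returns a Config pre-filled with conservative defaults,
    // so callers override only the fields they care about.
    func DefaultConfig() Config {
        return Config{
            ListenAddr: "localhost:8080",
            Timeout:    30 * time.Second,
            MaxRetries: 3,
        }
    }

    func main() {
        fmt.Printf("%+v\n", DefaultConfig())
    }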
WithHTTPTimeout returns an HTTPCheckerOption that specifies the timeout for HTTP requests. Setting a timeout is highly recommended, but it needs to be carefully chosen to avoid false results.
func WithHTTPTimeout(timeout time.Duration) HTTPCheckerOption { return func(c *HTTPChecker) { c.timeout = timeout } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func HTTPTimeout(duration time.Duration) HTTPOption {\n\treturn func(c *HTTPCollector) { c.client.Timeout = duration }\n}", "func WithTimeout(t time.Duration) Option {\n\treturn func(c *Client) { c.httpClient.Timeout = t }\n}", "func OptTLSHandshakeTimeout(d time.Duration) Option {\n\treturn func(r *Request) error {\n\t\tif r.Client == nil {\n\t\t\tr.Client = &http.Client{}\n\t\t}\n\t\tif r.Client.Transport == nil {\n\t\t\tr.Client.Transport = &http.Transport{}\n\t\t}\n\t\tif typed, ok := r.Client.Transport.(*http.Transport); ok {\n\t\t\ttyped.TLSHandshakeTimeout = d\n\t\t}\n\t\treturn nil\n\t}\n}", "func WithTimeout(duration time.Duration) Option {\n\treturn wrappedOption{otlpconfig.WithTimeout(duration)}\n}", "func WithTimeout(duration time.Duration) Option {\n\treturn wrappedOption{oconf.WithTimeout(duration)}\n}", "func WithHTTPServerTimeout(t time.Duration) Option {\n\treturn func(s *Server) {\n\t\ts.HTTPServerTimeout = t\n\t}\n}", "func OptTimeout(d time.Duration) Option {\n\treturn func(r *Request) error {\n\t\tif r.Client == nil {\n\t\t\tr.Client = &http.Client{}\n\t\t}\n\t\tr.Client.Timeout = d\n\t\treturn nil\n\t}\n}", "func WithTimeout(timeout time.Duration) Option {\n\treturn func(o *options) {\n\t\to.timeout = timeout\n\t}\n}", "func WithTimeout(timeout time.Duration) Option {\n\treturn func(o *options) {\n\t\to.timeout = timeout\n\t}\n}", "func WithTimeout(t time.Duration) OptFunc {\n\treturn func(d *Downloader) {\n\t\td.timeout = t\n\t}\n}", "func OptionHTTPTimeouts(read, write, idle time.Duration, disableKeepAlive bool) Option {\n\treturn func(cfg *gwconfig) {\n\t\tcfg.httpReadTimeout = read\n\t\tcfg.httpWriteTimeout = write\n\t\tcfg.httpIdleTimeout = idle\n\t\tcfg.httpDisableKeepAlive = disableKeepAlive\n\t}\n}", "func WithTimeout(timeout time.Duration) Option {\n\treturn func(opts *Options) {\n\t\topts.Timeout = timeout\n\t}\n}", "func Timeout(t time.Duration) ClientOpt {\n\treturn func(c *Client) {\n\t\tc.HTTPClient.Timeout = t\n\t}\n}", "func Timeout(t time.Duration) RequestOption {\n\treturn func(o *RequestOptions) {\n\t\to.Timeout = t\n\t}\n}", "func WithTimeout(timeout time.Duration) Option {\n\treturn func(opts *Opts) error {\n\t\topts.Timeout = timeout\n\t\treturn nil\n\t}\n}", "func Timeout(timeout time.Duration) Option {\n\treturn func(client *http.Client) {\n\t\tclient.Timeout = timeout\n\t}\n}", "func Timeout(timeout time.Duration) Option {\n\treturn func(client *http.Client) {\n\t\tclient.Timeout = timeout\n\t}\n}", "func TimeoutOption(d time.Duration) Option {\n\treturn func(w *Webman) {\n\t\tw.timeout = d\n\t}\n}", "func Timeout(d time.Duration) ConfigOpt {\n\treturn func(c *Config) {\n\t\tc.transport.ResponseHeaderTimeout = d\n\t\tc.transport.TLSHandshakeTimeout = d\n\t\tc.dialer.Timeout = d\n\t}\n}", "func WithTimeout(timeout time.Duration) ClientOption {\n\treturn withTimeout{timeout}\n}", "func (builder *TCPHealthCheckBuilder) WithTimeout(timeout time.Duration) *TCPHealthCheckBuilder {\n\tbuilder.options.Timeout = timeout\n\treturn builder\n}", "func (d *Dnsfilter) SetHTTPTimeout(t time.Duration) {\n\td.client.Timeout = t\n}", "func (manager Manager) HTTPTimeout() time.Duration {\n\treturn manager.viperConfig.GetDuration(httpTimeout)\n}", "func NewHTTPClientWithTimeout(t time.Duration) *http.Client {\n\ttr := &http.Transport{\n\t\t// Added IdleConnTimeout to reduce the time of idle connections which\n\t\t// could potentially slow macOS reconnection when there is a sudden\n\t\t// network disconnection/issue\n\t\tIdleConnTimeout: t,\n\t\tProxy: 
http.ProxyFromEnvironment,\n\t}\n\th := &http.Client{\n\t\tTransport: tr,\n\t\tTimeout: t}\n\treturn h\n}", "func WithTimeout(t time.Duration) APIOption {\n\treturn newAPIOption(func(o *Options) {\n\t\to.Timeout = t\n\t})\n}", "func WithShutdownTimeout(timeout time.Duration) Option {\n\treturn func(p *Protocol) error {\n\t\tif p == nil {\n\t\t\treturn fmt.Errorf(\"http shutdown timeout option can not set nil protocol\")\n\t\t}\n\t\tp.ShutdownTimeout = timeout\n\t\treturn nil\n\t}\n}", "func WithTimeout(timeout time.Duration) Option {\n\treturn func(c *Client) {\n\t\tc.client.Timeout = timeout\n\t}\n}", "func WithTimeout(timeout time.Duration) Option {\n\treturn func(c *Client) {\n\t\tc.client.Timeout = timeout\n\t}\n}", "func WithTimeout(timeout time.Duration) ClientOption {\n\treturn optionFunc(func(c *Client) {\n\t\tc.WithTimeout(timeout)\n\t})\n}", "func WithTimeout(t time.Duration) apiOption {\n\treturn func(m *Management) {\n\t\tm.timeout = t\n\t}\n}", "func SetHTTPClientWithTimeout(t time.Duration) error {\n\tif t <= 0 {\n\t\treturn errCannotSetInvalidTimeout\n\t}\n\tm.Lock()\n\t_HTTPClient = NewHTTPClientWithTimeout(t)\n\tm.Unlock()\n\treturn nil\n}", "func WithTimeout(timeout time.Duration) Opt {\n\treturn func(c *Client) error {\n\t\tc.client.Timeout = timeout\n\t\treturn nil\n\t}\n}", "func WithTimeout(t time.Duration) Option {\n\treturn func(o *Manager) {\n\t\to.timeout = t\n\t}\n}", "func (htpc *HttpProcessorConfig) Timeout(connectionTimeout time.Duration) *HttpProcessorConfig {\n\thtpc.connectionTimeout = connectionTimeout\n\treturn htpc\n}", "func WithTimeout(timeout time.Duration) BuilderOptionFunc {\n\treturn func(b *Builder) error {\n\t\tb.timeout = timeout\n\t\treturn nil\n\t}\n}", "func WithTimeout(timeoutType fab.TimeoutType, timeout time.Duration) RequestOption {\n\treturn func(ctx context.Client, o *requestOptions) error {\n\t\tif o.Timeouts == nil {\n\t\t\to.Timeouts = make(map[fab.TimeoutType]time.Duration)\n\t\t}\n\t\to.Timeouts[timeoutType] = timeout\n\t\treturn nil\n\t}\n}", "func Timeout(t time.Duration) Option {\n\treturn func(o *Options) {\n\t\to.Timeout = t\n\t}\n}", "func Timeout(t time.Duration) Option {\n\treturn func(o *Options) {\n\t\to.Timeout = t\n\t}\n}", "func Timeout(t time.Duration) Option {\n\treturn func(o *Options) {\n\t\to.Timeout = t\n\t}\n}", "func Timeout(t time.Duration) Option {\n\treturn func(o *Options) {\n\t\to.Timeout = t\n\t}\n}", "func WithTimeout(timeout time.Duration) ClientOption {\n\treturn func(client *Client) {\n\t\tclient.timeout = timeout\n\t}\n}", "func WithTimeout(timeout time.Duration) ClientOption {\n\treturn func(c *Client) error {\n\t\tc.timeout = timeout\n\t\treturn nil\n\t}\n}", "func Timeout(t time.Duration) Option {\n\treturn func(c *Config) Option {\n\t\tprevious := c.Timeout\n\t\tc.Timeout = t\n\t\treturn Timeout(previous)\n\t}\n}", "func WithTimeout(d time.Duration) DialOption {\n\treturn func(o *DialOptions) {\n\t\to.Timeout = d\n\t}\n}", "func WithTimeout(d time.Duration) DialOption {\n\treturn func(o *DialOptions) {\n\t\to.Timeout = d\n\t}\n}", "func WithHTTPServerShutdownTimeout(t time.Duration) Option {\n\treturn func(s *Server) {\n\t\ts.HTTPServerShutdownTimeout = t\n\t}\n}", "func WithRequestTimeout(t time.Duration) ConfigOption {\n\treturn func(c *Config) {\n\t\tc.RequestTimeoutTime = t\n\t}\n}", "func WithTimeout(timeout time.Duration) configF {\n\treturn func(c *config) *config {\n\t\tc.defaultTimeout = timeout\n\t\treturn c\n\t}\n}", "func Timeout(timeout time.Duration) OptionFunc {\n\treturn func(tc 
*TracedClient) error {\n\t\tif timeout <= 0 {\n\t\t\treturn errors.New(\"timeout must be positive\")\n\t\t}\n\t\ttc.cl.Timeout = timeout\n\t\treturn nil\n\t}\n}", "func (o *HandleGetAboutUsingGETParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}", "func WithMetricsTimeout(duration time.Duration) Option {\n\treturn wrappedOption{otlpconfig.WithMetricsTimeout(duration)}\n}", "func WithOutboundTimeout(timeout time.Duration) OutboundHTTPOpt {\n\treturn func(opts *outboundCommHTTPOpts) {\n\t\topts.client.Timeout = timeout\n\t}\n}", "func WithTimeout(duration time.Duration) ReconcilerOption {\n\treturn func(r *Reconciler) {\n\t\tr.timeout = duration\n\t}\n}", "func WithHTTP() Option {\n\treturn optionFunc(func(c *config) error {\n\t\tc.namespace = \"http\"\n\t\treturn nil\n\t})\n}", "func WithTimeout(timeout time.Duration) ReqContextOptions {\n\treturn func(ctx *requestContextOpts) {\n\t\tctx.timeout = timeout\n\t}\n}", "func UseHttpClientWithTimeout(timeout time.Duration) {\n\tdialTimeout := func(network, addr string) (net.Conn, error) {\n\t\treturn net.DialTimeout(network, addr, timeout)\n\t}\n\n\ttransport := http.Transport{\n\t\tDial: dialTimeout,\n\t}\n\tclient := http.Client{\n\t\tTransport: &transport,\n\t}\n\n\tdefaultHttpClient = &client\n}", "func Timeout(timeout int64) Option {\n\treturn func(opts *options) {\n\t\topts.timeout = time.Duration(timeout) * time.Second\n\t}\n}", "func NewHTTPChecker(url string, opts ...HTTPCheckerOption) *HTTPChecker {\n\tchecker := &HTTPChecker{\n\t\turl: url,\n\t\tmethod: http.MethodGet,\n\t}\n\n\tfor _, opt := range opts {\n\t\topt(checker)\n\t}\n\n\treturn checker\n}", "func WithHTTPMethod(method string) HTTPCheckerOption {\n\treturn func(c *HTTPChecker) {\n\t\tc.method = method\n\t}\n}", "func WithTimeout(timeout time.Duration) Option {\n\treturn func(opts *VDRI) {\n\t\topts.client.Timeout = timeout\n\t}\n}", "func clientWithTimeout(timeout time.Duration) *http.Client {\n tr := &http.Transport{\n ResponseHeaderTimeout: timeout,\n Dial: (&net.Dialer{\n Timeout: timeout,\n KeepAlive: time.Second,\n }).Dial,\n TLSHandshakeTimeout: timeout,\n MaxIdleConnsPerHost: 100,\n }\n return &http.Client{\n Timeout: timeout,\n Transport: tr,\n }\n}", "func GetHTTPClient(timeout int) *http.Client {\n\n\t// This env var should only be used in our test environments or in an emergency when there is a problem with the SSL certificate of a horizon service.\n\tskipSSL := false\n\tif os.Getenv(\"HZN_SSL_SKIP_VERIFY\") != \"\" {\n\t\tskipSSL = true\n\t}\n\n\t// Set request timeout based on environment variables and input values. The environment variable always overrides the\n\t// input parameter. 
The other timeouts are subject to the timeout setting also.\n\trequestTimeout := timeout\n\n\tif envTimeout := os.Getenv(config.HTTPRequestTimeoutOverride); envTimeout != \"\" {\n\t\tif t, err := strconv.Atoi(envTimeout); err == nil {\n\t\t\trequestTimeout = t\n\t\t} else {\n\t\t\tWarning(i18n.GetMessagePrinter().Sprintf(\"Unable to use %v to set the request timeout, the value is not a valid number: %v\", config.HTTPRequestTimeoutOverride, envTimeout))\n\t\t}\n\t}\n\n\tresponseTimeout := int(float64(requestTimeout) * 0.8)\n\tdialTimeout := int(float64(requestTimeout) * 0.5)\n\tkeepAlive := requestTimeout * 2\n\tTLSHandshake := dialTimeout\n\texpectContinue := int(float64(requestTimeout) * 0.5)\n\n\tVerbose(i18n.GetMessagePrinter().Sprintf(\"HTTP request timeout set to %v seconds\", requestTimeout))\n\n\treturn &http.Client{\n\t\t// remember that this timeout is for the whole request, including\n\t\t// body reading. This means that you must set the timeout according\n\t\t// to the total payload size you expect\n\t\tTimeout: time.Second * time.Duration(requestTimeout),\n\t\tTransport: &http.Transport{\n\t\t\tDial: (&net.Dialer{\n\t\t\t\tTimeout: time.Duration(dialTimeout) * time.Second,\n\t\t\t\tKeepAlive: time.Duration(keepAlive) * time.Second,\n\t\t\t}).Dial,\n\t\t\tTLSHandshakeTimeout: time.Duration(TLSHandshake) * time.Second,\n\t\t\tResponseHeaderTimeout: time.Duration(responseTimeout) * time.Second,\n\t\t\tExpectContinueTimeout: time.Duration(expectContinue) * time.Second,\n\t\t\tMaxIdleConns: config.MaxHTTPIdleConnections,\n\t\t\tIdleConnTimeout: config.HTTPIdleConnectionTimeout * time.Millisecond, // ms since we don't want cli to hold onto connections for very long\n\t\t\tTLSClientConfig: &tls.Config{\n\t\t\t\tInsecureSkipVerify: skipSSL,\n\t\t\t},\n\t\t},\n\t}\n\n}", "func (o *GetProductUpgradeURLUsingGETParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}", "func WithPlainHTTP(b bool) Opt {\n\treturn func(o *opts) {\n\t\to.plainHTTP = b\n\t}\n}", "func Timeout(o int) interface {\n\ttimeoutOptionSetter\n} {\n\treturn &timeoutOption{o}\n}", "func Timeout(t time.Duration) DiscoverOption {\n\treturn func(o *dOpts) {\n\t\to.timeout = t\n\t}\n}", "func HTTPGetCheck(url string, timeout time.Duration) Check {\n\tclient := http.Client{\n\t\tTimeout: timeout,\n\t\t// never follow redirects\n\t\tCheckRedirect: func(*http.Request, []*http.Request) error {\n\t\t\treturn http.ErrUseLastResponse\n\t\t},\n\t}\n\treturn func() error {\n\t\tresp, err := client.Get(url)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tresp.Body.Close()\n\t\tif resp.StatusCode != 200 {\n\t\t\treturn fmt.Errorf(\"returned status %d\", resp.StatusCode)\n\t\t}\n\t\treturn nil\n\t}\n}", "func HTTPGetCheck(url string, timeout time.Duration) Check {\n\tclient := http.Client{\n\t\tTimeout: timeout,\n\t\t// never follow redirects\n\t\tCheckRedirect: func(*http.Request, []*http.Request) error {\n\t\t\treturn http.ErrUseLastResponse\n\t\t},\n\t}\n\treturn func() error {\n\t\tresp, err := client.Get(url)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tresp.Body.Close()\n\t\tif resp.StatusCode != 200 {\n\t\t\treturn fmt.Errorf(\"returned status %d\", resp.StatusCode)\n\t\t}\n\t\treturn nil\n\t}\n}", "func HTTPClientWithTLSConfig(conf *tls.Config) *http.Client {\n\treturn &http.Client{\n\t\tTimeout: time.Second * 20,\n\t\tTransport: &http.Transport{\n\t\t\tDial: (&net.Dialer{\n\t\t\t\tTimeout: 5 * time.Second,\n\t\t\t}).Dial,\n\t\t\tTLSHandshakeTimeout: 5 * time.Second,\n\t\t\tTLSClientConfig: 
conf,\n\t\t},\n\t}\n}", "func HTTP(healthService string, httpTimeout time.Duration) bool {\n\tclient := http.Client{\n\t\tTimeout: httpTimeout,\n\t}\n\n\tresp, err := client.Get(healthService)\n\t// Check if response timeouts or returns an HTTP error\n\tif err != nil {\n\t\treturn false\n\t}\n\tdefer resp.Body.Close()\n\tbytes, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn false\n\t}\n\n\tif strings.Contains(string(bytes), \"healthy\") {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func Timeout(o *Options) func(http.Handler) http.Handler {\n\ttimeout := o.requestTimeout()\n\treturn func(delegate http.Handler) http.Handler {\n\t\treturn http.HandlerFunc(func(response http.ResponseWriter, request *http.Request) {\n\t\t\tctx, cancel := context.WithTimeout(request.Context(), timeout)\n\t\t\tdefer cancel()\n\t\t\tdelegate.ServeHTTP(response, request.WithContext(ctx))\n\t\t})\n\t}\n}", "func NewHTTPOptions(URL string) *HTTPOptions {\n\to := HTTPOptions{\n\t\tURL: URL,\n\t\tTickerInterval: 20,\n\t\tTimeout: 60,\n\t\tExpectedStatus: http.StatusOK,\n\t\tHeaders: make(map[string]string),\n\t}\n\treturn &o\n}", "func NewHTTPOptions(URL string) *HTTPOptions {\n\to := HTTPOptions{\n\t\tURL: URL,\n\t\tTickerInterval: 20,\n\t\tTimeout: 60,\n\t\tExpectedStatus: http.StatusOK,\n\t\tHeaders: make(map[string]string),\n\t}\n\treturn &o\n}", "func WithHeaderTimeout(headerTimeout int) configurer {\n\treturn func(conf *config) {\n\t\tconf.headerTimeout = headerTimeout\n\t}\n}", "func (o *SyncStatusUsingGETParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}", "func (c *Client) WithTimeout(timeout time.Duration) *Client {\n\tc.lock.RLock()\n\tdefer c.lock.RUnlock()\n\tc.httpClient.Timeout = timeout\n\treturn c\n}", "func WithTimeout(d time.Duration, cancelFunc func(*Entry)) ExecOption {\n\treturn func(opt *Option) {\n\t\topt.timeout = d\n\t\topt.cancelFunc = cancelFunc\n\t}\n}", "func WithOptions(o Options) filters.Spec {\n\tif o.Timeout == 0 {\n\t\to.Timeout = defaultIdleConnTimeout\n\t}\n\tif o.MaxIdleConns == 0 {\n\t\to.MaxIdleConns = defaultMaxIdleConns\n\t}\n\tif o.MaxIdleConnsPerHost == 0 {\n\t\to.MaxIdleConnsPerHost = defaultMaxIdleConnsPerHost\n\t}\n\tif o.IdleConnTimeout == 0 {\n\t\to.IdleConnTimeout = defaultIdleConnTimeout\n\t}\n\treturn &teeSpec{options: o}\n}", "func (*ResponseTimeoutError) Timeout() bool { return true }", "func (o *NearestUsingGET1Params) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}", "func (o *GetPageDataUsingGETParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}", "func WithCustomTimouts(to Timeouts) ServerOption {\n\treturn func(server *Server) {\n\t\tif to.WriteTimeout != 0 {\n\t\t\tserver.http.WriteTimeout = to.WriteTimeout\n\t\t}\n\t\tif to.ReadTimeout != 0 {\n\t\t\tserver.http.ReadTimeout = to.ReadTimeout\n\t\t}\n\t}\n}", "func WithSocketTimeout(socketTimeout int) configurer {\n\treturn func(conf *config) {\n\t\tconf.socketTimeout = socketTimeout\n\t}\n}", "func WithSessionTimeout(tm time.Duration) Option {\n\treturn func(o *config) {\n\t\to.SessionTimeout = tm\n\t}\n}", "func (o *BasicOptions) Timeout() int {\n\tif o.AdvancedOptions.Timeout == 0 {\n\t\treturn int(defaultTimeout)\n\t}\n\treturn o.AdvancedOptions.Timeout\n}", "func (o *IntegrationsManualHTTPSCreateParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}", "func WithHTTPClient(hc *http.Client) Option {\n\treturn func(c *gate.Configuration) {\n\t\tc.HTTPClient = hc\n\t}\n}", "func DiscoveryTimeout(t time.Duration) RequestOption 
{\n\treturn func(o *RequestOptions) {\n\t\to.DiscoveryTimeout = t\n\t}\n}", "func WithHTMLOptions(opts ...html.Option) Option {\n\treturn &withHTMLOptions{opts}\n}", "func RequestTimeout(x time.Duration) func(*Client) {\n\treturn func(client *Client) {\n\t\tclient.HttpClient.Timeout = x * time.Second\n\t}\n}", "func Timeout(next http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tif config.V.BasicTimeout.Duration > 0 {\n\t\t\tc, cancel := context.WithTimeout(r.Context(), config.V.BasicTimeout.Duration)\n\t\t\tdefer cancel()\n\t\t\tr = r.WithContext(c)\n\t\t}\n\t\tnext.ServeHTTP(w, r)\n\t})\n}", "func WithTimeout(ctx context.Context) (context.Context, context.CancelFunc) {\n\treturn context.WithTimeout(ctx, RetryAttempts*RetryInterval)\n}", "func RequestTimeout(d time.Duration) Option {\n\treturn func(o *Options) {\n\t\to.CallOptions.RequestTimeout = d\n\t}\n}", "func With_timeout(parent *Group, timeout time.Duration) option {\n\treturn func(o *Group) {\n\t\tif o.Context != nil {\n\t\t\tpanic(\"context already set\")\n\t\t}\n\t\tif parent == nil {\n\t\t\tpanic(\"parent is nil\")\n\t\t}\n\t\to.Context, o.CancelFunc = context.WithTimeout(parent, timeout)\n\t\to.parent = parent\n\t}\n}", "func (o *GetCustomRuleParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}", "func (o *GetContentSourceUsingGETParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}", "func (o *GetSellerServicesUsingGETParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}", "func HttpOptions(httpOpts *server.Options) Option {\n\treturn func(c *Service) {\n\t\tc.serverOptions = httpOpts\n\t}\n}", "func WithProbeTimeout(d time.Duration) Option {\n\treturn withProbeTimeout(d)\n}", "func ReadTimeout(d time.Duration) Option {\n\treturn optionFunc(func(c *config) {\n\t\tc.httpServer.ReadTimeout = d\n\t})\n}" ]
[ "0.7159275", "0.69684875", "0.6743757", "0.66001695", "0.65526265", "0.6522622", "0.64707696", "0.64276093", "0.64276093", "0.6423289", "0.6418356", "0.6373808", "0.63530564", "0.6339308", "0.63357085", "0.6263725", "0.6263725", "0.62461066", "0.6234871", "0.6172803", "0.6101886", "0.6080703", "0.60358036", "0.6032779", "0.5967944", "0.59673154", "0.5962482", "0.5962482", "0.59528047", "0.5931415", "0.5893104", "0.58916926", "0.58883893", "0.587311", "0.5841937", "0.5823836", "0.57963204", "0.57963204", "0.57963204", "0.57963204", "0.5735933", "0.5708355", "0.5667647", "0.5662354", "0.5662354", "0.5641856", "0.5635422", "0.5622269", "0.56067306", "0.5572468", "0.556804", "0.55459213", "0.55400425", "0.5538155", "0.5530923", "0.55023915", "0.549629", "0.5493569", "0.54546607", "0.5442181", "0.5439581", "0.54267323", "0.5395509", "0.53757703", "0.5362832", "0.5350591", "0.5349407", "0.5349407", "0.53492254", "0.5341458", "0.53402215", "0.5311131", "0.5311131", "0.5300229", "0.5294786", "0.52890795", "0.52567905", "0.52420914", "0.5238898", "0.5235046", "0.5217137", "0.52150375", "0.51906663", "0.5187018", "0.51237893", "0.51229733", "0.5116447", "0.5116379", "0.5100647", "0.5098407", "0.50960684", "0.50799084", "0.5074859", "0.50741684", "0.50722003", "0.5068532", "0.506626", "0.5065036", "0.5057658", "0.5054713" ]
0.8700442
0
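In the WithHTTPTimeout record above, the positive document itself is retrieved at rank 0 with score 0.8700442. The sketch below shows the functional-option pattern that document implements in use; HTTPChecker, HTTPCheckerOption, and NewHTTPChecker are reproduced from the positive documents in this slice of the dataset, while the endpoint URL and the 3-second timeout are illustrative assumptions.

    package main

    import (
        "fmt"
        "net/http"
        "time"
    )

    // Reproduced from the positive documents of this and the later records.
    type HTTPChecker struct {
        url     string
        method  string
        timeout time.Duration
    }

    type HTTPCheckerOption func(*HTTPChecker)

    func WithHTTPTimeout(timeout time.Duration) HTTPCheckerOption {
        return func(c *HTTPChecker) { c.timeout = timeout }
    }

    func NewHTTPChecker(url string, opts ...HTTPCheckerOption) *HTTPChecker {
        checker := &HTTPChecker{url: url, method: http.MethodGet}
        for _, opt := range opts {
            opt(checker)
        }
        return checker
    }

    func main() {
        // A bounded timeout keeps one slow dependency from stalling the whole
        // probe; too small a value produces false failures, as the query notes.
        c := NewHTTPChecker("http://example.com/healthz", WithHTTPTimeout(3*time.Second))
        fmt.Println(c.method, c.timeout) // GET 3s
    }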
WithHTTPMethod returns an HTTPCheckerOption that specifies the method for HTTP requests. The default method is "GET", which works in most cases, but another popular choice is "HEAD".
func WithHTTPMethod(method string) HTTPCheckerOption { return func(c *HTTPChecker) { c.method = method } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func WithMethod(method string) Option {\n\treturn func(p *Protocol) error {\n\t\tif p == nil {\n\t\t\treturn fmt.Errorf(\"http method option can not set nil protocol\")\n\t\t}\n\t\tmethod = strings.TrimSpace(method)\n\t\tif method != \"\" {\n\t\t\tif p.RequestTemplate == nil {\n\t\t\t\tp.RequestTemplate = &nethttp.Request{}\n\t\t\t}\n\t\t\tp.RequestTemplate.Method = method\n\t\t\treturn nil\n\t\t}\n\t\treturn fmt.Errorf(\"http method option was empty string\")\n\t}\n}", "func WithMethod(method string) Option {\n\treturn func(r *RequestClient) {\n\t\tr.method = method\n\t}\n}", "func SetHTTPMethod(method string) auth.Option {\n\treturn internal.SetRequesterMethod(method)\n}", "func OptMethod(method string) Option {\n\treturn RequestOption(webutil.OptMethod(method))\n}", "func OptMethod(method string) Option {\n\treturn func(r *Request) error {\n\t\tr.Method = method\n\t\treturn nil\n\t}\n}", "func OptGet() Option {\n\treturn func(r *Request) error {\n\t\tr.Method = \"GET\"\n\t\treturn nil\n\t}\n}", "func (h *HTTP) SetMethod(method string) {\n\th.method = method\n}", "func (i Internet) HTTPMethod() string {\n\treturn i.Faker.RandomStringElement([]string{\n\t\thttp.MethodGet,\n\t\thttp.MethodHead,\n\t\thttp.MethodPost,\n\t\thttp.MethodPut,\n\t\thttp.MethodPatch,\n\t\thttp.MethodDelete,\n\t\thttp.MethodConnect,\n\t\thttp.MethodOptions,\n\t\thttp.MethodTrace,\n\t})\n}", "func (server *HTTPServer) handleOptionsMethod(nextHandler http.Handler) http.Handler {\n\treturn http.HandlerFunc(\n\t\tfunc(w http.ResponseWriter, r *http.Request) {\n\t\t\tif r.Method == \"OPTIONS\" {\n\t\t\t\tw.WriteHeader(http.StatusOK)\n\t\t\t} else {\n\t\t\t\tnextHandler.ServeHTTP(w, r)\n\t\t\t}\n\t\t})\n}", "func (server *HTTPServer) handleOptionsMethod(nextHandler http.Handler) http.Handler {\n\treturn http.HandlerFunc(\n\t\tfunc(w http.ResponseWriter, r *http.Request) {\n\t\t\tif r.Method == \"OPTIONS\" {\n\t\t\t\tw.WriteHeader(http.StatusOK)\n\t\t\t} else {\n\t\t\t\tnextHandler.ServeHTTP(w, r)\n\t\t\t}\n\t\t})\n}", "func (o GetServerGroupsGroupHealthCheckOutput) HttpCheckMethod() pulumi.StringOutput {\n\treturn o.ApplyT(func(v GetServerGroupsGroupHealthCheck) string { return v.HttpCheckMethod }).(pulumi.StringOutput)\n}", "func (htpc *HttpProcessorConfig) Method(httpMethod string) *HttpProcessorConfig {\n\thtpc.method = httpMethod\n\treturn htpc\n}", "func (o ServerGroupHealthCheckOutput) HttpCheckMethod() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ServerGroupHealthCheck) *string { return v.HttpCheckMethod }).(pulumi.StringPtrOutput)\n}", "func (r *Request) HTTPMethod() string {\n\treturn r.httpMethod\n}", "func (o ServerGroupHealthCheckPtrOutput) HttpCheckMethod() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *ServerGroupHealthCheck) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.HttpCheckMethod\n\t}).(pulumi.StringPtrOutput)\n}", "func (t *Target) SetMethod(m string) error {\n if httpMethodChecker.MatchString(m) {\n t.method = m\n } else {\n t.method = defaultMethod\n return errInvalidHttpMethod\n }\n return nil\n}", "func Method(method string) Option {\n\treturn func(c *Config) Option {\n\t\tprevious := c.Method\n\t\tc.Method = method\n\t\treturn Method(previous)\n\t}\n}", "func (f *APIAuditFilter) WhereHTTPMethod(p entql.StringP) {\n\tf.Where(p.Field(apiaudit.FieldHTTPMethod))\n}", "func (c *Client) Options(url string, headers, queryParams map[string][]string) (response *http.Response, err error) {\n\treturn c.makeRequest(url, http.MethodOptions, headers, queryParams, nil)\n}", 
"func (m MetaData) GetHTTPMethod() string {\n\tif len(m) == 0 {\n\t\treturn \"\"\n\t}\n\treturn m[httpMethod]\n}", "func Method(method string, urlPath string) (Request, error) {\n\tif method != \"GET\" && method != \"POST\" && method != \"PUT\" && method != \"DELETE\" &&\n\t\tmethod != \"HEAD\" && method != \"OPTIONS\" && method != \"PATCH\" {\n\t\treturn nil, errors.New(\"method not supported\")\n\t}\n\treturn newRequest(method, urlPath, sessionWithoutCookies)\n}", "func httpMethodBuilder(m string, ac AccessControl, handler http.Handler, router *httprouter.Router, status string, url string, proxyConfig *Proxy) {\n\tlogger.Debugf(\"[DEBUG] LINK:\", m, url)\n\tswitch m {\n\tcase \"GET\":\n\t\trouter.GET(ac.Route, easyJWT(handler, ac, proxyConfig.Connect.HeaderPrefix))\n\tcase \"POST\":\n\t\trouter.POST(ac.Route, easyJWT(handler, ac, proxyConfig.Connect.HeaderPrefix))\n\tcase \"PUT\":\n\t\trouter.PUT(ac.Route, easyJWT(handler, ac, proxyConfig.Connect.HeaderPrefix))\n\tcase \"DELETE\":\n\t\trouter.DELETE(ac.Route, easyJWT(handler, ac, proxyConfig.Connect.HeaderPrefix))\n\tcase \"HEAD\":\n\t\trouter.HEAD(ac.Route, easyJWT(handler, ac, proxyConfig.Connect.HeaderPrefix))\n\t}\n\t// always OPTIONS\n\tif h, _, _ := router.Lookup(\"OPTIONS\", ac.Route); h == nil {\n\t\tlogger.Debugf(\"[DEBUG] LINK: OPTIONS\", url)\n\t\trouter.OPTIONS(ac.Route, func(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {\n\t\t\tlogger.Debugf(\"[DEBUG] set cors\", r.URL)\n\t\t\tw, r = addCORSHeaders(w, r)\n\t\t\tw.Write([]byte(\"\"))\n\t\t\treturn\n\t\t})\n\t}\n\n}", "func EnsureMethod(method string) func(http.HandlerFunc) http.HandlerFunc {\n\treturn func(next http.HandlerFunc) http.HandlerFunc {\n\t\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\t\tif r.Method == method {\n\t\t\t\tnext(w, r)\n\t\t\t} else {\n\t\t\t\tw.WriteHeader(http.StatusMethodNotAllowed)\n\t\t\t}\n\t\t}\n\t}\n}", "func WithMethodNotAllowed(f http.Handler) ConfigOption {\n\treturn ConfigOptionFunc(func(c *Config) { c.MethodNotAllowed = f })\n}", "func (APIResourceBase) Options(session *Session, url string, queries url.Values, body io.Reader) (APIStatus, interface{}) {\n\treturn FailSimple(http.StatusMethodNotAllowed), nil\n}", "func checkMethod(method string) Adapter {\n\treturn func(fn http.Handler) http.Handler {\n\t\treturn http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {\n\t\t\ts := http.StatusMethodNotAllowed\n\t\t\t// ensure a POST\n\t\t\tif req.Method != method {\n\t\t\t\thttp.Error(rw, (newResponse(s, 0)).JSON(), s)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tfn.ServeHTTP(rw, req)\n\t\t})\n\t}\n}", "func SupportedMethods(method string, additional ...string) Middleware {\n\tsupported := make(map[string]bool, len(additional)+1)\n\tsupported[strings.ToUpper(method)] = true\n\tfor _, m := range additional {\n\t\tsupported[strings.ToUpper(m)] = true\n\t}\n\tif supported[http.MethodGet] {\n\t\tsupported[http.MethodHead] = true\n\t}\n\n\tallowed := make([]string, len(supported))\n\ti := 0\n\tfor m := range supported {\n\t\tallowed[i] = m\n\t\ti++\n\t}\n\tsort.Strings(allowed)\n\tallowedHeader := strings.Join(allowed, \",\")\n\n\treturn func(name string, next HandlerFunc) HandlerFunc {\n\t\treturn func(ctx context.Context, w http.ResponseWriter, r *http.Request) error {\n\t\t\tif !supported[r.Method] {\n\t\t\t\tw.Header().Set(AllowHeader, allowedHeader)\n\t\t\t\treturn RawError(\n\t\t\t\t\tMethodNotAllowed(),\n\t\t\t\t\tfmt.Errorf(\"method %q is not supported by %q\", r.Method, 
name),\n\t\t\t\t\tPlainTextContentType,\n\t\t\t\t)\n\t\t\t}\n\t\t\treturn next(ctx, w, r)\n\t\t}\n\t}\n}", "func (o *Request) WithMethod(method string) *Request {\n\to.Method = method\n\treturn o\n}", "func (c *Client) Options(ctx context.Context, url string, data ...interface{}) (*Response, error) {\n\treturn c.DoRequest(ctx, http.MethodOptions, url, data...)\n}", "func WithPlainHTTP(b bool) Opt {\n\treturn func(o *opts) {\n\t\to.plainHTTP = b\n\t}\n}", "func (tr *Transport) OPTION(\n\turl string,\n\tfn Handler,\n\toptions ...HandlerOption,\n) {\n\ttr.mux.Handler(\n\t\tnet_http.MethodOptions,\n\t\turl,\n\t\tnewHandler(fn, append(tr.options, options...)...),\n\t)\n}", "func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\th.prepResponse(w)\n\n\tif r.Method == optionsMethod {\n\t\th.handlePreflight(w, r)\n\t\tw.WriteHeader(http.StatusOK)\n\t\treturn\n\t}\n\n\th.handleRequest(w, r)\n\th.next.ServeHTTP(w, r)\n}", "func WithHTTP() Option {\n\treturn optionFunc(func(c *config) error {\n\t\tc.namespace = \"http\"\n\t\treturn nil\n\t})\n}", "func isSupportedHTTPMethod(method string) bool {\n\tfor _, v := range supportedHTTPMethods {\n\t\tif v == method {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func MatchesHTTPMethod(text string) bool {\n\treturn len(MatchHTTPMethod(text)) > 0\n}", "func (op *ThreeDEnrollmentAssembly) GetHTTPMethod() string {\n\treturn op.opHTTPData.GetHTTPMethod()\n}", "func OptionsHandler(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Add(\"Allow\", http.MethodGet)\n\tw.Header().Add(\"Allow\", http.MethodHead)\n\tw.Header().Add(\"Allow\", http.MethodPost)\n\tw.Header().Add(\"Allow\", http.MethodPut)\n\tw.Header().Add(\"Allow\", http.MethodDelete)\n\tw.Header().Add(\"Allow\", http.MethodOptions)\n\tw.Header().Set(\"Transfer-Encoding\", \"chunked\")\n\tw.Header().Set(\"Access-Control-Allow-Origin\", \"*\")\n\tw.WriteHeader(http.StatusNoContent)\n}", "func (self *RouteBuilder) Method(method string) *RouteBuilder {\n\tself.httpMethod = method\n\treturn self\n}", "func (F *Frisby) Options(url string) *Frisby {\n\tF.Method = \"OPTIONS\"\n\tF.Url = url\n\treturn F\n}", "func (rb *RequestBuilder) Options(url string) *Response {\n\treturn rb.DoRequest(http.MethodOptions, url, nil)\n}", "func (b *RouteBuilder) Method(method string) *RouteBuilder {\n\tb.httpMethod = method\n\treturn b\n}", "func (h *RequestHeader) SetMethod(method string) {\n\th.method = append(h.method[:0], method...)\n}", "func WithHTTPTimeout(timeout time.Duration) HTTPCheckerOption {\n\treturn func(c *HTTPChecker) {\n\t\tc.timeout = timeout\n\t}\n}", "func (r *Request) method(method, path string) *Request {\n\tif path != \"/\" {\n\t\tr.URLStruct.Path = path\n\t}\n\tr.Method = strings.ToUpper(method)\n\treturn r\n}", "func LimitMethod(m string) RequestOption {\n\treturn func(o *RequestOptions) {\n\t\to.LimitMethod = m\n\t}\n}", "func IncHttpMethod(method string) {\n\thostname, _ := os.Hostname()\n\tlabels := prometheus.Labels{\n\t\t\"hostname\": hostname,\n\t\t\"env\": os.Getenv(\"ENV\"),\n\t\t\"method\": method,\n\t}\n\n\thttpMethods.With(labels).Inc()\n}", "func (pi PathItem) HasMethod(method string) bool {\n\tswitch strings.ToUpper(method) {\n\tcase \"GET\":\n\t\treturn pi.Get != nil\n\tcase \"POST\":\n\t\treturn pi.Post != nil\n\tcase \"PUT\":\n\t\treturn pi.Put != nil\n\tcase \"DELETE\":\n\t\treturn pi.Delete != nil\n\tcase \"OPTIONS\":\n\t\treturn pi.Options != nil\n\tcase \"HEAD\":\n\t\treturn pi.Head != nil\n\tcase \"PATCH\":\n\t\treturn pi.Patch != nil\n\t}\n\n\treturn 
false\n}", "func (apiReq *APIRequest) Method(method string) *APIRequest {\n\tapiReq.method = method\n\treturn apiReq\n}", "func NewHTTPChecker(url string, opts ...HTTPCheckerOption) *HTTPChecker {\n\tchecker := &HTTPChecker{\n\t\turl: url,\n\t\tmethod: http.MethodGet,\n\t}\n\n\tfor _, opt := range opts {\n\t\topt(checker)\n\t}\n\n\treturn checker\n}", "func (r *bitroute) ServeHTTP(w http.ResponseWriter, req *http.Request) {\n\tif r.recoveryHandler != nil {\n\t\tdefer r.recovery(w, req)\n\t}\n\tif _, ok := r.handlers[req.Method]; ok {\n\t\tif handle, params, ok := r.handlers[req.Method].get(req.URL.Path); ok {\n\t\t\tc := NewControl(w, req)\n\t\t\tif len(params) > 0 {\n\t\t\t\tfor _, item := range params {\n\t\t\t\t\tc.Param(item.key, item.value)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif r.middlewareHandler != nil {\n\t\t\t\tr.middlewareHandler(handle)(c)\n\t\t\t} else {\n\t\t\t\thandle(c)\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t}\n\tallowed := r.allowedMethods(req.URL.Path)\n\n\tif len(allowed) == 0 {\n\t\tif r.notFound != nil {\n\t\t\tc := NewControl(w, req)\n\t\t\tr.notFound(c)\n\t\t} else {\n\t\t\thttp.NotFound(w, req)\n\t\t}\n\t\treturn\n\t}\n\n\tw.Header().Set(\"Allow\", strings.Join(allowed, \", \"))\n\tif req.Method == \"OPTIONS\" && r.optionsRepliesEnabled {\n\t\treturn\n\t}\n\tif r.notAllowed != nil {\n\t\tc := NewControl(w, req)\n\t\tr.notAllowed(c)\n\t} else {\n\t\thttp.Error(w, http.StatusText(http.StatusMethodNotAllowed), http.StatusMethodNotAllowed)\n\t}\n}", "func (r *Router) Options(path string, handle HandleFunc) {\n\tr.register(path, http.MethodOptions, handle)\n}", "func (h methodNotAllowedHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\t// Sets the Allow header\n\tw.Header().Add(\"Allow\", strings.Join(h, \", \"))\n\tw.WriteHeader(http.StatusMethodNotAllowed)\n}", "func (r *Request) Method(method string) *Request {\n\tr.method = method\n\treturn r\n}", "func (req *Request) SetMethod(method string) {\n\treq.Req.Method = method\n}", "func (a *AuthToken) Method() transport.AuthMethod {\n\treturn &http.BasicAuth{\n\t\tUsername: a.Username,\n\t\tPassword: a.Token,\n\t}\n}", "func (h *RequestHeader) SetProtocol(method string) {\n\th.proto = append(h.proto[:0], method...)\n\th.noHTTP11 = !bytes.Equal(h.proto, strHTTP11)\n}", "func getHTTPMethodType(httpMethod v1alpha1.HTTPMethod) string {\n\tif !reflect.DeepEqual(httpMethod.Get, v1alpha1.GetMethod{}) {\n\t\treturn \"Get\"\n\t}\n\treturn \"Post\"\n}", "func (r *Route) Options(handler http.Handler) *Route {\n\tr.handlers[http.MethodOptions] = handler\n\treturn r\n}", "func HTTPMethodOverride(c *Controller, fc []Filter) {\n\t// An array of HTTP verbs allowed.\n\tverbs := []string{\"POST\", \"PUT\", \"PATCH\", \"DELETE\"}\n\n\tmethod := strings.ToUpper(c.Request.Request.Method)\n\n\tif method == \"POST\" {\n\t\tparam := strings.ToUpper(c.Request.Request.PostFormValue(\"_method\"))\n\n\t\tif len(param) > 0 {\n\t\t\toverride := false\n\t\t\t// Check if param is allowed\n\t\t\tfor _, verb := range verbs {\n\t\t\t\tif verb == param {\n\t\t\t\t\toverride = true\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif override {\n\t\t\t\tc.Request.Request.Method = param\n\t\t\t} else {\n\t\t\t\tc.Response.Status = 405\n\t\t\t\tc.Result = c.RenderError(&Error{\n\t\t\t\t\tTitle: \"Method not allowed\",\n\t\t\t\t\tDescription: \"Method \" + param + \" is not allowed (valid: \" + strings.Join(verbs, \", \") + \")\",\n\t\t\t\t})\n\t\t\t\treturn\n\t\t\t}\n\n\t\t}\n\t}\n\n\tfc[0](c, fc[1:]) // Execute the next filter stage.\n}", "func Method(m string) 
Middleware {\n\n\t// Create a new Middleware\n\treturn func(f http.HandlerFunc) http.HandlerFunc {\n\n\t\t// Define the http.HandlerFunc\n\t\treturn func(w http.ResponseWriter, r *http.Request) {\n\n\t\t\t// Do middleware things\n\t\t\tif r.Method != m {\n\t\t\t\thttp.Error(w, http.StatusText(http.StatusMethodNotAllowed), http.StatusMethodNotAllowed)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// Call the next middleware/handler in chain\n\t\t\tf(w, r)\n\t\t}\n\t}\n}", "func (h *sampleResolveHandler) Method() string {\n\treturn http.MethodGet\n}", "func (op *ChargeDMSAssembly) GetHTTPMethod() string {\n\treturn op.opHTTPData.GetHTTPMethod()\n}", "func (srv *Server) handleOptions(res http.ResponseWriter, req *http.Request) {\n\tmethods := make(map[string]bool)\n\n\tnode := srv.getFSNode(req.URL.Path)\n\tif node != nil {\n\t\tmethods[http.MethodGet] = true\n\t\tmethods[http.MethodHead] = true\n\t}\n\n\tfor _, rute := range srv.routeDeletes {\n\t\t_, ok := rute.parse(req.URL.Path)\n\t\tif ok {\n\t\t\tmethods[http.MethodDelete] = true\n\t\t\tbreak\n\t\t}\n\t}\n\n\tfor _, rute := range srv.routeGets {\n\t\t_, ok := rute.parse(req.URL.Path)\n\t\tif ok {\n\t\t\tmethods[http.MethodGet] = true\n\t\t\tbreak\n\t\t}\n\t}\n\n\tfor _, rute := range srv.routePatches {\n\t\t_, ok := rute.parse(req.URL.Path)\n\t\tif ok {\n\t\t\tmethods[http.MethodPatch] = true\n\t\t\tbreak\n\t\t}\n\t}\n\n\tfor _, rute := range srv.routePosts {\n\t\t_, ok := rute.parse(req.URL.Path)\n\t\tif ok {\n\t\t\tmethods[http.MethodPost] = true\n\t\t\tbreak\n\t\t}\n\t}\n\n\tfor _, rute := range srv.routePuts {\n\t\t_, ok := rute.parse(req.URL.Path)\n\t\tif ok {\n\t\t\tmethods[http.MethodPut] = true\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif len(methods) == 0 {\n\t\tres.WriteHeader(http.StatusNotFound)\n\t\treturn\n\t}\n\n\tmethods[http.MethodOptions] = true\n\n\tvar x int\n\tallows := make([]string, len(methods))\n\tfor k, v := range methods {\n\t\tif v {\n\t\t\tallows[x] = k\n\t\t\tx++\n\t\t}\n\t}\n\n\tsort.Strings(allows)\n\n\tres.Header().Set(\"Allow\", strings.Join(allows, \", \"))\n\tres.WriteHeader(http.StatusOK)\n}", "func (options *EditLoadBalancerMonitorOptions) SetMethod(method string) *EditLoadBalancerMonitorOptions {\n\toptions.Method = core.StringPtr(method)\n\treturn options\n}", "func (c *BaseController) Options(w http.ResponseWriter, r *Request) {\n\thttp.Error(w, \"Method Not Allowed\", 405)\n}", "func (g *Group) OPTIONS(path string, handler Handler, middleware ...Middleware) *Group {\n\treturn g.Add(http.MethodOptions, path, handler, middleware...)\n}", "func (tr *Transport) Options(url string, fn HandlerFunc, options ...HandlerOption) {\n\ttr.mux.Handler(net_http.MethodOptions, url, encapsulate(fn, tr.options, options))\n}", "func (r *Request) Options(url string) (*Response, error) {\n\treturn r.Execute(MethodOptions, url)\n}", "func getHttpRuleForMethod(method *surface_v1.Method, body *string) annotations.HttpRule {\n\tvar httpRule annotations.HttpRule\n\tswitch method.Method {\n\tcase \"GET\":\n\t\thttpRule = annotations.HttpRule{\n\t\t\tPattern: &annotations.HttpRule_Get{\n\t\t\t\tGet: method.Path,\n\t\t\t},\n\t\t}\n\tcase \"POST\":\n\t\thttpRule = annotations.HttpRule{\n\t\t\tPattern: &annotations.HttpRule_Post{\n\t\t\t\tPost: method.Path,\n\t\t\t},\n\t\t}\n\tcase \"PUT\":\n\t\thttpRule = annotations.HttpRule{\n\t\t\tPattern: &annotations.HttpRule_Put{\n\t\t\t\tPut: method.Path,\n\t\t\t},\n\t\t}\n\tcase \"PATCH\":\n\t\thttpRule = annotations.HttpRule{\n\t\t\tPattern: &annotations.HttpRule_Patch{\n\t\t\t\tPatch: 
method.Path,\n\t\t\t},\n\t\t}\n\tcase \"DELETE\":\n\t\thttpRule = annotations.HttpRule{\n\t\t\tPattern: &annotations.HttpRule_Delete{\n\t\t\t\tDelete: method.Path,\n\t\t\t},\n\t\t}\n\t}\n\n\tif body != nil {\n\t\thttpRule.Body = *body\n\t}\n\n\treturn httpRule\n}", "func (rs *RequestSender) WithMethod(method string) *RequestSender {\n\trs.method = method\n\treturn rs\n}", "func AccessControlRequestMethod(value string) Option {\n\treturn setHeader(\"Access-Control-Request-Method\", value)\n}", "func (options *CreateLoadBalancerMonitorOptions) SetMethod(method string) *CreateLoadBalancerMonitorOptions {\n\toptions.Method = core.StringPtr(method)\n\treturn options\n}", "func testMethod(t *testing.T, r *http.Request, want string) {\n\tif got := r.Method; got != want {\n\t\tt.Errorf(\"Request method: %v, want %v\", got, want)\n\t}\n}", "func testMethod(t *testing.T, r *http.Request, want string) {\n\tif got := r.Method; got != want {\n\t\tt.Errorf(\"Request method: %v, want %v\", got, want)\n\t}\n}", "func OptPut() Option {\n\treturn func(r *Request) error {\n\t\tr.Method = \"PUT\"\n\t\treturn nil\n\t}\n}", "func Test_Ctx_Method(t *testing.T) {\n\tt.Parallel()\n\tfctx := &fasthttp.RequestCtx{}\n\tfctx.Request.Header.SetMethod(MethodGet)\n\tapp := New()\n\tctx := app.AcquireCtx(fctx)\n\tdefer app.ReleaseCtx(ctx)\n\tutils.AssertEqual(t, MethodGet, ctx.Method())\n\tctx.Method(MethodPost)\n\tutils.AssertEqual(t, MethodPost, ctx.Method())\n\n\tctx.Method(\"MethodInvalid\")\n\tutils.AssertEqual(t, MethodPost, ctx.Method())\n}", "func WithGetHandlerFunc(fn nethttp.HandlerFunc) Option {\n\treturn func(p *Protocol) error {\n\t\tif p == nil {\n\t\t\treturn fmt.Errorf(\"http GET handler func can not set nil protocol\")\n\t\t}\n\t\tp.GetHandlerFn = fn\n\t\treturn nil\n\t}\n}", "func (r *Router) Options(path string, handlers ...Handler) {\n\tr.addToRoutes(http.MethodOptions, path, handlers...)\n}", "func DefaultOptions(\n server Server,\n transactor *Transactor,\n methods []*RouteControllerHelper,\n) responses.Data {\n var methodStrings []string\n seenMethods := map[RequestMethod]bool{\n MethodOPTIONS: true,\n }\n for _, rch := range(methods) {\n for _, method := range rch.AllMethods() {\n if _, ok := seenMethods[method]; !ok {\n methodStrings = append(methodStrings, method.String())\n seenMethods[method] = true\n }\n }\n }\n sort.Strings(methodStrings)\n methodStrings = append([]string{MethodOPTIONS.String()}, methodStrings...)\n err := transactor.SetHeader(\n \"Access-Control-Allow-Methods\",\n strings.Join(methodStrings, \", \"),\n )\n if err != nil {\n return transactor.Abort(\n http.StatusInternalServerError,\n neterr.DefaultOptionsHeaderSetError,\n neterr.CodedErrorFromError(0, err),\n )\n }\n\n return transactor.Respond(http.StatusOK)\n}", "func AllowMethod(w http.ResponseWriter, m string, ms ...string) bool {\n\tfor _, meth := range ms {\n\t\tif m == meth {\n\t\t\treturn true\n\t\t}\n\t}\n\tw.Header().Set(\"Allow\", strings.Join(ms, \",\"))\n\thttp.Error(w, \"Method Not Allowed\", http.StatusMethodNotAllowed)\n\treturn false\n}", "func Verb(acceptedHTTPMethodgo string) middleware.Middleware {\n\treturn func(nextHandler http.HandlerFunc) http.HandlerFunc {\n\t\treturn func(response http.ResponseWriter, request *http.Request) {\n\t\t\tresponse.Header().Set(\"Content-Type\", \"application/json\")\n\n\t\t\tif request.Method != acceptedHTTPMethodgo {\n\t\t\t\tDispatchNewHttpError(response, fmt.Sprintf(\"Method %s is not allowed\", request.Method), 
http.StatusMethodNotAllowed)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tnextHandler(response, request)\n\t\t}\n\t}\n}", "func (c *Client) HTTP(verb, spath string, ro *RequestOptions) (*http.Response, error) {\n\treq, err := c.Request(verb, spath, ro)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tres, err := dispose(c.HTTPClient.Do(req))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn res, nil\n}", "func OptPost() Option {\n\treturn func(r *Request) error {\n\t\tr.Method = \"POST\"\n\t\treturn nil\n\t}\n}", "func (gar *GetCarrierRequest) Method() string {\n\treturn http.MethodGet\n}", "func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) {\n\tif r.PanicHandler != nil {\n\t\tdefer r.recv(w, req)\n\t}\n\n\tpath := req.URL.Path\n\n\tif root := r.trees[req.Method]; root != nil {\n\t\tif handle, ps, tsr := root.getValue(path); handle != nil {\n\t\t\thandle(w, req, ps)\n\t\t\treturn\n\t\t} else if req.Method != http.MethodConnect && path != \"/\" {\n\t\t\tcode := 301 // Permanent redirect, request with GET method\n\t\t\tif req.Method != http.MethodGet {\n\t\t\t\t// Temporary redirect, request with same method\n\t\t\t\t// As of Go 1.3, Go does not support status code 308.\n\t\t\t\tcode = 307\n\t\t\t}\n\n\t\t\tif tsr && r.RedirectTrailingSlash {\n\t\t\t\tif len(path) > 1 && path[len(path)-1] == '/' {\n\t\t\t\t\treq.URL.Path = path[:len(path)-1]\n\t\t\t\t} else {\n\t\t\t\t\treq.URL.Path = path + \"/\"\n\t\t\t\t}\n\t\t\t\thttp.Redirect(w, req, req.URL.String(), code)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// Try to fix the request path\n\t\t\tif r.RedirectFixedPath {\n\t\t\t\tfixedPath, found := root.findCaseInsensitivePath(\n\t\t\t\t\tCleanPath(path),\n\t\t\t\t\tr.RedirectTrailingSlash,\n\t\t\t\t)\n\t\t\t\tif found {\n\t\t\t\t\treq.URL.Path = string(fixedPath)\n\t\t\t\t\thttp.Redirect(w, req, req.URL.String(), code)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tif req.Method == http.MethodOptions && r.HandleOPTIONS {\n\t\t// Handle OPTIONS requests\n\t\tif allow := r.allowed(path, http.MethodOptions); allow != \"\" {\n\t\t\tw.Header().Set(\"Allow\", allow)\n\t\t\tif r.GlobalOPTIONS != nil {\n\t\t\t\tr.GlobalOPTIONS.ServeHTTP(w, req)\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t} else if r.HandleMethodNotAllowed { // Handle 405\n\t\tif allow := r.allowed(path, req.Method); allow != \"\" {\n\t\t\tw.Header().Set(\"Allow\", allow)\n\t\t\tif r.MethodNotAllowed != nil {\n\t\t\t\tr.MethodNotAllowed.ServeHTTP(w, req)\n\t\t\t} else {\n\t\t\t\thttp.Error(w,\n\t\t\t\t\thttp.StatusText(http.StatusMethodNotAllowed),\n\t\t\t\t\thttp.StatusMethodNotAllowed,\n\t\t\t\t)\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t}\n\n\t// Handle 404\n\tif r.NotFound != nil {\n\t\tr.NotFound.ServeHTTP(w, req)\n\t} else {\n\t\thttp.NotFound(w, req)\n\t}\n}", "func WithMethod(method Method) func(*Finder) {\n\treturn func(n *Finder) {\n\t\tn.method = method\n\t}\n}", "func HTTPMethodToString(method *pb.HttpMethod) string {\n\t// Check Unregistered first as Registered (being an enum) defaults\n\t// to GET when empty\n\tif method.GetUnregistered() != \"\" {\n\t\treturn method.GetUnregistered()\n\t}\n\treturn method.GetRegistered().String()\n}", "func (mm *Middleman) Options(path string, middleware Middleware) error {\r\n\treturn mm.addMiddleware(path, http.MethodOptions, middleware)\r\n}", "func WithRequestMethod(method string) RequestParam {\n\treturn requestParamFunc(func(b *requestBuilder) error {\n\t\tif method == \"\" {\n\t\t\treturn werror.Error(\"transport.RequestMethod: method can not be empty\")\n\t\t}\n\t\tb.method = 
strings.ToUpper(method)\n\t\treturn nil\n\t})\n}", "func (r *Router) OPTIONS(url string, viewFn View) *Path {\n\treturn r.Path(fasthttp.MethodOptions, url, viewFn)\n}", "func (h *HashRouter) ServeHTTP(w http.ResponseWriter, req *http.Request) {\n\tswitch req.Method {\n\tcase \"GET\":\n\t\th.get(w, req)\n\tcase \"POST\":\n\t\th.post(w, req)\n\tdefault:\n\t\tw.WriteHeader(http.StatusMethodNotAllowed)\n\t\tfmt.Fprint(w, \"Method not supported\")\n\t}\n}", "func ValidateMethod(r *http.Request, method string) (int, error) {\n\tif r.Method != method {\n\t\treturn http.StatusMethodNotAllowed, fmt.Errorf(\"Method not allowed\")\n\t}\n\n\t// No errors here!\n\treturn http.StatusOK, nil\n}", "func ClientWithMethods(methods []string) ClientOption {\n\treturn func(c *Client) error {\n\t\tfor _, method := range methods {\n\t\t\tif method != http.MethodGet && method != http.MethodPost {\n\t\t\t\treturn fmt.Errorf(\"invalid method %s\", method)\n\t\t\t}\n\t\t}\n\t\tc.methods = methods\n\t\treturn nil\n\t}\n}", "func WithRenderingMethod(a RenderingMethod) Option {\n\treturn &withRenderingMethod{a}\n}", "func FilterOptions(handler http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tif r.Method != http.MethodOptions {\n\t\t\thandler.ServeHTTP(w, r)\n\t\t}\n\t})\n}", "func Options(url string, data ...interface{}) (*ClientResponse, error) {\n\treturn DoRequest(\"OPTIONS\", url, data...)\n}", "func (e *Expect) OPTIONS(path string, pathargs ...interface{}) *Request {\n\treturn e.Request(http.MethodOptions, path, pathargs...)\n}", "func (api *apiConfig) MethodGet(uri string, queryParams map[string]string, decodedResponse interface{}) error {\n\t// If an override was configured, use it instead.\n\tif api.methodGet != nil {\n\t\treturn api.methodGet(uri, queryParams, decodedResponse)\n\t}\n\n\t// Form the request to make to WebFlow.\n\treq, err := http.NewRequest(\"GET\", api.BaseURL+uri, nil)\n\tif err != nil {\n\t\treturn errors.New(fmt.Sprint(\"Unable to create a new http request\", err))\n\t}\n\n\t// Webflow needs to know the auth token and the version of their API to use.\n\treq.Header.Set(\"Authorization\", \"Bearer \"+api.Token)\n\treq.Header.Set(\"Accept-Version\", defaultVersion)\n\n\t// Set query parameters.\n\tif len(queryParams) > 0 {\n\t\tquery := req.URL.Query()\n\t\tfor key, val := range queryParams {\n\t\t\tquery.Add(key, val)\n\t\t}\n\t\treq.URL.RawQuery = query.Encode()\n\t}\n\n\t// Make the request.\n\tres, err := api.Client.Do(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\t// TODO: read docs for ReaderCloser.Close() to determine what to do when it errors.\n\tdefer res.Body.Close()\n\n\t// Status codes of 200 to 299 are healthy; the rest are an error, redirect, etc.\n\tif res.StatusCode >= 300 || res.StatusCode < 200 {\n\t\terrResp := &GeneralError{}\n\t\tif err := json.NewDecoder(res.Body).Decode(errResp); err != nil {\n\t\t\treturn fmt.Errorf(\"Unknown API error; status code %d; error: %+v\", res.StatusCode, err)\n\t\t}\n\t\treturn errors.New(errResp.Err)\n\t}\n\n\tif err := json.NewDecoder(res.Body).Decode(decodedResponse); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (pc *PolicyConfigurator) Method() string {\n\treturn http.MethodPost\n}", "func (r *AppsCheckAuthorizationReq) HTTPRequest(ctx context.Context, opt ...RequestOption) (*http.Request, error) {\n\treturn buildHTTPRequest(ctx, r, opt)\n}" ]
[ "0.7333683", "0.68030214", "0.67774516", "0.6632889", "0.6376519", "0.6234125", "0.59104705", "0.57998", "0.5736446", "0.5736446", "0.5700958", "0.56578785", "0.56539893", "0.5588941", "0.55525637", "0.5535288", "0.5522277", "0.5516675", "0.5490813", "0.5489231", "0.54805845", "0.5466961", "0.54269046", "0.5389769", "0.5385671", "0.5359885", "0.5359084", "0.53523695", "0.5311622", "0.53115296", "0.5292697", "0.5253341", "0.5235252", "0.5234908", "0.5220277", "0.5219571", "0.5177984", "0.51719046", "0.5152201", "0.51464856", "0.5144671", "0.5134448", "0.5130726", "0.5128874", "0.51221627", "0.51121175", "0.5089505", "0.5066802", "0.50573546", "0.5048443", "0.5046939", "0.5041407", "0.5039482", "0.50382143", "0.50348043", "0.50273615", "0.50131327", "0.50086194", "0.5005466", "0.49841002", "0.4981243", "0.49787518", "0.49692163", "0.49690542", "0.49241084", "0.4910632", "0.49071792", "0.48997745", "0.4899267", "0.4896351", "0.4891311", "0.4891036", "0.4886816", "0.4886816", "0.48782277", "0.48705333", "0.4869334", "0.48650134", "0.48579964", "0.48459318", "0.48350456", "0.4829439", "0.4821662", "0.48123217", "0.48011702", "0.4782129", "0.47815788", "0.47808278", "0.47751376", "0.4771229", "0.47705275", "0.4763205", "0.4762922", "0.47549918", "0.4750992", "0.47484767", "0.4744883", "0.4743809", "0.47408465", "0.47331223" ]
0.85374355
0
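The WithHTTPMethod record above adds the second option of the same set. Extending the sketch after the previous record (same package, same types), the probe method can be switched to HEAD so only headers are transferred; the endpoint URL is again an illustrative assumption.

    // Reproduced from the positive document of the record above.
    func WithHTTPMethod(method string) HTTPCheckerOption {
        return func(c *HTTPChecker) { c.method = method }
    }

    func headChecker() *HTTPChecker {
        // HEAD fetches headers only, which is enough for a liveness probe
        // and cheaper than GET when the response body is irrelevant.
        return NewHTTPChecker(
            "http://example.com/healthz",
            WithHTTPMethod(http.MethodHead),
            WithHTTPTimeout(3*time.Second),
        )
    }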
NewHTTPChecker creates a new HTTPChecker with a URL and optional configuration. Example: checker := healthz.NewHTTPChecker("http://example.com", healthz.WithHTTPTimeout(3*time.Second))
func NewHTTPChecker(url string, opts ...HTTPCheckerOption) *HTTPChecker { checker := &HTTPChecker{ url: url, method: http.MethodGet, } for _, opt := range opts { opt(checker) } return checker }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func NewHTTPCheck(name, endpoint string) (Check, error) {\n\tep, err := url.Parse(endpoint)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\thc := &httpCheck{\n\t\tcheck: newCheck(name, ep.Hostname(), CheckTypeHTTP),\n\t\tURL: ep.Path,\n\t}\n\n\tif ep.Scheme == \"https\" {\n\t\thc.Encryption = true\n\t}\n\n\tif ep.User != nil {\n\t\tif ep.User.Username() != \"\" {\n\t\t\thc.Auth = ep.User.Username()\n\t\t}\n\n\t\tif pass, ok := ep.User.Password(); ok {\n\t\t\thc.Auth = hc.Auth + \":\" + pass\n\t\t}\n\t}\n\n\tif ep.Port() != \"\" {\n\t\thc.Port, err = strconv.Atoi(ep.Port())\n\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t} else if ep.Scheme == \"https\" {\n\t\thc.Port = 443\n\t}\n\n\treturn hc, nil\n}", "func WithHTTPTimeout(timeout time.Duration) HTTPCheckerOption {\n\treturn func(c *HTTPChecker) {\n\t\tc.timeout = timeout\n\t}\n}", "func NewHTTPCheckLister(indexer cache.Indexer) HTTPCheckLister {\n\treturn &hTTPCheckLister{indexer: indexer}\n}", "func (c *HTTPChecker) Check() error {\n\tclient := &http.Client{\n\t\tTimeout: c.timeout,\n\t}\n\n\treq, err := http.NewRequest(c.method, c.url, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif resp.StatusCode != http.StatusOK {\n\t\treturn ErrCheckFailed\n\t}\n\n\treturn nil\n}", "func HTTPGetCheck(url string, timeout time.Duration) Check {\n\tclient := http.Client{\n\t\tTimeout: timeout,\n\t\t// never follow redirects\n\t\tCheckRedirect: func(*http.Request, []*http.Request) error {\n\t\t\treturn http.ErrUseLastResponse\n\t\t},\n\t}\n\treturn func() error {\n\t\tresp, err := client.Get(url)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tresp.Body.Close()\n\t\tif resp.StatusCode != 200 {\n\t\t\treturn fmt.Errorf(\"returned status %d\", resp.StatusCode)\n\t\t}\n\t\treturn nil\n\t}\n}", "func HTTPGetCheck(url string, timeout time.Duration) Check {\n\tclient := http.Client{\n\t\tTimeout: timeout,\n\t\t// never follow redirects\n\t\tCheckRedirect: func(*http.Request, []*http.Request) error {\n\t\t\treturn http.ErrUseLastResponse\n\t\t},\n\t}\n\treturn func() error {\n\t\tresp, err := client.Get(url)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tresp.Body.Close()\n\t\tif resp.StatusCode != 200 {\n\t\t\treturn fmt.Errorf(\"returned status %d\", resp.StatusCode)\n\t\t}\n\t\treturn nil\n\t}\n}", "func NewHTTPOptions(URL string) *HTTPOptions {\n\to := HTTPOptions{\n\t\tURL: URL,\n\t\tTickerInterval: 20,\n\t\tTimeout: 60,\n\t\tExpectedStatus: http.StatusOK,\n\t\tHeaders: make(map[string]string),\n\t}\n\treturn &o\n}", "func NewHTTPOptions(URL string) *HTTPOptions {\n\to := HTTPOptions{\n\t\tURL: URL,\n\t\tTickerInterval: 20,\n\t\tTimeout: 60,\n\t\tExpectedStatus: http.StatusOK,\n\t\tHeaders: make(map[string]string),\n\t}\n\treturn &o\n}", "func NewChecker(opts ...Option) *Checker {\n\topt := &options{\n\t\ttimeout: 10 * time.Second,\n\t}\n\n\tfor _, o := range opts {\n\t\to(opt)\n\t}\n\n\treturn &Checker{\n\t\ttimeout: opt.timeout,\n\t\tchecks: opt.checks,\n\t}\n}", "func TCPChecker(addr string, timeout time.Duration) health.Checker {\n\treturn health.CheckFunc(func() error {\n\t\tconn, err := net.DialTimeout(\"tcp\", addr, timeout)\n\t\tif err != nil {\n\t\t\treturn errors.New(\"connection to \" + addr + \" failed\")\n\t\t}\n\t\tconn.Close()\n\t\treturn nil\n\t})\n}", "func NewHealthchecker(log *logrus.Logger, hostname string) Healthchecker {\n\treturn &healthchecker{\n\t\tlog: log.WithField(\"service\", \"lookup\"),\n\t\thostname: hostname,\n\t}\n}", 
"func NewHealthCheck(opt ...Option) *HealthCheck {\n\topts := GetOpts(opt...)\n\n\th := &HealthCheck{\n\t\tstatus: &healthStatus{},\n\t}\n\tif e, ok := opts[optionWithEngine].(*gin.Engine); ok {\n\t\th.Engine = e\n\t}\n\tif path, ok := opts[optionWithHealthPath].(string); ok {\n\t\th.HealthPath = path\n\t} else {\n\t\th.HealthPath = \"/ready\"\n\t}\n\tif handler, ok := opts[optionWithHealthHandler].(gin.HandlerFunc); ok {\n\t\th.Handler = handler\n\t} else {\n\t\th.Handler = h.DefaultHealthHandler()\n\t}\n\n\tif ticker, ok := opts[optionHealthTicker].(*time.Ticker); ok {\n\t\th.metricTicker = ticker\n\t} else {\n\t\th.metricTicker = time.NewTicker(DefaultHealthTickerDuration)\n\t}\n\n\treturn h\n}", "func (s *hTTPCheckLister) HTTPChecks(namespace string) HTTPCheckNamespaceLister {\n\treturn hTTPCheckNamespaceLister{indexer: s.indexer, namespace: namespace}\n}", "func NewHealthChecker(o *IotxOverlay) *HealthChecker {\n\thc := &HealthChecker{Overlay: o}\n\thc.SilentInterval = o.Config.SilentInterval\n\treturn hc\n}", "func NewHealthChecker(registration Lifecycle, checks []healthcheck.Agent) *HealthChecker {\n\tif len(checks) == 0 {\n\t\tpanic(\"No health checks provided\")\n\t}\n\n\treturn &HealthChecker{\n\t\tstop: make(chan struct{}),\n\t\tagents: checks,\n\t\tregistration: registration,\n\t}\n}", "func New(strategy string, cfg config.HealthcheckConfig) *Healthcheck {\n\n\tcheck := registry[strategy]\n\n\t/* Create healthcheck */\n\n\th := Healthcheck{\n\t\tcheck: check,\n\t\tcfg: cfg,\n\t\tIn: make(chan []core.Target),\n\t\tOut: make(chan CheckResult),\n\t\tworkers: []*Worker{},\n\t\tstop: make(chan bool),\n\t}\n\n\treturn &h\n}", "func NewHTTP(cfg config.Config) *HTTP {\n\tclient := &http.Client{\n\t\tTimeout: cfg.Timeout,\n\t}\n\treturn &HTTP{\n\t\tclient: client,\n\t\tconfig: cfg,\n\t}\n}", "func TestAgent_HTTPCheck(t *testing.T) {\n\tt.Parallel()\n\tlogger := log.New(ioutil.Discard, \"\", 0)\n\tif testing.Verbose() {\n\t\tlogger = log.New(os.Stdout, \"[TestAgent_HTTPCheck] \", log.Lshortfile)\n\t}\n\tagent := func() *Agent {\n\t\treturn &Agent{\n\t\t\tlogger: logger,\n\t\t\tconfig: &Config{\n\t\t\t\tAdvertiseAddrs: &AdvertiseAddrs{HTTP: \"advertise:4646\"},\n\t\t\t\tnormalizedAddrs: &Addresses{HTTP: \"normalized:4646\"},\n\t\t\t\tConsul: &sconfig.ConsulConfig{\n\t\t\t\t\tChecksUseAdvertise: helper.BoolToPtr(false),\n\t\t\t\t},\n\t\t\t\tTLSConfig: &sconfig.TLSConfig{EnableHTTP: false},\n\t\t\t},\n\t\t}\n\t}\n\n\tt.Run(\"Plain HTTP Check\", func(t *testing.T) {\n\t\ta := agent()\n\t\tcheck := a.agentHTTPCheck(false)\n\t\tif check == nil {\n\t\t\tt.Fatalf(\"expected non-nil check\")\n\t\t}\n\t\tif check.Type != \"http\" {\n\t\t\tt.Errorf(\"expected http check not: %q\", check.Type)\n\t\t}\n\t\tif expected := \"/v1/agent/health?type=client\"; check.Path != expected {\n\t\t\tt.Errorf(\"expected %q path not: %q\", expected, check.Path)\n\t\t}\n\t\tif check.Protocol != \"http\" {\n\t\t\tt.Errorf(\"expected http proto not: %q\", check.Protocol)\n\t\t}\n\t\tif expected := a.config.normalizedAddrs.HTTP; check.PortLabel != expected {\n\t\t\tt.Errorf(\"expected normalized addr not %q\", check.PortLabel)\n\t\t}\n\t})\n\n\tt.Run(\"Plain HTTP + ChecksUseAdvertise\", func(t *testing.T) {\n\t\ta := agent()\n\t\ta.config.Consul.ChecksUseAdvertise = helper.BoolToPtr(true)\n\t\tcheck := a.agentHTTPCheck(false)\n\t\tif check == nil {\n\t\t\tt.Fatalf(\"expected non-nil check\")\n\t\t}\n\t\tif expected := a.config.AdvertiseAddrs.HTTP; check.PortLabel != expected {\n\t\t\tt.Errorf(\"expected advertise addr 
not %q\", check.PortLabel)\n\t\t}\n\t})\n\n\tt.Run(\"HTTPS\", func(t *testing.T) {\n\t\ta := agent()\n\t\ta.config.TLSConfig.EnableHTTP = true\n\n\t\tcheck := a.agentHTTPCheck(false)\n\t\tif check == nil {\n\t\t\tt.Fatalf(\"expected non-nil check\")\n\t\t}\n\t\tif !check.TLSSkipVerify {\n\t\t\tt.Errorf(\"expected tls skip verify\")\n\t\t}\n\t\tif check.Protocol != \"https\" {\n\t\t\tt.Errorf(\"expected https not: %q\", check.Protocol)\n\t\t}\n\t})\n\n\tt.Run(\"HTTPS + VerifyHTTPSClient\", func(t *testing.T) {\n\t\ta := agent()\n\t\ta.config.TLSConfig.EnableHTTP = true\n\t\ta.config.TLSConfig.VerifyHTTPSClient = true\n\n\t\tif check := a.agentHTTPCheck(false); check != nil {\n\t\t\tt.Fatalf(\"expected nil check not: %#v\", check)\n\t\t}\n\t})\n}", "func NewHTTPTester(addr, path string) *HTTPTester {\n\treturn &HTTPTester{\n\t\tclient: http.Client{},\n\t\turl: fmt.Sprintf(\"http://%s%s\", addr, path),\n\t}\n}", "func createHealthChecks(gatewayUrl string) healthcheck.Handler {\n\thealth := healthcheck.NewHandler()\n\n\thealth.AddReadinessCheck(\"FRITZ!Box connection\",\n\t\thealthcheck.HTTPGetCheck(gatewayUrl+\"/any.xml\", time.Duration(3)*time.Second))\n\n\thealth.AddLivenessCheck(\"go-routines\", healthcheck.GoroutineCountCheck(100))\n\treturn health\n}", "func (t *TestRuntime) HealthCheck(url string) error {\n\n\turl += \"/health\"\n\tif t.waitForBundles {\n\t\turl += \"?bundles\"\n\t}\n\n\treq, err := http.NewRequest(\"GET\", url, nil)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unexpected error creating request: %s\", err)\n\t}\n\tresp, err := t.Client.Do(req)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unexpected error: %s\", err)\n\t}\n\tif resp.StatusCode != http.StatusOK {\n\t\treturn fmt.Errorf(\"unexpected response: %d %s\", resp.StatusCode, resp.Status)\n\t}\n\treturn nil\n}", "func NewTCPChecker() *TCPChecker {\n\treturn &TCPChecker{}\n}", "func NewTCPChecker() *TCPChecker {\n\treturn &TCPChecker{}\n}", "func NewHTTPNotifier(URL string) (*HTTPNotifier, error) {\n\tif _, err := url.Parse(URL); err != nil {\n\t\treturn nil, cerrors.NewBadRequestError(\"could not create a notifier with an invalid URL\")\n\t}\n\n\tnotifier := &HTTPNotifier{url: URL}\n\thealth.RegisterHealthchecker(\"notifier\", notifier.Healthcheck)\n\n\treturn notifier, nil\n}", "func New(svc healthcheck.Service, groups ...*echo.Group) {\n\th := HTTP{svc}\n\tv1 := groups[0].Group(\"/healthcheck\")\n\tv2 := groups[1].Group(\"/healthcheck\")\n\n\t//define /V1/healtcheck methods\n\tv1.GET(\"/:value\", h.get)\n\tv1.POST(\"/paginationtest\", h.getPagedList)\n\tv1.POST(\"\", h.post)\n\n\t//define /V2/healtcheck methods\n\tv2.GET(\"/:value\", h.getV2)\n}", "func (c clientGRPC) HealthCheck(url *url.URL) error {\n\tconn, err := getConn(url.Host, c.options)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer conn.Close()\n\n\thealthClient := healthpb.NewHealthClient(conn)\n\tctx, cancel := context.WithTimeout(context.Background(), c.options.Timeout)\n\tdefer cancel()\n\n\thealth, err := healthClient.Check(ctx, &healthpb.HealthCheckRequest{})\n\tif err != nil {\n\t\treturn err\n\t}\n\tif health.Status != healthpb.HealthCheckResponse_SERVING {\n\t\treturn ErrServiceNotAvailable\n\t}\n\treturn nil\n}", "func CheckHTTP(url string, redirect, insecure bool, host string, timeout int, format, path, expectedValue, expression string) (string, int) {\n\tconst checkName = \"CheckHttp\"\n\tvar retCode int\n\tvar msg string\n\n\tacceptText, err := getAcceptText(format)\n\tif err != nil {\n\t\tmsg, _ = resultMessage(checkName, 
statusTextCritical, fmt.Sprintf(\"The format (--format) \\\"%s\\\" is not valid. The only valid value is \\\"json\\\".\", format))\n\n\t\treturn msg, 2\n\t}\n\n\tstatus, body, _ := statusCode(url, insecure, timeout, acceptText, host)\n\n\tretCode, responseStateText := evaluateStatusCode(status, redirect)\n\tresponseCode := strconv.Itoa(status)\n\n\tvar checkMsg = \"\"\n\tif retCode == 0 && len(format) > 0 && len(path) > 0 {\n\t\tvar queryValue string\n\n\t\tswitch {\n\t\tcase format == \"json\":\n\t\t\texpectedValueLen := len(expectedValue)\n\t\t\texpressionLen := len(expression)\n\n\t\t\tvalue := gojsonq.New().JSONString(body).Find(path)\n\n\t\t\tif value == nil {\n\t\t\t\tretCode = 2\n\t\t\t\tresponseStateText = statusTextCritical\n\t\t\t\tcheckMsg = fmt.Sprintf(\". No entry at path %s\", path)\n\t\t\t} else if expectedValueLen > 0 && expressionLen > 0 {\n\t\t\t\tretCode = 2\n\t\t\t\tresponseStateText = statusTextCritical\n\t\t\t\tcheckMsg = fmt.Sprintf(\". Both --expectedValue and --expression given but only one is used\")\n\t\t\t} else if expectedValueLen > 0 {\n\t\t\t\tqueryValue = fmt.Sprintf(\"%v\", value)\n\t\t\t\tretCode, responseStateText, checkMsg = evaluateExpectedValue(queryValue, expectedValue, path)\n\t\t\t} else if expressionLen > 0 {\n\t\t\t\tretCode, responseStateText, checkMsg = evaluateExpression(value, expression, path)\n\t\t\t} else {\n\t\t\t\tretCode = 2\n\t\t\t\tresponseStateText = statusTextCritical\n\t\t\t\tcheckMsg = fmt.Sprintf(\". --expectedValue or --expression not given\")\n\t\t\t}\n\t\t}\n\t}\n\n\tmsg, _ = resultMessage(checkName, responseStateText, fmt.Sprintf(\"Url %s responded with %s%s\", url, responseCode, checkMsg))\n\n\treturn msg, retCode\n}", "func (c *Client) UpdateHTTPCheck(check v1alpha1.HTTPCheck) error {\n\tname := getName(check)\n\n\tpc := pingdom.HttpCheck{\n\t\tName: name,\n\t\tUserIds: []int{c.userID},\n\t\tHostname: check.Spec.Hostname,\n\t\tResolution: check.Spec.IntervalMinutes,\n\t\tEncryption: check.Spec.EnableTLS,\n\t\tSendNotificationWhenDown: check.Spec.TriggerThreshold,\n\t\tNotifyAgainEvery: check.Spec.RetriggerThreshold,\n\t\tNotifyWhenBackup: check.Spec.NotifyWhenBackup,\n\t\tTags: heimdallrTag,\n\t\tIntegrationIds: check.Spec.IntegrationIDs,\n\t}\n\n\thc, ok := c.httpChecks[name]\n\tif ok {\n\t\t_, err := c.client.Checks().Update(hc.id, &pc)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed to update check: %v\", err)\n\t\t}\n\t\thc.spec = check.Spec\n\t\tc.logger.Info(\"successfully updated check\", zap.String(\"name\", hc.name))\n\t} else {\n\t\tres, err := c.client.Checks().Create(&pc)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed to create check: %v\", err)\n\t\t}\n\t\thc = httpCheck{\n\t\t\tid: res.ID,\n\t\t\tname: name,\n\t\t\tspec: check.Spec,\n\t\t}\n\t\tc.logger.Info(\"successfully created check\", zap.String(\"name\", hc.name))\n\t}\n\n\tc.httpChecks[name] = hc\n\treturn nil\n}", "func New(t *testing.T, baseURL string) *T {\n\tclient := &http.Client{\n\t\tTimeout: time.Second * 5,\n\t}\n\n\tu, err := url.Parse(baseURL)\n\trequire.NoError(t, err)\n\n\treturn &T{\n\t\tT: t,\n\t\tURL: u,\n\t\tClient: client,\n\t}\n}", "func New(client *http.Client, req *http.Request, check RespCheck, urls []*url.URL) *FastestURL {\n\tif client == nil {\n\t\tclient = http.DefaultClient\n\t}\n\tif req == nil {\n\t\treq = &http.Request{}\n\t}\n\tif check == nil {\n\t\tcheck = func(resp *http.Response) bool {\n\t\t\treturn resp.StatusCode == http.StatusOK\n\t\t}\n\t}\n\treturn &FastestURL{\n\t\tClient: client,\n\t\tURLs: 
urls,\n\t\tRequest: req,\n\t\tRespCheck: check,\n\t}\n}", "func (c *Check) probeHTTP() error {\n\tclient := http.Client{\n\t\tTimeout: c.Timeout,\n\t}\n\n\tres, err := client.Get(c.URL.String())\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer res.Body.Close()\n\n\tif res.StatusCode != 200 {\n\t\treturn fmt.Errorf(\"request returned %d expected code 200: %s\", res.StatusCode, res.Status)\n\t}\n\treturn nil\n}", "func New(opts ...Option) (*Health, error) {\n\th := &Health{\n\t\tchecks: make(map[string]Config),\n\t\ttp: trace.NewNoopTracerProvider(),\n\t\tmaxConcurrent: runtime.NumCPU(),\n\t}\n\n\tfor _, o := range opts {\n\t\tif err := o(h); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treturn h, nil\n}", "func NewChecker(config *Config) (health.Checker, error) {\n\tif err := config.checkAndSetDefaults(); err != nil {\n\t\treturn nil, trace.Wrap(err)\n\t}\n\n\treturn &checker{\n\t\tConfig: config,\n\t\tFieldLogger: logrus.WithField(trace.Component, checkerID),\n\t}, nil\n}", "func (h HealthCheckerFunc) HealthCheck(target string, port uint16, proto string) (ok bool, err error) {\n\treturn h(target, port, proto)\n}", "func NewChecker(options ...Options) *Checker {\n\toption := Options{}\n\tif len(options) > 0 {\n\t\toption = options[0]\n\t}\n\treturn &Checker{\n\t\tJSONTag: option.JSONTag,\n\t\tDefinedChecks: map[string]Check{},\n\t}\n}", "func NewHealthCheck(rt *app.Runtime) operations.HealthCheckHandler {\n\treturn &healthCheck{}\n}", "func NewHTTP() *HTTPProbe {\n\treturn &HTTPProbe{}\n}", "func HealthCheck() http.Handler {\n\tmux := http.NewServeMux()\n\tmux.HandleFunc(\"/healthcheck\", func(w http.ResponseWriter, req *http.Request) {\n\t\tw.Write([]byte(\"OK\\n\"))\n\t})\n\treturn mux\n}", "func TestProbeHTTPSHTTP(t *testing.T) {\n\tserver := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tfmt.Fprintln(w, \"Hello world\")\n\t}))\n\tserver.Start()\n\tdefer server.Close()\n\n\tregistry := prometheus.NewRegistry()\n\n\tctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)\n\tdefer cancel()\n\n\tif err := ProbeHTTPS(ctx, newTestLogger(), server.URL, config.Module{}, registry); err == nil {\n\t\tt.Fatalf(\"expected error, but err was nil\")\n\t}\n}", "func NewHTTP(port uint16, pachClientFactory func(ctx context.Context) *client.APIClient) *HTTP {\n\tmux := http.NewServeMux()\n\thandler := &Server{\n\t\tpachClientFactory: pachClientFactory,\n\t}\n\tmux.Handle(\"/archive/\", CSRFWrapper(handler))\n\tmux.Handle(\"/healthz\", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tw.WriteHeader(http.StatusOK)\n\t\tw.Write([]byte(\"healthy\\n\")) //nolint:errcheck\n\t}))\n\treturn &HTTP{\n\t\tmux: mux,\n\t\tserver: &http.Server{\n\t\t\tAddr: fmt.Sprintf(\":%d\", port),\n\t\t\tHandler: mux,\n\t\t},\n\t}\n}", "func HealthCheck(w http.ResponseWriter, r *http.Request) {}", "func (t *TCPChecker) Check(extConfig external.Check) *pkg.CheckResult {\n\tc := extConfig.(v1.TCPCheck)\n\taddr, port, err := extractAddrAndPort(c.Endpoint)\n\tif err != nil {\n\t\treturn Failf(c, err.Error())\n\t}\n\n\ttimeout := time.Millisecond * time.Duration(c.ThresholdMillis)\n\tconn, err := net.DialTimeout(\"tcp\", net.JoinHostPort(addr, port), timeout)\n\tif err != nil {\n\t\treturn Failf(c, \"Connection error: %s\", err.Error())\n\t}\n\tif conn != nil {\n\t\tdefer conn.Close()\n\t}\n\treturn Passf(c, \"Successfully opened: %s\", net.JoinHostPort(addr, port))\n}", "func New(config HealthCheckConfig) *HealthCheck {\n\treturn 
&HealthCheck{\n\t\tbroker: &kafkaBrokerConnection{},\n\t\tzookeeper: &zkConnection{},\n\t\trandSrc: rand.NewSource(time.Now().UnixNano()),\n\t\tconfig: config,\n\t}\n}", "func NewChecker(_conf *domain.Config, servers []*domain.Server) (*HealthChecker, error) {\n\tif len(servers) == 0 {\n\t\treturn nil, errors.New(\"A server list expected, gotten an empty list\")\n\t}\n\treturn &HealthChecker{\n\t\tservers: servers,\n\t}, nil\n}", "func New(ssl bool, server string, port int, vhost, user, password string) *Check {\n\treturn &Check{\n\t\tserver: server,\n\t\tport: port,\n\t\tvhost: vhost,\n\t\tssl: ssl,\n\t\tuser: user,\n\t\tpassword: password,\n\t}\n}", "func Check(t ErrorReporter) func() {\n\thttpTransport, _ := http.DefaultTransport.(*http.Transport)\n\treturn CheckWithTransport(t, httpTransport)\n}", "func HTTPHealthChecks(namespace string, includePxL bool) []*pb.HealthCheck {\n\tchecks := []*pb.HealthCheck{\n\t\t{\n\t\t\tCheckType: &pb.HealthCheck_K8S{\n\t\t\t\tK8S: &pb.K8SPodsReadyCheck{\n\t\t\t\t\tNamespace: namespace,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\tif includePxL {\n\t\tt, err := template.New(\"\").Parse(httpHealthCheckScript)\n\t\tif err != nil {\n\t\t\tlog.WithError(err).Fatal(\"failed to parse HTTP healthcheck script\")\n\t\t}\n\t\tbuf := &strings.Builder{}\n\t\terr = t.Execute(buf, &struct {\n\t\t\tNamespace string\n\t\t}{\n\t\t\tNamespace: namespace,\n\t\t})\n\t\tif err != nil {\n\t\t\tlog.WithError(err).Fatal(\"failed to execute HTTP healthcheck template\")\n\t\t}\n\t\tchecks = append(checks, &pb.HealthCheck{\n\t\t\tCheckType: &pb.HealthCheck_PxL{\n\t\t\t\tPxL: &pb.PxLHealthCheck{\n\t\t\t\t\tScript: buf.String(),\n\t\t\t\t\tSuccessColumn: \"success\",\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t}\n\n\treturn checks\n}", "func HTTP(healthService string, httpTimeout time.Duration) bool {\n\tclient := http.Client{\n\t\tTimeout: httpTimeout,\n\t}\n\n\tresp, err := client.Get(healthService)\n\t// Check if response timeouts or returns an HTTP error\n\tif err != nil {\n\t\treturn false\n\t}\n\tdefer resp.Body.Close()\n\tbytes, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn false\n\t}\n\n\tif strings.Contains(string(bytes), \"healthy\") {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func New(url string, options ...Option) (*Client, error) {\n\topts := newOptions()\n\tfor _, option := range options {\n\t\toption(opts)\n\t}\n\n\t// Setup the HTTP client\n\thttpClient, err := tlsHTTPClient(opts)\n\tif err != nil {\n\t\treturn nil, errors.WithStack(err)\n\t}\n\n\t// Initialize the client struct\n\treturn &Client{\n\t\thttpCertificate: opts.tlsServerCert,\n\t\thttpHost: url,\n\t\thttpProtocol: \"https\",\n\t\thttpUserAgent: opts.userAgent,\n\t\thttp: httpClient,\n\t\tlogger: opts.logger,\n\t}, nil\n}", "func NewHealthcheck(name string, metric metrics.Healthcheck) interface {\n\tprometheus.Collector\n\tmetrics.Healthcheck\n} {\n\treturn healthcheck{\n\t\tHealthcheck: metric,\n\t\tgaugeAdapter: gaugeAdapter{\n\t\t\tmetric: func(snapshot interface{}) float64 {\n\t\t\t\tif snapshot.(metrics.Healthcheck).Error() != nil {\n\t\t\t\t\treturn 0\n\t\t\t\t}\n\t\t\t\treturn 1\n\t\t\t},\n\t\t\tsnapshot: func() interface{} {\n\t\t\t\treturn metric\n\t\t\t},\n\t\t\tdescription: newDescriptionFrom(name),\n\t\t},\n\t}\n}", "func NewCheckWithConfig(name string, checker Checker, logger *log.Entry, conf *config.Config) *Check {\n\tc := &Check{\n\t\tName: name,\n\t\tLogger: logger,\n\t\tconfig: conf,\n\t}\n\tc.ctx, c.cancel = context.WithCancel(context.Background())\n\tpushLogger := 
logging.BuildRootLogWithNameAndConfig(\"sdk.restPusher\", conf, name)\n\tpusher := rest.NewPusher(conf.Push, conf.Check.CheckID, pushLogger)\n\tr := agent.NewReportFromConfig(conf.Check)\n\tstateLogger := logging.BuildRootLogWithNameAndConfig(\"sdk.pushState\", conf, name)\n\tagentState := agent.State{Report: r}\n\tc.checkState = newState(agentState, pusher, stateLogger)\n\tc.api = newPushAPI(logger, c)\n\t// Initialize a sync point for goroutines to wait for the checker run method\n\t// to be finished, for instance a call to an abort method should wait in this sync point.\n\tc.checkerFinished = &sync.WaitGroup{}\n\tc.checker = checker\n\tc.Logger.Debug(\"New check created\")\n\treturn c\n}", "func healthcheck(ha *lib.HTTPAdapter) {\n\tr := mux.NewRouter()\n\tr.HandleFunc(\"/health\", ha.HealthHandler).Methods(\"GET\")\n\taddr := fmt.Sprintf(\"127.0.0.1:%v\", serverConfig.HealthcheckHTTPport)\n\tserver := &http.Server{Addr: addr, Handler: r}\n\tlogger.Printf(\"HTTP healthcheck listening on: %v\", addr)\n\tlogger.Println(server.ListenAndServe())\n}", "func (c HTTPGetHealthcheck) Execute() Result {\n\tinput := struct {\n\t\tURL string `json:\"url\"`\n\t}{\n\t\thttp.CleanURL(c.URL),\n\t}\n\n\tclient := http.NewClient(c.URL)\n\n\tstart := time.Now()\n\tresp, err := client.Get(\"\")\n\tend := time.Now()\n\n\tif err != nil {\n\t\treturn FailWithInput(err.Error(), input)\n\t}\n\n\tcontext := HTTPExpectationContext{\n\t\tResponse: resp,\n\t\tResponseTime: end.Sub(start),\n\t}\n\n\treturn c.VerifyExpectation(input, func(assertion interface{}) []*AssertionGroup {\n\t\treturn assertion.(HTTPResponseExpectation).Verify(context)\n\t})\n}", "func NewTCPHealthCheckerWithOptions(options *TCPHealthCheckOptions) HealthChecker {\n\treturn func() error {\n\t\tconn, err := net.DialTimeout(\"tcp\", options.Hostname+\":\"+strconv.Itoa(options.Port), options.Timeout)\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"tcp connection to %s:%s failed\", options.Hostname, options.Port)\n\t\t}\n\t\tdefer conn.Close()\n\t\treturn nil\n\t}\n}", "func HttpCheckHealthHandler(w http.ResponseWriter, r *http.Request) {\n\tresp, err := http.Get(\"http://reddit.com/r/golang.json\") //insert json-object here\n\tif err != nil {\n\t\tfmt.Println(\"Error: %g\", err)\n\t}\n\tfmt.Fprintf(w, \"<h1>Health Status</h1>\\nStatus: %s\", resp.Status)\n}", "func TestAgent_HTTPCheckPath(t *testing.T) {\n\tt.Parallel()\n\t// Agent.agentHTTPCheck only needs a config and logger\n\ta := &Agent{\n\t\tconfig: DevConfig(),\n\t\tlogger: log.New(ioutil.Discard, \"\", 0),\n\t}\n\tif err := a.config.normalizeAddrs(); err != nil {\n\t\tt.Fatalf(\"error normalizing config: %v\", err)\n\t}\n\tif testing.Verbose() {\n\t\ta.logger = log.New(os.Stderr, \"\", log.LstdFlags)\n\t}\n\n\t// Assert server check uses /v1/agent/health?type=server\n\tisServer := true\n\tcheck := a.agentHTTPCheck(isServer)\n\tif expected := \"Nomad Server HTTP Check\"; check.Name != expected {\n\t\tt.Errorf(\"expected server check name to be %q but found %q\", expected, check.Name)\n\t}\n\tif expected := \"/v1/agent/health?type=server\"; check.Path != expected {\n\t\tt.Errorf(\"expected server check path to be %q but found %q\", expected, check.Path)\n\t}\n\n\t// Assert client check uses /v1/agent/health?type=client\n\tisServer = false\n\tcheck = a.agentHTTPCheck(isServer)\n\tif expected := \"Nomad Client HTTP Check\"; check.Name != expected {\n\t\tt.Errorf(\"expected client check name to be %q but found %q\", expected, check.Name)\n\t}\n\tif expected := 
\"/v1/agent/health?type=client\"; check.Path != expected {\n\t\tt.Errorf(\"expected client check path to be %q but found %q\", expected, check.Path)\n\t}\n}", "func NewHealthCheck(rt *permissions.Runtime) operations.HealthCheckHandler {\n\treturn &healthCheck{}\n}", "func NewCheck() Check {\n\treturn Check{\n\t\tInterval: 60,\n\t\tquitChan: make(chan bool),\n\t}\n}", "func NewHealthCheck() jrpc2.Handler {\n\treturn handler.New(func(context.Context) HealthCheckResult {\n\t\treturn HealthCheckResult{Status: \"healthy\"}\n\t})\n}", "func HTTPClientWithTLSConfig(conf *tls.Config) *http.Client {\n\treturn &http.Client{\n\t\tTimeout: time.Second * 20,\n\t\tTransport: &http.Transport{\n\t\t\tDial: (&net.Dialer{\n\t\t\t\tTimeout: 5 * time.Second,\n\t\t\t}).Dial,\n\t\t\tTLSHandshakeTimeout: 5 * time.Second,\n\t\t\tTLSClientConfig: conf,\n\t\t},\n\t}\n}", "func New(mux *http.ServeMux, opts ...func(*Healthz)) (*Healthz, error) {\n\tif mux == nil {\n\t\treturn nil, errors.New(\"you must provide a http.ServeMux\")\n\t}\n\n\th := &Healthz{\n\t\talive: unhealthy,\n\t\tready: unhealthy,\n\t\tStatus: unhealthy,\n\t}\n\n\tfor _, opt := range opts {\n\t\topt(h)\n\t}\n\n\t// path.Join didn't work for the indexHandler here so...\n\tmux.HandleFunc(rootPrefix+\"/\", indexHandler())\n\tmux.HandleFunc(path.Join(rootPrefix, \"ready\"), h.readyHandler())\n\tmux.HandleFunc(path.Join(rootPrefix, \"alive\"), h.livelinessHandler())\n\tmux.HandleFunc(path.Join(rootPrefix, \"stats\"), h.statsHandler())\n\n\tmux.HandleFunc(path.Join(rootPrefix, \"diediedie\"), h.dieDieDieHandler())\n\tmux.HandleFunc(path.Join(rootPrefix, \"abortabortabort\"), h.abortAbortAbortHandler())\n\n\t// Add pprof outside of the default HTTP ServeMux\n\t//\n\t// path.Join doesn't work here and causes bad path to be created on the resulting\n\t// index page from the net/http/pprof package\n\tmux.HandleFunc(rootPrefix+\"/pprof/\", fakePProfIndexHandler())\n\tmux.HandleFunc(path.Join(rootPrefix, \"pprof/cmdline\"), pprof.Cmdline)\n\tmux.HandleFunc(path.Join(rootPrefix, \"pprof/profile\"), pprof.Profile)\n\tmux.HandleFunc(path.Join(rootPrefix, \"pprof/symbol\"), pprof.Symbol)\n\tmux.HandleFunc(path.Join(rootPrefix, \"pprof/trace\"), pprof.Trace)\n\n\treturn h, nil\n}", "func NewHTTPObj(u *url.URL) *HTTPBckObj {\n\thbo := &HTTPBckObj{\n\t\tBck: Bck{\n\t\t\tProvider: ProviderHTTP,\n\t\t\tNs: NsGlobal,\n\t\t},\n\t}\n\thbo.OrigURLBck, hbo.ObjName = filepath.Split(u.Path)\n\thbo.OrigURLBck = u.Scheme + \"://\" + u.Host + hbo.OrigURLBck\n\thbo.Bck.Name = OrigURLBck2Name(hbo.OrigURLBck)\n\treturn hbo\n}", "func CreateUptimeCheck(domain string, checkrate, timeout, confirmation, virus, donotfind, realbrowser, trigger, sslalert, follow int, contacts, testType, findstring, api, user, key string) bool {\n\ttarget, err := url.Parse(domain)\n\tif err != nil {\n\t\tfmt.Println(\"Please make sure to enter a valid domain (e.g https://www.domain.com)\")\n\t\treturn false\n\t}\n\tif target.Scheme == \"\" {\n\t\tfmt.Printf(\"Please add url scheme http/https to your domain %v\\n\", domain)\n\t\treturn false\n\t}\n\n\tp := url.Values{}\n\tp.Add(\"WebsiteName\", domain)\n\tp.Add(\"WebsiteURL\", domain)\n\tp.Add(\"CheckRate\", strconv.Itoa(checkrate))\n\tp.Add(\"Timeout\", strconv.Itoa(timeout))\n\tp.Add(\"Confirmation\", strconv.Itoa(confirmation))\n\tp.Add(\"Virus\", strconv.Itoa(virus))\n\tp.Add(\"RealBrowser\", strconv.Itoa(realbrowser))\n\tp.Add(\"TriggerRate\", strconv.Itoa(trigger))\n\tif testType == \"HTTP\" {\n\t\tp.Add(\"EnableSSLAlert\", 
strconv.Itoa(sslalert))\n\t\tp.Add(\"FollowRedirect\", strconv.Itoa(follow))\n\t}\n\tp.Add(\"ContactGroup\", contacts)\n\tp.Add(\"TestType\", testType)\n\tif len(findstring) > 0 {\n\t\tp.Add(\"FindString\", findstring)\n\t\tp.Add(\"DoNotFind\", strconv.Itoa(donotfind))\n\t}\n\tpayload := strings.NewReader(p.Encode())\n\n\tclient := &http.Client{}\n\trequest, _ := http.NewRequest(\"PUT\", api+\"/API/Tests/Update\", payload)\n\trequest.Header.Add(\"Username\", user)\n\trequest.Header.Add(\"API\", key)\n\trequest.Header.Add(\"Content-Type\", \"application/x-www-form-urlencoded\")\n\n\tresp, err := client.Do(request)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn false\n\t}\n\tdefer resp.Body.Close()\n\n\t// debug\n\t// responseBody, _ := ioutil.ReadAll(resp.Body)\n\t// log.Println(string(responseBody))\n\n\tif resp.StatusCode != 200 {\n\t\tmessage := helpers.ResolveStatusCode(resp.StatusCode)\n\t\tlog.Println(message)\n\t\treturn false\n\t}\n\treturn true\n}", "func RunHealthCheck(url string) (string, error) {\n\tvar (\n\t\tresp *http.Response\n\t\terr error\n\t)\n\n\tlastStatusCode, attempts := -1, 10\n\tb := &backoff.Backoff{\n\t\tFactor: 2,\n\t\tJitter: true,\n\t\tMin: 100 * time.Millisecond,\n\t\tMax: 5 * time.Second,\n\t}\n\n\tfor i := 0; i < attempts; i++ {\n\t\tresp, err = http.Get(url)\n\t\tif err == nil {\n\t\t\tlastStatusCode = resp.StatusCode\n\t\t}\n\t\tif lastStatusCode == http.StatusOK {\n\t\t\tbreak\n\t\t}\n\n\t\ttime.Sleep(b.Duration())\n\t}\n\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tbody, err := io.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn string(body), nil\n}", "func NewHTTPClientWithTimeout(t time.Duration) *http.Client {\n\ttr := &http.Transport{\n\t\t// Added IdleConnTimeout to reduce the time of idle connections which\n\t\t// could potentially slow macOS reconnection when there is a sudden\n\t\t// network disconnection/issue\n\t\tIdleConnTimeout: t,\n\t\tProxy: http.ProxyFromEnvironment,\n\t}\n\th := &http.Client{\n\t\tTransport: tr,\n\t\tTimeout: t}\n\treturn h\n}", "func Check(\n\thealthCheckFunc func() bool,\n\tpollDelay time.Duration,\n\thealthTimeout time.Duration,\n\tupdates chan<- bool,\n\tquit <-chan struct{},\n) {\n\tgo check(healthCheckFunc, pollDelay,\n\t\thealthTimeout, updates, quit)\n}", "func NewWebhookChecker(period time.Duration, labelSelectors, annotationSelectors []string, kubeconfigPath string, e *exporters.WebhookExporter) *PeriodicWebhookChecker {\n\treturn &PeriodicWebhookChecker{\n\t\tperiod: period,\n\t\tlabelSelectors: labelSelectors,\n\t\tannotationSelectors: annotationSelectors,\n\t\tkubeconfigPath: kubeconfigPath,\n\t\texporter: e,\n\t}\n}", "func NewHTTPFetcher(ctx context.Context, logger *logr.Logger, reqTimeout time.Duration) Fetcher {\n\treturn &httpFetcher{\n\t\tctx,\n\t\tlogger,\n\t\t&http.Client{Timeout: reqTimeout},\n\t}\n}", "func NewHTTPObj(u *url.URL) *HTTPBckObj {\n\thbo := &HTTPBckObj{\n\t\tBck: Bck{\n\t\t\tProvider: ProviderHTTP,\n\t\t\tNs: NsGlobal,\n\t\t},\n\t}\n\thbo.OrigURLBck, hbo.ObjName = filepath.Split(u.Path)\n\thbo.OrigURLBck = u.Scheme + \"://\" + u.Host + hbo.OrigURLBck\n\thbo.Bck.Name = cos.OrigURLBck2Name(hbo.OrigURLBck)\n\treturn hbo\n}", "func NewClient(uri string, requestTimeout time.Duration) Client {\n\treturn &client{\n\t\trequester: rpc.NewEndpointRequester(uri, \"/ext/health\", \"health\", requestTimeout),\n\t}\n}", "func NewHealthz() *Healthz {\n\treturn &Healthz{\n\t\tchecks: make(map[string]HealthCheck),\n\t}\n}", "func KubeletHealth(addr string) 
health.Checker {\n\treturn NewHTTPHealthzChecker(\"kubelet\", fmt.Sprintf(\"%v/healthz\", addr), kubeHealthz)\n}", "func New(logger *zap.Logger) *HTTP {\n\th := &HTTP{\n\t\tlogger: logger,\n\t\tcreateClientFunc: createHTTPClient,\n\t}\n\n\treturn h\n}", "func FromString(checkType string) (Check, error) {\n\tswitch checkType {\n\tcase \"http\":\n\t\treturn &HTTPCheck{}, nil\n\t}\n\treturn nil, fmt.Errorf(\"no such type: %s\", checkType)\n}", "func HealthCheck(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {\n\thttputil.SendOK(w)\n}", "func NewHTTP(config *Config, logger kitlog.Logger) *HTTP {\n\tlogger = kitlog.With(logger, \"module\", \"http\")\n\n\tsrv := &http.Server{\n\t\tAddr: config.Addr,\n\t\tReadTimeout: time.Duration(config.ServerTimeout) * time.Second,\n\t\tWriteTimeout: time.Duration(2*config.ServerTimeout) * time.Second,\n\t}\n\n\treturn &HTTP{\n\t\tlogger: logger,\n\t\tsrv: srv,\n\t\tConfig: config,\n\t}\n}", "func New() *keePassHTTP {\n\tkph := new(keePassHTTP)\n\n\t// replace mock fields\n\tkph.httpClient = &http.Client{Timeout: time.Second * 30}\n\tkph.randBytes = new(aes256CBCPksc7).randBytes\n\n\treturn kph\n}", "func WithHTTPMethod(method string) HTTPCheckerOption {\n\treturn func(c *HTTPChecker) {\n\t\tc.method = method\n\t}\n}", "func (t *TCPChecker) Check(ctx *context.Context, extConfig external.Check) pkg.Results {\n\tc := extConfig.(v1.TCPCheck)\n\tresult := pkg.Success(c, ctx.Canary)\n\tvar results pkg.Results\n\tresults = append(results, result)\n\n\tif connection, err := duty.FindConnectionByURL(ctx, db.Gorm, c.Endpoint); err != nil {\n\t\treturn results.Failf(\"failed to find TCP endpoint from connection %q: %v\", c.Endpoint, err)\n\t} else if connection != nil {\n\t\tc.Endpoint = connection.URL\n\t}\n\n\taddr, port, err := extractAddrAndPort(c.Endpoint)\n\tif err != nil {\n\t\treturn results.ErrorMessage(err)\n\t}\n\n\ttimeout := time.Millisecond * time.Duration(c.ThresholdMillis)\n\tconn, err := net.DialTimeout(\"tcp\", net.JoinHostPort(addr, port), timeout)\n\tif err != nil {\n\t\treturn results.Failf(\"Connection error: %s\", err)\n\t}\n\tif conn != nil {\n\t\tdefer conn.Close()\n\t}\n\treturn results\n}", "func (c *HTTPChecker) Run(ctx *context.Context) pkg.Results {\n\tvar results pkg.Results\n\tfor _, conf := range ctx.Canary.Spec.HTTP {\n\t\tresults = append(results, c.Check(ctx, conf)...)\n\t}\n\treturn results\n}", "func (checker *Checker) CheckHealth() (toReturn HealthCheckResponse, err error) {\n\thttpClient, err := gohclient.New(nil, checker.TargetHealthURL)\n\n\thttpResp, data, err := httpClient.Get(\"\")\n\n\tif httpResp != nil {\n\t\tif httpResp.StatusCode == http.StatusOK {\n\t\t\tif err == nil {\n\t\t\t\tif err = json.Unmarshal(data, &toReturn); err == nil {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\terr = fmt.Errorf(\"Health Check '%v': Unable to read response\", checker.TargetHealthURL)\n\t\t\t} else {\n\t\t\t\terr = fmt.Errorf(\"Health Check '%v': Unable to communicate\", checker.TargetHealthURL)\n\t\t\t}\n\t\t} else {\n\t\t\terr = fmt.Errorf(\"Health Check '%v': Not 200 OK; Getting %v\", checker.TargetHealthURL, httpResp.StatusCode)\n\t\t}\n\t} else {\n\t\terr = fmt.Errorf(\"Health Check '%v': Not possible to communicate with server: %v\", checker.TargetHealthURL, err)\n\t}\n\n\treturn\n}", "func NewCheck(c web.C, w http.ResponseWriter, req *http.Request) {\n\tuser, err := helpers.CurrentUser(c)\n\n\tif err != nil {\n\t\trenderError(c, w, req, \"You need to re-authenticate\", 
http.StatusUnauthorized)\n\t\treturn\n\t}\n\n\tchecksCount, err := user.ChecksCount()\n\n\ttemplates := render.GetBaseTemplates()\n\ttemplates = append(templates, \"web/views/new_check.html\")\n\tcsrf := nosurf.Token(req)\n\terr = render.Template(c, w, req, templates, \"layout\", map[string]interface{}{\"Title\": \"New Check\", \"CSRFToken\": csrf, \"ChecksCount\": checksCount})\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t}\n}", "func NewProxyHealth(origin *url.URL) *ProxyHealth {\n\th := &ProxyHealth{\n\t\torigin: origin,\n\t\tcheck: defaultHealthCheck,\n\t\tperiod: defaultHealthCheckPeriod,\n\t\tcancel: make(chan struct{}),\n\t\tisAvailable: defaultHealthCheck(origin),\n\t}\n\th.run()\n\n\treturn h\n}", "func NewHTTPOutlet(s *Shuttle) *HTTPOutlet {\n\treturn &HTTPOutlet{\n\t\tdrops: s.Drops,\n\t\tlost: s.Lost,\n\t\tlostMark: int(float64(s.config.BackBuff) * DepthHighWatermark),\n\t\tinbox: s.Batches,\n\t\tconfig: s.config,\n\t\tnewFormatterFunc: s.NewFormatterFunc,\n\t\tuserAgent: fmt.Sprintf(\"log-shuttle/%s (%s; %s; %s; %s)\", s.config.ID, runtime.Version(), runtime.GOOS, runtime.GOARCH, runtime.Compiler),\n\t\terrLogger: s.ErrLogger,\n\t\tLogger: s.Logger,\n\t\tclient: &http.Client{\n\t\t\tTimeout: s.config.Timeout,\n\t\t\tTransport: &http.Transport{\n\t\t\t\tTLSClientConfig: &tls.Config{\n\t\t\t\t\tInsecureSkipVerify: s.config.SkipVerify,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tinboxLengthGauge: metrics.GetOrRegisterGauge(\"outlet.inbox.length\", s.MetricsRegistry),\n\t\tpostSuccessTimer: metrics.GetOrRegisterTimer(\"outlet.post.success\", s.MetricsRegistry),\n\t\tpostFailureTimer: metrics.GetOrRegisterTimer(\"outlet.post.failure\", s.MetricsRegistry),\n\t\tmsgLostCount: metrics.GetOrRegisterCounter(\"msg.lost\", s.MetricsRegistry),\n\t}\n}", "func NewChecker(hash, salt string) Hash {\n\treturn Hash{\n\t\tSaltLength: DefaultSaltLength,\n\t\tHashFn: sha256.New(),\n\t\tHash: hash,\n\t\tSalt: salt,\n\t}\n}", "func (s *healthchecker) Healthcheck(r *http.Request) (interface{}, error) {\n\tl := s.log.WithField(\"handler\", \"Healthcheck\")\n\tl.Debug(\"New Healthcheck request received\")\n\tl.Debug(\"Returning newly generated Healthcheck\")\n\treturn &healthcheck{Status: \"OK\", Hostname: s.hostname}, nil\n}", "func (a *DefaultApiService) HealthCheck(ctx _context.Context) (*_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodGet\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/health\"\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, 
localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\treturn localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarHTTPResponse, nil\n}", "func NewHTTPExecuter(options *HTTPOptions) (*HTTPExecuter, error) {\n\tvar (\n\t\tproxyURL *url.URL\n\t\terr error\n\t)\n\n\tif options.ProxyURL != \"\" {\n\t\tproxyURL, err = url.Parse(options.ProxyURL)\n\t}\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Create the HTTP Client\n\tclient := makeHTTPClient(proxyURL, options)\n\t// nolint:bodyclose // false positive there is no body to close yet\n\tclient.CheckRetry = retryablehttp.HostSprayRetryPolicy()\n\n\tif options.CookieJar != nil {\n\t\tclient.HTTPClient.Jar = options.CookieJar\n\t} else if options.CookieReuse {\n\t\tjar, err := cookiejar.New(nil)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tclient.HTTPClient.Jar = jar\n\t}\n\n\t// initiate raw http client\n\trawClient := rawhttp.NewClient(rawhttp.DefaultOptions)\n\n\texecuter := &HTTPExecuter{\n\t\tdebug: options.Debug,\n\t\tjsonOutput: options.JSON,\n\t\tjsonRequest: options.JSONRequests,\n\t\tnoMeta: options.NoMeta,\n\t\thttpClient: client,\n\t\trawHTTPClient: rawClient,\n\t\ttraceLog: options.TraceLog,\n\t\ttemplate: options.Template,\n\t\tbulkHTTPRequest: options.BulkHTTPRequest,\n\t\twriter: options.Writer,\n\t\trandomAgent: options.RandomAgent,\n\t\tcustomHeaders: options.CustomHeaders,\n\t\tCookieJar: options.CookieJar,\n\t\tcoloredOutput: options.ColoredOutput,\n\t\tcolorizer: *options.Colorizer,\n\t\tdecolorizer: options.Decolorizer,\n\t\tstopAtFirstMatch: options.StopAtFirstMatch,\n\t\tpf: options.PF,\n\t\tratelimiter: options.RateLimiter,\n\t}\n\n\treturn executer, nil\n}", "func New(url string) *SlackNotify {\n\treturn &SlackNotify{\n\t\tURL: url,\n\t\tc: http.Client{\n\t\t\tTimeout: 10 * time.Second,\n\t\t},\n\t}\n}", "func NewBackendHealthCheck(options Options) *BackendHealthCheck {\n\treturn &BackendHealthCheck{\n\t\tOptions: options,\n\t\trequestTimeout: 5 * time.Second,\n\t}\n}", "func NewWatcher(host string, tls *TLSConfig, storeShortID bool) (Watcher, error) {\n\tvar httpClient *http.Client\n\tif tls != nil {\n\t\toptions := tlsconfig.Options{\n\t\t\tCAFile: tls.CA,\n\t\t\tCertFile: tls.Certificate,\n\t\t\tKeyFile: tls.Key,\n\t\t}\n\n\t\ttlsc, err := tlsconfig.Client(options)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\thttpClient = &http.Client{\n\t\t\tTransport: &http.Transport{\n\t\t\t\tTLSClientConfig: tlsc,\n\t\t\t},\n\t\t}\n\t}\n\n\tclient, err := NewClient(host, httpClient, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn NewWatcherWithClient(client, 60*time.Second, storeShortID)\n}", "func (c HTTPClient) Healthy(host string) bool {\n\treturn true\n}", "func New(filepath string, opts ...Option) (*Client, error) {\n\tvar options Options\n\tfor _, o := range opts {\n\t\to(&options)\n\t}\n\n\tc := &Client{filepath: filepath}\n\tif strings.HasPrefix(filepath, \"http://\") || strings.HasPrefix(filepath, \"https://\") {\n\t\tc.isURL = 
true\n\t\tc.httpClient = http.Client{\n\t\t\tTimeout: 5 * time.Second,\n\t\t\tTransport: &transport{\n\t\t\t\tHeaders: options.Headers,\n\t\t\t},\n\t\t}\n\t}\n\treturn c, nil\n}", "func CheckHost(ip *netip.Addr, cfg *pb.Config) (*pb.ServerStatus, *ntp.Response, error) {\n\n\tlog := logger.Setup()\n\n\tif cfg.Samples == 0 {\n\t\tcfg.Samples = 3\n\t}\n\n\topts := ntp.QueryOptions{\n\t\tTimeout: 3 * time.Second,\n\t}\n\n\tconfigIP := cfg.GetIP()\n\tif configIP != nil && configIP.IsValid() {\n\t\topts.LocalAddress = configIP.String()\n\t\tif natIP := cfg.GetNatIP(); natIP != nil && natIP.IsValid() {\n\t\t\topts.LocalAddress = natIP.String()\n\t\t}\n\t} else {\n\t\tlog.Error(\"Did not get valid local configuration IP\", \"configIP\", configIP)\n\t}\n\n\tif ip.IsLoopback() {\n\t\treturn nil, nil, fmt.Errorf(\"loopback address\")\n\t}\n\tif ip.IsPrivate() {\n\t\treturn nil, nil, fmt.Errorf(\"private address\")\n\t}\n\tif ip.IsMulticast() {\n\t\treturn nil, nil, fmt.Errorf(\"multicast address\")\n\t}\n\tif !ip.IsValid() {\n\t\treturn nil, nil, fmt.Errorf(\"invalid IP\")\n\t}\n\n\tresponses := []*response{}\n\n\tfor i := int32(0); i < cfg.Samples; i++ {\n\n\t\tif i > 0 {\n\t\t\t// minimum headway time is 2 seconds, https://www.eecis.udel.edu/~mills/ntp/html/rate.html\n\t\t\ttime.Sleep(2 * time.Second)\n\t\t}\n\n\t\tipStr := ip.String()\n\t\tif ip.Is6() {\n\t\t\tipStr = \"[\" + ipStr + \"]:123\"\n\t\t}\n\n\t\tresp, err := ntp.QueryWithOptions(ipStr, opts)\n\t\tif err != nil {\n\t\t\tr := &response{\n\t\t\t\tStatus: &pb.ServerStatus{},\n\t\t\t}\n\t\t\tr.Status.SetIP(ip)\n\t\t\tif resp != nil {\n\t\t\t\tr.Response = resp\n\t\t\t\tr.Status = ntpResponseToStatus(ip, resp)\n\t\t\t}\n\t\t\tr.Error = err\n\t\t\tresponses = append(responses, r)\n\n\t\t\tlog.Debug(\"ntp query error\", \"host\", ip.String(), \"iteration\", i, \"error\", err)\n\n\t\t\tcontinue\n\t\t}\n\n\t\tstatus := ntpResponseToStatus(ip, resp)\n\n\t\tlog.Debug(\"ntp query\", \"host\", ip.String(), \"iteration\", i, \"rtt\", resp.RTT.String(), \"offset\", resp.ClockOffset, \"error\", err)\n\n\t\t// if we get an explicit bad response in any of the samples, we error out\n\t\tif resp.Stratum == 0 || resp.Stratum == 16 {\n\t\t\tif len(resp.KissCode) > 0 {\n\t\t\t\tif resp.KissCode == \"RATE\" {\n\t\t\t\t\tstatus.Offset = nil\n\t\t\t\t}\n\t\t\t\treturn status, resp, fmt.Errorf(\"%s\", resp.KissCode)\n\t\t\t}\n\n\t\t\trefText := fmt.Sprintf(\"%#x\", resp.ReferenceID)\n\n\t\t\trefIDStr := referenceIDString(resp.ReferenceID)\n\t\t\tif utf8.Valid([]byte(refIDStr)) {\n\t\t\t\trefText = refText + \", \" + refIDStr\n\t\t\t}\n\n\t\t\treturn status, resp,\n\t\t\t\tfmt.Errorf(\"bad stratum %d (referenceID: %s)\",\n\t\t\t\t\tresp.Stratum, refText)\n\t\t}\n\n\t\tif resp.Stratum > 6 {\n\t\t\treturn status, resp, fmt.Errorf(\"bad stratum %d\", resp.Stratum)\n\t\t}\n\n\t\tresponses = append(responses, &response{\n\t\t\tStatus: status,\n\t\t\tResponse: resp,\n\t\t})\n\t}\n\n\tvar best *response\n\n\t// log.Printf(\"for %s we collected %d samples, now find the best result\", ip.String(), len(statuses))\n\n\t// todo: if there are more than 2 (3?) samples with an offset, throw\n\t// away the offset outlier(s)\n\n\tfor _, r := range responses {\n\n\t\t// log.Printf(\"status for %s / %d: offset: %s rtt: %s err: %q\", ip.String(), i, status.Offset.AsDuration(), status.RTT.AsDuration(), status.Error)\n\n\t\tif best == nil {\n\t\t\tbest = r\n\t\t\tcontinue\n\t\t}\n\n\t\t// todo: ... 
and it's otherwise a valid response?\n\t\tif (r.Error == nil && best.Error != nil) || (r.Status.RTT.AsDuration() < best.Status.RTT.AsDuration()) {\n\t\t\tbest = r\n\t\t}\n\t}\n\n\t// errLog := \"\"\n\t// if len(best.Error) > 0 {\n\t// \terrLog = fmt.Sprintf(\" err: %q\", best.Error)\n\t// }\n\t// log.Printf(\"best result for %s - offset: %s rtt: %s%s\",\n\t// \tip.String(), best.Offset.AsDuration(), best.RTT.AsDuration(), errLog)\n\n\tif best.Error != nil {\n\t\treturn best.Status, best.Response, fmt.Errorf(\"%s\", best.Error)\n\t}\n\n\treturn best.Status, best.Response, nil\n}", "func NewHTTP(host string, port int) Static {\n\treturn Static{\n\t\tprotocol: ProtocolHTTP,\n\t\thost: host,\n\t\tport: port,\n\t}\n}", "func NewGenericCheck(\n\tname string,\n\tvalidatorFn ValidatorFunc,\n\tmetadata Metadata,\n\thelptext HelpText) Check {\n\treturn &genericCheckDefinition{\n\t\tname: name,\n\t\tvalidatorFn: validatorFn,\n\t\tmetadata: metadata,\n\t\thelpText: helptext,\n\t}\n}", "func NamedCheck(name string, check func() error) Checker {\n\treturn &healthzCheck{name, check}\n}", "func NewHTTPClient(url, endpoint string, timeout time.Duration) *HTTPClient {\n\treturn &HTTPClient{\n\t\turl: url,\n\t\thttpClient: &http.Client{Timeout: timeout},\n\t\tendPoint: endpoint,\n\t}\n}", "func New(dnsList, domainList []string, checkInterval time.Duration) *Checker {\n\tc := &Checker{}\n\tif len(dnsList) == 0 {\n\t\tc.DNSList = DefaultDNSList\n\t} else {\n\t\tc.DNSList = dnsList\n\t}\n\n\tif len(domainList) == 0 {\n\t\tc.DomainList = DefaultDomainList\n\t} else {\n\t\tc.DomainList = domainList\n\t}\n\n\tif checkInterval == 0 {\n\t\tc.CheckInterval = DefaultCheckInterval\n\t} else {\n\t\tc.CheckInterval = checkInterval\n\t}\n\n\tgo c.Monitor()\n\treturn c\n}", "func NewHTTPClientWithConfig(formats strfmt.Registry, cfg *TransportConfig) *CiliumHealthAPI {\n\t// ensure nullable parameters have default\n\tif cfg == nil {\n\t\tcfg = DefaultTransportConfig()\n\t}\n\n\t// create transport and client\n\ttransport := httptransport.New(cfg.Host, cfg.BasePath, cfg.Schemes)\n\treturn New(transport, formats)\n}" ]
[ "0.77532184", "0.6886548", "0.6650926", "0.59350383", "0.5832938", "0.5832938", "0.57598746", "0.57598746", "0.5694342", "0.5670222", "0.5615457", "0.55585825", "0.555668", "0.5455581", "0.5411418", "0.5404947", "0.54013574", "0.5372818", "0.5300099", "0.5285249", "0.5270361", "0.5260785", "0.5260785", "0.52587944", "0.5248054", "0.51935816", "0.51852405", "0.51388913", "0.5119891", "0.5119027", "0.5111805", "0.5104592", "0.5092121", "0.50759375", "0.5065982", "0.5010345", "0.5003917", "0.4962307", "0.49535415", "0.49533367", "0.4949673", "0.49379203", "0.4933554", "0.49091017", "0.49053934", "0.49025756", "0.48874307", "0.48419002", "0.48383528", "0.4792318", "0.47801605", "0.47733825", "0.47733814", "0.47677416", "0.47647744", "0.47590628", "0.4754422", "0.47235546", "0.4723231", "0.47186047", "0.4718254", "0.4698307", "0.4694518", "0.4691482", "0.4687369", "0.46859398", "0.46634254", "0.46631846", "0.46599594", "0.46564808", "0.46561363", "0.46481928", "0.46403268", "0.4618154", "0.4614301", "0.46035352", "0.4600095", "0.45973316", "0.45946506", "0.45931855", "0.45904738", "0.45879748", "0.45863497", "0.4584367", "0.45830268", "0.45815593", "0.45794967", "0.45738354", "0.45723808", "0.45705834", "0.45705634", "0.4556179", "0.45510247", "0.45498648", "0.45475316", "0.45474872", "0.4546451", "0.4539928", "0.45216882", "0.45207563" ]
0.8291445
0
Check implements the Checker interface and checks the HTTP endpoint status.
func (c *HTTPChecker) Check() error {
	client := &http.Client{
		Timeout: c.timeout,
	}

	req, err := http.NewRequest(c.method, c.url, nil)
	if err != nil {
		return err
	}

	resp, err := client.Do(req)
	if err != nil {
		return err
	}

	if resp.StatusCode != http.StatusOK {
		return ErrCheckFailed
	}

	return nil
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (e *Endpoint) Check(ctx echo.Context) error {\n\thealthData := e.service.HealthCheck()\n\n\tif !healthData.Database {\n\t\treturn ctx.JSON(http.StatusServiceUnavailable, healthData)\n\t}\n\treturn ctx.JSON(http.StatusOK, healthData)\n}", "func (h *HealthImpl) Check(ctx context.Context, req *grpc_health_v1.HealthCheckRequest) (*grpc_health_v1.HealthCheckResponse, error) {\n\n\treturn &grpc_health_v1.HealthCheckResponse{\n\t\tStatus: grpc_health_v1.HealthCheckResponse_SERVING,\n\t}, nil\n}", "func (s *Server) Check(ctx context.Context, in *grpc_health_v1.HealthCheckRequest) (*grpc_health_v1.HealthCheckResponse, error) {\n\tresp := &grpc_health_v1.HealthCheckResponse{}\n\tif len(in.Service) == 0 || in.Service == serviceName {\n\t\tresp.Status = grpc_health_v1.HealthCheckResponse_SERVING\n\t}\n\treturn resp, nil\n}", "func (hc *HealthService) Check(ctx context.Context, request *grpchealthv1.HealthCheckRequest) (*grpchealthv1.HealthCheckResponse, error) {\n\tif request == nil {\n\t\tst := status.New(codes.InvalidArgument, \"health check request is nil\")\n\t\treturn createHealthCheckResponse(grpchealthv1.HealthCheckResponse_UNKNOWN), st.Err()\n\t}\n\n\tif err := hc.checker.Check(ctx); err != nil {\n\t\treturn createHealthCheckResponse(grpchealthv1.HealthCheckResponse_NOT_SERVING), err\n\t}\n\treturn createHealthCheckResponse(grpchealthv1.HealthCheckResponse_SERVING), nil\n}", "func (s *Server) Check(ctx context.Context, req *healthpb.HealthCheckRequest) (*healthpb.HealthCheckResponse, error) {\n\treturn &healthpb.HealthCheckResponse{\n\t\tStatus: healthpb.HealthCheckResponse_SERVING,\n\t}, nil\n}", "func (s *HealthServer) Check(ctx context.Context, in *healthpb.HealthCheckRequest) (*healthpb.HealthCheckResponse, error) {\n\tlog.Printf(\"Handling Check request [%v]\", in)\n\treturn &healthpb.HealthCheckResponse{Status: healthpb.HealthCheckResponse_SERVING}, nil\n}", "func (ac *ApiConfig) CheckStatus(w http.ResponseWriter, r *http.Request) {\n\tif r.Method != http.MethodGet {\n\t\thttp.Error(w, r.Method+\" is not available\", http.StatusInternalServerError)\n\t\tzerolog.Error().Msg(r.Method + \" is not available\")\n\t\treturn\n\t}\n\n\tstat := &models.StatusIdentifier{\n\t\tOk: true,\n\t\tMessage: \"Everything is alright\",\n\t}\n\n\terr := dResponseWriter(w, stat, http.StatusOK)\n\tif err != nil {\n\t\tzerolog.Error().Msg(err.Error())\n\t\treturn\n\t}\n\n\treturn\n}", "func HealthCheck(endpoint string) HandlerFunc {\n\treturn func(c *Context) {\n\t\tr := c.Request\n\t\tif r.Method == \"GET\" && strings.EqualFold(r.URL.Path, endpoint) {\n\t\t\tc.String(http.StatusOK, \"\")\n\t\t\tc.Abort()\n\t\t\treturn\n\t\t}\n\n\t\tc.Next()\n\t}\n}", "func HealthCheck(w http.ResponseWriter, r *http.Request) {}", "func (s *Server) Check(ctx context.Context, args *pb.HealthCheckRequest) (*pb.HealthCheckResponse, error) {\n\treturn &pb.HealthCheckResponse{\n\t\tStatus: pb.HealthCheckResponse_SERVING,\n\t}, nil\n}", "func (s *service) Check(ctx context.Context, request *Request) *Response {\n\tresponse := NewResponse()\n\n\terr := s.check(ctx, request, response)\n\tif err != nil {\n\t\tresponse.Error = err.Error()\n\t\tresponse.Status = base.StatusError\n\t} else if response.UnprocessedCount > 0 {\n\t\tresponse.Status = base.StatusUnProcess\n\t} else if len(response.Errors) > 0 {\n\t\tresponse.Status = base.StatusError\n\t\tresponse.Error = response.Errors[0].Message\n\t}\n\treturn response\n}", "func healthCheck(w http.ResponseWriter, r *http.Request) {\n\tfmt.Fprintf(w, \"Status OK.\\n\")\n}", "func 
(checker *Checker) CheckHealth() (toReturn HealthCheckResponse, err error) {\n\thttpClient, err := gohclient.New(nil, checker.TargetHealthURL)\n\n\thttpResp, data, err := httpClient.Get(\"\")\n\n\tif httpResp != nil {\n\t\tif httpResp.StatusCode == http.StatusOK {\n\t\t\tif err == nil {\n\t\t\t\tif err = json.Unmarshal(data, &toReturn); err == nil {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\terr = fmt.Errorf(\"Health Check '%v': Unable to read response\", checker.TargetHealthURL)\n\t\t\t} else {\n\t\t\t\terr = fmt.Errorf(\"Health Check '%v': Unable to communicate\", checker.TargetHealthURL)\n\t\t\t}\n\t\t} else {\n\t\t\terr = fmt.Errorf(\"Health Check '%v': Not 200 OK; Getting %v\", checker.TargetHealthURL, httpResp.StatusCode)\n\t\t}\n\t} else {\n\t\terr = fmt.Errorf(\"Health Check '%v': Not possible to communicate with server: %v\", checker.TargetHealthURL, err)\n\t}\n\n\treturn\n}", "func healthCheck(w http.ResponseWriter, r *http.Request) {\n\tlog.Printf(\"Serving request: %s\", r.URL.Path)\n\tfmt.Fprintf(w, \"Ok\")\n}", "func (t *TCPChecker) Check(extConfig external.Check) *pkg.CheckResult {\n\tc := extConfig.(v1.TCPCheck)\n\taddr, port, err := extractAddrAndPort(c.Endpoint)\n\tif err != nil {\n\t\treturn Failf(c, err.Error())\n\t}\n\n\ttimeout := time.Millisecond * time.Duration(c.ThresholdMillis)\n\tconn, err := net.DialTimeout(\"tcp\", net.JoinHostPort(addr, port), timeout)\n\tif err != nil {\n\t\treturn Failf(c, \"Connection error: %s\", err.Error())\n\t}\n\tif conn != nil {\n\t\tdefer conn.Close()\n\t}\n\treturn Passf(c, \"Successfully opened: %s\", net.JoinHostPort(addr, port))\n}", "func Check(t ErrorReporter) func() {\n\thttpTransport, _ := http.DefaultTransport.(*http.Transport)\n\treturn CheckWithTransport(t, httpTransport)\n}", "func HealthCheck(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {\n\thttputil.SendOK(w)\n}", "func (cs *checkoutService) Check(ctx context.Context, req *healthpb.HealthCheckRequest) (*healthpb.HealthCheckResponse, error) {\n\treturn &healthpb.HealthCheckResponse{Status: healthpb.HealthCheckResponse_SERVING}, nil\n}", "func (c *DogHouseClient) Check(ctx context.Context, req *doghouse.CheckRequest) (*doghouse.CheckResponse, error) {\n\tcheckURL := c.BaseURL.String() + \"/check\"\n\tb, err := json.Marshal(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\thttpReq, err := http.NewRequest(http.MethodPost, checkURL, bytes.NewReader(b))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\thttpReq = httpReq.WithContext(ctx)\n\thttpReq.Header.Set(\"Content-Type\", \"application/json\")\n\thttpReq.Header.Set(\"User-Agent\", fmt.Sprintf(\"reviewdog/%s\", commands.Version))\n\n\thttpResp, err := c.Client.Do(httpReq)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Check request failed: %w\", err)\n\t}\n\tdefer httpResp.Body.Close()\n\n\trespb, err := ioutil.ReadAll(httpResp.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif httpResp.StatusCode != http.StatusOK {\n\t\treturn nil, fmt.Errorf(\"status=%v: %s\", httpResp.StatusCode, respb)\n\t}\n\n\tvar resp doghouse.CheckResponse\n\tif err := json.Unmarshal(respb, &resp); err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to decode response: error=%w, resp=%s\", err, respb)\n\t}\n\treturn &resp, nil\n}", "func (t *TCPChecker) Check(ctx *context.Context, extConfig external.Check) pkg.Results {\n\tc := extConfig.(v1.TCPCheck)\n\tresult := pkg.Success(c, ctx.Canary)\n\tvar results pkg.Results\n\tresults = append(results, result)\n\n\tif connection, err := duty.FindConnectionByURL(ctx, db.Gorm, 
c.Endpoint); err != nil {\n\t\treturn results.Failf(\"failed to find TCP endpoint from connection %q: %v\", c.Endpoint, err)\n\t} else if connection != nil {\n\t\tc.Endpoint = connection.URL\n\t}\n\n\taddr, port, err := extractAddrAndPort(c.Endpoint)\n\tif err != nil {\n\t\treturn results.ErrorMessage(err)\n\t}\n\n\ttimeout := time.Millisecond * time.Duration(c.ThresholdMillis)\n\tconn, err := net.DialTimeout(\"tcp\", net.JoinHostPort(addr, port), timeout)\n\tif err != nil {\n\t\treturn results.Failf(\"Connection error: %s\", err)\n\t}\n\tif conn != nil {\n\t\tdefer conn.Close()\n\t}\n\treturn results\n}", "func (r *ReadinessCRD) Check(_ *http.Request) error {\n\tresult, err := r.check()\n\tif result && err == nil {\n\t\treturn nil\n\t}\n\n\treturn fmt.Errorf(\"CRDs are not ready\")\n}", "func healthCheck(w http.ResponseWriter, r *http.Request) {\n\tw.Write([]byte(\"Ready\"))\n}", "func (s *WorkersService) Check() (CheckResult, error) {\n\tclient := &http.Client{\n\t\tTimeout: clientTimeout,\n\t}\n\n\treq, err := http.NewRequest(http.MethodGet, s.url, nil)\n\tq := req.URL.Query()\n\tq.Add(OSKeyName, runtime.GOOS)\n\tq.Add(ArchitectureKeyName, runtime.GOARCH)\n\tq.Add(ClientVersionName, s.currentVersion)\n\n\tif s.opts.IsBeta {\n\t\tq.Add(BetaKeyName, \"true\")\n\t}\n\n\tif s.opts.RequestedVersion != \"\" {\n\t\tq.Add(VersionKeyName, s.opts.RequestedVersion)\n\t}\n\n\treq.URL.RawQuery = q.Encode()\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\n\tvar v VersionResponse\n\tif err := json.NewDecoder(resp.Body).Decode(&v); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif v.Error != \"\" {\n\t\treturn nil, errors.New(v.Error)\n\t}\n\n\tversionToUpdate := \"\"\n\tif v.ShouldUpdate {\n\t\tversionToUpdate = v.Version\n\t}\n\n\treturn NewWorkersVersion(v.URL, versionToUpdate, v.Checksum, s.targetPath, v.UserMessage, v.IsCompressed), nil\n}", "func healthCheck(w http.ResponseWriter, r *http.Request) {\n\tw.WriteHeader(http.StatusOK)\n\treturn\n}", "func HealthCheck(w http.ResponseWriter, r *http.Request) {\n\tw.WriteHeader(http.StatusOK)\n\tw.Write([]byte(\"tested OK\\n\"))\n}", "func (c clientGRPC) HealthCheck(url *url.URL) error {\n\tconn, err := getConn(url.Host, c.options)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer conn.Close()\n\n\thealthClient := healthpb.NewHealthClient(conn)\n\tctx, cancel := context.WithTimeout(context.Background(), c.options.Timeout)\n\tdefer cancel()\n\n\thealth, err := healthClient.Check(ctx, &healthpb.HealthCheckRequest{})\n\tif err != nil {\n\t\treturn err\n\t}\n\tif health.Status != healthpb.HealthCheckResponse_SERVING {\n\t\treturn ErrServiceNotAvailable\n\t}\n\treturn nil\n}", "func healthcheck(w http.ResponseWriter, r *http.Request) {\n\tw.WriteHeader(http.StatusOK)\n}", "func HealthCheck(w http.ResponseWriter, r *http.Request) {\n\tw.WriteHeader(http.StatusOK)\n}", "func (fwdclient *Client) HealthCheck() error {\n\tlog.Debugf(\"%s: url=%s\", fwdclient.AppName, fwdclient.ActionUrls.Health)\n\treq, err := http.NewRequest(\"GET\", fwdclient.ActionUrls.Health, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\treq.Header.Set(\"Authorization\", fmt.Sprintf(\"Splunk %s\", fwdclient.Token))\n\tresp, err := fwdclient.httpclient.Do(req)\n\tif err != nil {\n\t\treturn fmt.Errorf(\" Please check splunk authorization token. 
%s: Health check failed: %s\", fwdclient.AppName, err)\n\t}\n\tdefer resp.Body.Close()\n\tlog.Debugf(\"%s: status=%d %s\", fwdclient.AppName, resp.StatusCode, http.StatusText(resp.StatusCode))\n\tif resp.StatusCode != 200 {\n\t\treturn fmt.Errorf(\"%s: Failed during Health check : %d %s\", fwdclient.AppName, resp.StatusCode, http.StatusText(resp.StatusCode))\n\t}\n\trespBody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"%s: Failed while reading health response body: %s\", fwdclient.AppName, err)\n\t}\n\thealthCheckResponse := new(HealthCheckResponse)\n\tif err := json.Unmarshal(respBody, healthCheckResponse); err != nil {\n\t\treturn fmt.Errorf(\"%s: health check failed: the response is not JSON but: %s\", fwdclient.AppName, respBody)\n\t}\n\tlog.Debugf(\"%s: code=%d, text=%s\", fwdclient.AppName, healthCheckResponse.Code, healthCheckResponse.Text)\n\treturn nil\n}", "func (e AuthService) Check() (bool, error) {\n\turl := \"/authentication\"\n\n\tresp, err := e.client.MakeRequest(\n\t\t\"GET\",\n\t\turl,\n\t\t0,\n\t\tnil,\n\t)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tdefer resp.Body.Close()\n\n\tswitch resp.StatusCode {\n\tcase http.StatusOK:\n\t\treturn true, nil\n\tcase http.StatusUnauthorized:\n\t\tfallthrough\n\tcase http.StatusForbidden:\n\t\treturn false, nil\n\tdefault:\n\t\treturn false, e.client.handleUnexpectedResponse(resp)\n\t}\n}", "func (service *DaemonHeartbeat) Check(ctx context.Context, req *grpc_health_v1.HealthCheckRequest) (*grpc_health_v1.HealthCheckResponse, error) {\n\n\theartbeat, err := GetHeartbeat(config.GetString(config.HeartbeatServiceEndpoint), config.GetString(config.ServiceHeartbeatType),\n\t\tconfig.GetString(config.ServiceId))\n\n\tif strings.Compare(heartbeat.Status, Online.String()) == 0 {\n\t\treturn &grpc_health_v1.HealthCheckResponse{Status: grpc_health_v1.HealthCheckResponse_SERVING}, nil\n\t}\n\n\treturn &grpc_health_v1.HealthCheckResponse{Status: grpc_health_v1.HealthCheckResponse_SERVICE_UNKNOWN}, errors.New(\"Service heartbeat unknown \" + err.Error())\n}", "func (r *checker) Check(ctx context.Context, reporter health.Reporter) {\n\tif err := r.check(ctx, reporter); err != nil {\n\t\tr.WithError(err).Debug(\"Failed to verify latency.\")\n\t\treturn\n\t}\n\tif reporter.NumProbes() == 0 {\n\t\treporter.Add(successProbe(r.NodeName, r.LatencyThreshold))\n\t}\n}", "func (a *DefaultApiService) HealthCheck(ctx _context.Context) (*_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodGet\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/health\"\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, 
localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\treturn localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarHTTPResponse, nil\n}", "func (h *handlerState) Check(ctx context.Context, tracker attribute.Tracker, request *mixerpb.CheckRequest, response *mixerpb.CheckResponse) {\n\tresponse.RequestIndex = request.RequestIndex\n\tresponse.Result = h.execute(ctx, tracker, request.AttributeUpdate, config.CheckMethod)\n\tif glog.V(2) {\n\t\tglog.Infof(\"Check (%v %v) ==> %v \", tracker, request.AttributeUpdate, response)\n\t}\n}", "func (c *Client) CheckAvailability(ctx context.Context, p *CheckAvailabilityPayload) (res *CheckAvailabilityResponse, err error) {\n\tvar ires interface{}\n\tires, err = c.CheckAvailabilityEndpoint(ctx, p)\n\tif err != nil {\n\t\treturn\n\t}\n\treturn ires.(*CheckAvailabilityResponse), nil\n}", "func (s *CAServer) Check(ctx context.Context, in *ghc.HealthCheckRequest) (*ghc.HealthCheckResponse, error) {\n\treturn &ghc.HealthCheckResponse{\n\t\tStatus: ghc.HealthCheckResponse_SERVING,\n\t}, nil\n}", "func (cnt controller) HealthCheck(w http.ResponseWriter, req *http.Request) {\n\tif _, err := w.Write([]byte(successfulResponse)); err != nil {\n\t\tcnt.lgr.Warn(\"Error returning health check \" + err.Error())\n\t}\n}", "func (f *FakeTunnel) CheckStatus() error {\n\treturn nil\n}", "func Check(\n\thealthCheckFunc func() bool,\n\tpollDelay time.Duration,\n\thealthTimeout time.Duration,\n\tupdates chan<- bool,\n\tquit <-chan struct{},\n) {\n\tgo check(healthCheckFunc, pollDelay,\n\t\thealthTimeout, updates, quit)\n}", "func (s *server) Check(\n\tctx context.Context,\n\treq *envoy_service_auth_v3.CheckRequest) (*envoy_service_auth_v3.CheckResponse, error) {\n\tauthorization := req.Attributes.Request.Http.Headers[\"reversed-vpn\"]\n\n\tif len(authorization) > 0 {\n\t\tvalid, err := s.services.Check(authorization)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"request from: %s denied!\\n\", req.Attributes.Request.Http.Headers[\"reversed-vpn\"])\n\t\t\treturn &envoy_service_auth_v3.CheckResponse{\n\t\t\t\tStatus: &status.Status{\n\t\t\t\t\tCode: int32(code.Code_PERMISSION_DENIED),\n\t\t\t\t},\n\t\t\t}, err\n\t\t}\n\t\tif valid {\n\t\t\tlog.Printf(\"request from: %s accepted!\\n\", req.Attributes.Request.Http.Headers[\"reversed-vpn\"])\n\t\t\treturn &envoy_service_auth_v3.CheckResponse{\n\t\t\t\tStatus: &status.Status{\n\t\t\t\t\tCode: int32(code.Code_OK),\n\t\t\t\t},\n\t\t\t}, nil\n\t\t}\n\t}\n\n\tlog.Printf(\"request from: %s denied!\\n\", req.Attributes.Request.Http.Headers[\"reversed-vpn\"])\n\treturn &envoy_service_auth_v3.CheckResponse{\n\t\tStatus: &status.Status{\n\t\t\tCode: int32(code.Code_PERMISSION_DENIED),\n\t\t},\n\t}, nil\n}", "func Check(w http.ResponseWriter, req *http.Request) {\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tjson.NewEncoder(w).Encode(&Health{\"hello\", 200})\n}", "func HealthCheck(w 
http.ResponseWriter, req *http.Request) {\n\tlog.Println(\"🚑 healthcheck ok!\")\n\tw.WriteHeader(http.StatusOK)\n}", "func CheckAPI(url string) bool {\n\treturn true\n}", "func CheckStatus(w http.ResponseWriter, r *http.Request) {\n\tjson.NewEncoder(w).Encode(Status{Message : \"Alive\"})\n}", "func (ping) Check() error {\n\treturn nil\n}", "func (s ServiceInfo) HealthCheck() string {\n\thc := fmt.Sprintf(\"%s://%s:%v%s\", \"http\", s.Host, s.Port, common.ApiPingRoute)\n\treturn hc\n}", "func TestAgent_HTTPCheck(t *testing.T) {\n\tt.Parallel()\n\tlogger := log.New(ioutil.Discard, \"\", 0)\n\tif testing.Verbose() {\n\t\tlogger = log.New(os.Stdout, \"[TestAgent_HTTPCheck] \", log.Lshortfile)\n\t}\n\tagent := func() *Agent {\n\t\treturn &Agent{\n\t\t\tlogger: logger,\n\t\t\tconfig: &Config{\n\t\t\t\tAdvertiseAddrs: &AdvertiseAddrs{HTTP: \"advertise:4646\"},\n\t\t\t\tnormalizedAddrs: &Addresses{HTTP: \"normalized:4646\"},\n\t\t\t\tConsul: &sconfig.ConsulConfig{\n\t\t\t\t\tChecksUseAdvertise: helper.BoolToPtr(false),\n\t\t\t\t},\n\t\t\t\tTLSConfig: &sconfig.TLSConfig{EnableHTTP: false},\n\t\t\t},\n\t\t}\n\t}\n\n\tt.Run(\"Plain HTTP Check\", func(t *testing.T) {\n\t\ta := agent()\n\t\tcheck := a.agentHTTPCheck(false)\n\t\tif check == nil {\n\t\t\tt.Fatalf(\"expected non-nil check\")\n\t\t}\n\t\tif check.Type != \"http\" {\n\t\t\tt.Errorf(\"expected http check not: %q\", check.Type)\n\t\t}\n\t\tif expected := \"/v1/agent/health?type=client\"; check.Path != expected {\n\t\t\tt.Errorf(\"expected %q path not: %q\", expected, check.Path)\n\t\t}\n\t\tif check.Protocol != \"http\" {\n\t\t\tt.Errorf(\"expected http proto not: %q\", check.Protocol)\n\t\t}\n\t\tif expected := a.config.normalizedAddrs.HTTP; check.PortLabel != expected {\n\t\t\tt.Errorf(\"expected normalized addr not %q\", check.PortLabel)\n\t\t}\n\t})\n\n\tt.Run(\"Plain HTTP + ChecksUseAdvertise\", func(t *testing.T) {\n\t\ta := agent()\n\t\ta.config.Consul.ChecksUseAdvertise = helper.BoolToPtr(true)\n\t\tcheck := a.agentHTTPCheck(false)\n\t\tif check == nil {\n\t\t\tt.Fatalf(\"expected non-nil check\")\n\t\t}\n\t\tif expected := a.config.AdvertiseAddrs.HTTP; check.PortLabel != expected {\n\t\t\tt.Errorf(\"expected advertise addr not %q\", check.PortLabel)\n\t\t}\n\t})\n\n\tt.Run(\"HTTPS\", func(t *testing.T) {\n\t\ta := agent()\n\t\ta.config.TLSConfig.EnableHTTP = true\n\n\t\tcheck := a.agentHTTPCheck(false)\n\t\tif check == nil {\n\t\t\tt.Fatalf(\"expected non-nil check\")\n\t\t}\n\t\tif !check.TLSSkipVerify {\n\t\t\tt.Errorf(\"expected tls skip verify\")\n\t\t}\n\t\tif check.Protocol != \"https\" {\n\t\t\tt.Errorf(\"expected https not: %q\", check.Protocol)\n\t\t}\n\t})\n\n\tt.Run(\"HTTPS + VerifyHTTPSClient\", func(t *testing.T) {\n\t\ta := agent()\n\t\ta.config.TLSConfig.EnableHTTP = true\n\t\ta.config.TLSConfig.VerifyHTTPSClient = true\n\n\t\tif check := a.agentHTTPCheck(false); check != nil {\n\t\t\tt.Fatalf(\"expected nil check not: %#v\", check)\n\t\t}\n\t})\n}", "func HTTPGetCheck(url string, timeout time.Duration) Check {\n\tclient := http.Client{\n\t\tTimeout: timeout,\n\t\t// never follow redirects\n\t\tCheckRedirect: func(*http.Request, []*http.Request) error {\n\t\t\treturn http.ErrUseLastResponse\n\t\t},\n\t}\n\treturn func() error {\n\t\tresp, err := client.Get(url)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tresp.Body.Close()\n\t\tif resp.StatusCode != 200 {\n\t\t\treturn fmt.Errorf(\"returned status %d\", resp.StatusCode)\n\t\t}\n\t\treturn nil\n\t}\n}", "func HTTPGetCheck(url string, timeout time.Duration) Check 
{\n\tclient := http.Client{\n\t\tTimeout: timeout,\n\t\t// never follow redirects\n\t\tCheckRedirect: func(*http.Request, []*http.Request) error {\n\t\t\treturn http.ErrUseLastResponse\n\t\t},\n\t}\n\treturn func() error {\n\t\tresp, err := client.Get(url)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tresp.Body.Close()\n\t\tif resp.StatusCode != 200 {\n\t\t\treturn fmt.Errorf(\"returned status %d\", resp.StatusCode)\n\t\t}\n\t\treturn nil\n\t}\n}", "func (c *Client) HealthCheck() (*HealthStatus, error) {\n\treq, err := http.NewRequest(\"GET\", c.Host+\"/health\", nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar status HealthStatus\n\tif err := c.doReq(req, http.StatusOK, &status); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &status, nil\n}", "func (u UserInviteCodeServiceServer) Check(_ context.Context, _ *rfpbh.HealthCheckRequest) (*rfpbh.HealthCheckResponse, error) {\n\treturn &rfpbh.HealthCheckResponse{Status: rfpbh.HealthCheckResponse_SERVING}, nil\n}", "func (h HealthCheckerFunc) HealthCheck(target string, port uint16, proto string) (ok bool, err error) {\n\treturn h(target, port, proto)\n}", "func (c *APIClient) StatusCheck() (bool, error) {\n\turl, err := c.compileStatusCheckURL()\n\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\treq, err := http.NewRequest(http.MethodGet, url, nil)\n\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\t// Use CallRaw since no authentication is needed for status check.\n\tr, err := c.Client.CallRaw(req)\n\tc.addResponseCode(r.StatusCode)\n\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tif r.StatusCode != http.StatusOK {\n\t\treturn false, nil\n\t}\n\n\treturn true, nil\n}", "func (c Checks) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tvar errFound bool\n\tfor _, check := range c.Checks {\n\t\terr := check.Check(c.Context)\n\t\tif err != nil {\n\t\t\tif c.Logger != nil {\n\t\t\t\tc.Logger(\"Error performing health check for %T (%s): %+v\\n\", check, check.LogInfo(c.Context), err)\n\t\t\t}\n\t\t\terrFound = true\n\t\t}\n\t}\n\tif errFound {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tw.Write([]byte(\"Everything is on fire and nothing is okay.\"))\n\t\treturn\n\t}\n\tw.WriteHeader(http.StatusOK)\n\tw.Write([]byte(\"OK\"))\n}", "func (gh *GitHubChecker) Check(req *http.Request) error {\n\tif err := checkPOSTMethod(req); err != nil {\n\t\treturn err\n\t}\n\n\tif err := gh.containsEvent(req.Header.Get(xGitHubEvent)); err != nil {\n\t\treturn err\n\t}\n\n\tbody, err := ioutil.ReadAll(req.Body)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn gh.validSignature(body, req.Header.Get(xGitHubSignature))\n}", "func HealthCheck() http.Handler {\n\tmux := http.NewServeMux()\n\tmux.HandleFunc(\"/healthcheck\", func(w http.ResponseWriter, req *http.Request) {\n\t\tw.Write([]byte(\"OK\\n\"))\n\t})\n\treturn mux\n}", "func (t *TestRuntime) HealthCheck(url string) error {\n\n\turl += \"/health\"\n\tif t.waitForBundles {\n\t\turl += \"?bundles\"\n\t}\n\n\treq, err := http.NewRequest(\"GET\", url, nil)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unexpected error creating request: %s\", err)\n\t}\n\tresp, err := t.Client.Do(req)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unexpected error: %s\", err)\n\t}\n\tif resp.StatusCode != http.StatusOK {\n\t\treturn fmt.Errorf(\"unexpected response: %d %s\", resp.StatusCode, resp.Status)\n\t}\n\treturn nil\n}", "func (h *Healthcheck) Check() Report {\n queue := asyncFetch(h.urls)\n report := Report{ 0, 0, 0, 0}\n for t := range queue {\n if t.Success {\n report.Success++\n } 
else {\n report.Failure++\n }\n report.TotalWebsites++\n report.TotalTime = report.TotalTime + t.Time\n }\n return report\n}", "func (c *CheckController) Check() {\n\tc.SuccessJSON(beego.AppConfig.String(\"version\"))\n}", "func HealthCheck(w http.ResponseWriter, r *http.Request) {\n\n\tvar err error\n\tvar bytes []byte\n\n\tapsc := gorillaContext.Get(r, \"apsc\").(push.Client)\n\n\t// Add content type header to the response\n\tcontentType := \"application/json\"\n\tcharset := \"utf-8\"\n\tw.Header().Add(\"Content-Type\", fmt.Sprintf(\"%s; charset=%s\", contentType, charset))\n\n\thealthMsg := HealthStatus{\n\t\tStatus: \"ok\",\n\t}\n\n\tpwToken := gorillaContext.Get(r, \"push_worker_token\").(string)\n\tpushEnabled := gorillaContext.Get(r, \"push_enabled\").(bool)\n\trefStr := gorillaContext.Get(r, \"str\").(stores.Store)\n\n\tif pushEnabled {\n\t\t_, err := auth.GetPushWorker(pwToken, refStr)\n\t\tif err != nil {\n\t\t\thealthMsg.Status = \"warning\"\n\t\t}\n\n\t\thealthMsg.PushServers = []PushServerInfo{\n\t\t\t{\n\t\t\t\tEndpoint: apsc.Target(),\n\t\t\t\tStatus: apsc.HealthCheck(context.TODO()).Result(),\n\t\t\t},\n\t\t}\n\n\t} else {\n\t\thealthMsg.PushFunctionality = \"disabled\"\n\t}\n\n\tif bytes, err = json.MarshalIndent(healthMsg, \"\", \" \"); err != nil {\n\t\terr := APIErrGenericInternal(err.Error())\n\t\trespondErr(w, err)\n\t\treturn\n\t}\n\n\trespondOK(w, bytes)\n}", "func HTTP(healthService string, httpTimeout time.Duration) bool {\n\tclient := http.Client{\n\t\tTimeout: httpTimeout,\n\t}\n\n\tresp, err := client.Get(healthService)\n\t// Check if response timeouts or returns an HTTP error\n\tif err != nil {\n\t\treturn false\n\t}\n\tdefer resp.Body.Close()\n\tbytes, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn false\n\t}\n\n\tif strings.Contains(string(bytes), \"healthy\") {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func CheckIfEndpointIsResponding(t *testing.T, endpoint string) bool {\r\n\t// we ignore certificates at this point\r\n\ttlsConfig := tls.Config{}\r\n\ttlsConfig.InsecureSkipVerify = true\r\n\r\n\terr := http_helper.HttpGetWithRetryWithCustomValidationE(\r\n\t\tt,\r\n\t\tfmt.Sprintf(\"https://%s\", endpoint),\r\n\t\t&tlsConfig,\r\n\t\t1,\r\n\t\t10*time.Second,\r\n\t\tfunc(statusCode int, body string) bool {\r\n\t\t\tif statusCode == 200 {\r\n\t\t\t\treturn true\r\n\t\t\t}\r\n\t\t\tif statusCode == 404 {\r\n\t\t\t\tt.Log(\"Warning: 404 response from endpoint. 
Test will still PASS.\")\r\n\t\t\t\treturn true\r\n\t\t\t}\r\n\t\t\treturn false\r\n\t\t},\r\n\t)\r\n\treturn err == nil\r\n}", "func CheckStatus(uri string) error {\n\tvar err error\n\tvar S Status\n\n\tresp, err := http.Get(uri)\n\tif err != nil {\n\t\terror2.LogError(err)\n\t\treturn err\n\t}\n\n\tdefer func() {\n\t\t_ = resp.Body.Close()\n\t}()\n\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tjsonUnmarshalErr := json.Unmarshal(body, S)\n\tif err == nil && jsonUnmarshalErr == nil && S.Status == \"ok\" {\n\t\treturn nil\n\t}\n\n\terr = errors.New(\"unable to complete status request\")\n\terror2.LogError(err)\n\treturn err\n}", "func healthcheckHandler(w http.ResponseWriter, req *http.Request) {\n\tport := req.Header.Get(\"Original-Tcp-Port\")\n\tif port == \"\" {\n\t\tmsg := \"Header Original-Tcp-Port not found in request\"\n\t\tlog.Error().Msg(msg)\n\t\tsetHealthcheckResponse(w, http.StatusBadRequest, msg)\n\t\treturn\n\t}\n\n\taddress := fmt.Sprintf(\"%s:%s\", constants.LocalhostIPAddress, port)\n\tconn, err := net.Dial(\"tcp\", address)\n\tif err != nil {\n\t\tmsg := fmt.Sprintf(\"Failed to establish connection to %s\", address)\n\t\tlog.Error().Err(err).Msg(msg)\n\t\tsetHealthcheckResponse(w, http.StatusNotFound, msg)\n\t\treturn\n\t}\n\n\tif err = conn.Close(); err != nil {\n\t\tlog.Error().Err(err).Msgf(\"Failed to close connection to %s\", address)\n\t}\n\n\tmsg := fmt.Sprintf(\"Successfully established connection to %s\", address)\n\tlog.Debug().Msg(msg)\n\tsetHealthcheckResponse(w, http.StatusOK, msg)\n}", "func healthcheck(rw http.ResponseWriter, req *http.Request) {\n for name, h := range toolbox.AdminCheckList {\n if err := h.Check(); err != nil {\n fmt.Fprintf(rw, \"%s : %s\\n\", name, err.Error())\n } else {\n fmt.Fprintf(rw, \"%s : ok\\n\", name)\n }\n }\n}", "func (dc *DockerEnvContainer) HealthCheck() string {\n\n\treq, err := http.NewRequest(\"GET\", fmt.Sprintf(\"%s%s\", dc.url, HealthCheck), nil)\n\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\tres, err := http.DefaultClient.Do(req)\n\tif err != nil {\n\t\treturn StatusDown\n\t}\n\tif res.StatusCode != http.StatusOK {\n\t\treturn StatusDown\n\t}\n\treturn StatusUp\n\n}", "func healthCheckHandler(w http.ResponseWriter, r *http.Request) {\n\tfmt.Fprintln(w, \"OK\")\n}", "func Example_checkStatus() {\n\t//Create client.\n\tc := APIClient{\n\t\tClient: client.New(\n\t\t\tfunc(c *client.Client) {\n\t\t\t\tc.User = \"myusername\"\n\t\t\t\tc.Password = \"mypassword\"\n\t\t\t},\n\t\t),\n\t\tBaseUrl: \"https://url.to.publit\",\n\t}\n\n\t// Check if service is up.\n\tok := c.StatusCheck()\n\n\tif !ok {\n\t\tlog.Fatal(\"Service is not up.\")\n\t}\n\n\tlog.Println(\"Service is up!\")\n\n\t// Do something\n}", "func CheckHTTP(url string, redirect, insecure bool, host string, timeout int, format, path, expectedValue, expression string) (string, int) {\n\tconst checkName = \"CheckHttp\"\n\tvar retCode int\n\tvar msg string\n\n\tacceptText, err := getAcceptText(format)\n\tif err != nil {\n\t\tmsg, _ = resultMessage(checkName, statusTextCritical, fmt.Sprintf(\"The format (--format) \\\"%s\\\" is not valid. 
The only valid value is \\\"json\\\".\", format))\n\n\t\treturn msg, 2\n\t}\n\n\tstatus, body, _ := statusCode(url, insecure, timeout, acceptText, host)\n\n\tretCode, responseStateText := evaluateStatusCode(status, redirect)\n\tresponseCode := strconv.Itoa(status)\n\n\tvar checkMsg = \"\"\n\tif retCode == 0 && len(format) > 0 && len(path) > 0 {\n\t\tvar queryValue string\n\n\t\tswitch {\n\t\tcase format == \"json\":\n\t\t\texpectedValueLen := len(expectedValue)\n\t\t\texpressionLen := len(expression)\n\n\t\t\tvalue := gojsonq.New().JSONString(body).Find(path)\n\n\t\t\tif value == nil {\n\t\t\t\tretCode = 2\n\t\t\t\tresponseStateText = statusTextCritical\n\t\t\t\tcheckMsg = fmt.Sprintf(\". No entry at path %s\", path)\n\t\t\t} else if expectedValueLen > 0 && expressionLen > 0 {\n\t\t\t\tretCode = 2\n\t\t\t\tresponseStateText = statusTextCritical\n\t\t\t\tcheckMsg = fmt.Sprintf(\". Both --expectedValue and --expression given but only one is used\")\n\t\t\t} else if expectedValueLen > 0 {\n\t\t\t\tqueryValue = fmt.Sprintf(\"%v\", value)\n\t\t\t\tretCode, responseStateText, checkMsg = evaluateExpectedValue(queryValue, expectedValue, path)\n\t\t\t} else if expressionLen > 0 {\n\t\t\t\tretCode, responseStateText, checkMsg = evaluateExpression(value, expression, path)\n\t\t\t} else {\n\t\t\t\tretCode = 2\n\t\t\t\tresponseStateText = statusTextCritical\n\t\t\t\tcheckMsg = fmt.Sprintf(\". --expectedValue or --expression not given\")\n\t\t\t}\n\t\t}\n\t}\n\n\tmsg, _ = resultMessage(checkName, responseStateText, fmt.Sprintf(\"Url %s responded with %s%s\", url, responseCode, checkMsg))\n\n\treturn msg, retCode\n}", "func (k *xyzProvider) Check(ctx context.Context, req *pulumirpc.CheckRequest) (*pulumirpc.CheckResponse, error) {\n\turn := resource.URN(req.GetUrn())\n\tty := urn.Type()\n\tif ty != \"xyz:index:Random\" {\n\t\treturn nil, fmt.Errorf(\"Unknown resource type '%s'\", ty)\n\t}\n\treturn &pulumirpc.CheckResponse{Inputs: req.News, Failures: nil}, nil\n}", "func healthcheck(ha *lib.HTTPAdapter) {\n\tr := mux.NewRouter()\n\tr.HandleFunc(\"/health\", ha.HealthHandler).Methods(\"GET\")\n\taddr := fmt.Sprintf(\"127.0.0.1:%v\", serverConfig.HealthcheckHTTPport)\n\tserver := &http.Server{Addr: addr, Handler: r}\n\tlogger.Printf(\"HTTP healthcheck listening on: %v\", addr)\n\tlogger.Println(server.ListenAndServe())\n}", "func (c *Check) probeHTTP() error {\n\tclient := http.Client{\n\t\tTimeout: c.Timeout,\n\t}\n\n\tres, err := client.Get(c.URL.String())\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer res.Body.Close()\n\n\tif res.StatusCode != 200 {\n\t\treturn fmt.Errorf(\"request returned %d expected code 200: %s\", res.StatusCode, res.Status)\n\t}\n\treturn nil\n}", "func TestReturns200IfThereAreNoChecks(t *testing.T) {\n\trecorder := httptest.NewRecorder()\n\n\treq, err := http.NewRequest(\"GET\", \"https://fakeurl.com/debug/health\", nil)\n\tif err != nil {\n\t\tt.Errorf(\"Failed to create request.\")\n\t}\n\n\tStatusHandler(recorder, req)\n\n\tif recorder.Code != 200 {\n\t\tt.Errorf(\"Did not get a 200.\")\n\t}\n}", "func (c *Checker) Check() *Checker {\n\n\t// set cookies\n\tc.request.Header.Set(\"Cookie\", c.generateCookieString())\n\n\trecorder := httptest.NewRecorder()\n\tc.handler.ServeHTTP(recorder, c.request)\n\n\tresp := &http.Response{\n\t\tStatusCode: recorder.Code,\n\t\tBody: NewReadCloser(recorder.Body),\n\t\tHeader: recorder.Header(),\n\t}\n\tc.handleCookies(resp)\n\tc.response = resp\n\n\treturn c\n}", "func (f *Fetch) Check(ctx context.Context, r resource.Renderer) 
(resource.TaskStatus, error) {\n\tch := make(chan response, 1)\n\n\tgo func(ctx context.Context, r resource.Renderer) {\n\t\tstatus, err := f.checkWithContext(ctx, r)\n\t\tch <- response{status, err}\n\t}(ctx, r)\n\n\tselect {\n\tcase <-ctx.Done():\n\t\treturn nil, ctx.Err()\n\tcase check := <-ch:\n\t\treturn check.status, check.err\n\t}\n}", "func healthcheckok(writer http.ResponseWriter, request *http.Request) {\n\twriter.WriteHeader(200)\n}", "func (p *Probe) Check() error {\n\t// First, check that Envoy has received a configuration update from Pilot.\n\tif err := p.checkConfigStatus(); err != nil {\n\t\treturn err\n\t}\n\treturn p.isEnvoyReady()\n}", "func checkURL(url string, ch chan int) {\n\ttr := &http.Transport{\n\t\tTLSClientConfig: &tls.Config{InsecureSkipVerify: insecureSkipTLSVerify},\n\t}\n\tclient := &http.Client{Transport: tr}\n\tfor {\n\t\tresp, err := client.Get(url)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Endpoint is unhealthy: %q. %q.\\n\", url, err)\n\t\t\ttime.Sleep(3 * time.Second)\n\t\t\tcontinue\n\t\t}\n\t\tif resp.StatusCode == http.StatusOK || resp.StatusCode == http.StatusUnauthorized {\n\t\t\tlog.Printf(\"Endpoint is healthy: %q.\\n\", url)\n\t\t\tch <- 1\n\t\t\treturn\n\t\t}\n\t\tlog.Println(resp.Status)\n\t\ttime.Sleep(3 * time.Second)\n\t\tcontinue\n\t}\n}", "func HttpCheckHealthHandler(w http.ResponseWriter, r *http.Request) {\n\tresp, err := http.Get(\"http://reddit.com/r/golang.json\") //insert json-object here\n\tif err != nil {\n\t\tfmt.Println(\"Error: %g\", err)\n\t}\n\tfmt.Fprintf(w, \"<h1>Health Status</h1>\\nStatus: %s\", resp.Status)\n}", "func (a *Access) CheckAPI() error {\n\tresponse := struct {\n\t\tMessage string `json:\"message\"`\n\t}{}\n\terr := a.httpGet(\"/api/\", &response)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif response.Message == \"\" {\n\t\treturn errors.New(\"hass: API is not running\")\n\t}\n\n\treturn nil\n}", "func (b *Backend) HealthCheck() error {\n\tvar healthCheckResponse interface{}\n\terr := b.RPC(0, []byte(\"{}\"), &healthCheckResponse)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (h *TCPProbe) Check() {\n\tgo h.TCPCheck()\n}", "func okHealthCheck(proxy *Proxy) error {\n\treturn nil\n}", "func EnsureHTTPStatus(o *HTTPOptions) error {\n\ttickerInt := o.TickerInterval\n\tif tickerInt == 0 {\n\t\ttickerInt = 20\n\t}\n\n\tgiveUp := make(chan bool)\n\tgo func() {\n\t\ttime.Sleep(time.Second * o.Timeout)\n\t\tgiveUp <- true\n\t}()\n\n\tclient := &http.Client{}\n\tclient.CheckRedirect = func(req *http.Request, via []*http.Request) error {\n\t\treturn errors.New(\"Redirect\")\n\t}\n\n\tqueryTicker := time.NewTicker(time.Second * tickerInt).C\n\tfor {\n\t\tselect {\n\t\tcase <-queryTicker:\n\t\t\treq, err := http.NewRequest(\"GET\", o.URL, nil)\n\t\t\tif o.Username != \"\" && o.Password != \"\" {\n\t\t\t\treq.SetBasicAuth(o.Username, o.Password)\n\t\t\t}\n\n\t\t\tif len(o.Headers) > 0 {\n\t\t\t\tfor header, value := range o.Headers {\n\t\t\t\t\tif header == \"Host\" {\n\t\t\t\t\t\treq.Host = value\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\treq.Header.Add(header, value)\n\t\t\t\t}\n\t\t\t}\n\t\t\t// Make the request\n\t\t\tresp, err := client.Do(req)\n\n\t\t\tif err == nil {\n\t\t\t\tdefer resp.Body.Close()\n\t\t\t\tif resp.StatusCode == o.ExpectedStatus {\n\t\t\t\t\t// Log expected vs. 
actual if we do not get a match.\n\t\t\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\t\t\"URL\": o.URL,\n\t\t\t\t\t\t\"expected\": o.ExpectedStatus,\n\t\t\t\t\t\t\"got\": resp.StatusCode,\n\t\t\t\t\t}).Info(\"HTTP Status code matched expectations\")\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\n\t\t\t\t// Log expected vs. actual if we do not get a match.\n\t\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\t\"URL\": o.URL,\n\t\t\t\t\t\"expected\": o.ExpectedStatus,\n\t\t\t\t\t\"got\": resp.StatusCode,\n\t\t\t\t}).Info(\"HTTP Status could not be matched\")\n\t\t\t}\n\n\t\tcase <-giveUp:\n\t\t\treturn fmt.Errorf(\"No deployment found after waiting %d seconds\", o.Timeout)\n\t\t}\n\t}\n}", "func (r RequestTester) Check(req *http.Request) error {\n\tif r.Path > \"\" && r.Path != req.URL.Path {\n\t\treturn fmt.Errorf(\"expected request path %s; got %s\", r.Path, req.URL.Path)\n\t}\n\tif r.Auth > \"\" && r.Auth != req.Header.Get(\"Authorization\") {\n\t\treturn fmt.Errorf(\"expecte auth header %s; got %s\", r.Auth, req.Header.Get(\"Authorization\"))\n\t}\n\tif r.Method > \"\" && r.Method != req.Method {\n\t\treturn fmt.Errorf(\"expected method %s; got %s\", r.Method, req.Method)\n\t}\n\tif r.Query > \"\" && r.Query != req.URL.RawQuery {\n\t\treturn fmt.Errorf(\"expected query args %s; got %s\", r.Query, req.URL.RawQuery)\n\t}\n\tif r.Host > \"\" && r.Host != req.URL.Host {\n\t\treturn fmt.Errorf(\"expected host %s; got %s\", r.Host, req.URL.Host)\n\t}\n\tif r.ContentType > \"\" && r.ContentType != req.Header.Get(\"ContentType\") {\n\t\treturn fmt.Errorf(\"expected content-type %s; got %s\", r.ContentType, req.Header.Get(\"ContentType\"))\n\t}\n\tfor k := range r.Header {\n\t\tif r.Header.Get(k) != req.Header.Get(k) {\n\t\t\treturn fmt.Errorf(\"expected header %s = %s; got %s\", k, r.Header.Get(k), req.Header.Get(k))\n\t\t}\n\t}\n\tif len(r.Payload) > 0 {\n\t\tb, err := ioutil.ReadAll(req.Body)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unable to read request body: %v\", err)\n\t\t}\n\t\tif bytes.Compare(b, r.Payload) != 0 {\n\t\t\treturn fmt.Errorf(\"expected body %s; got %s\", string(r.Payload), string(b))\n\t\t}\n\n\t}\n\treturn nil\n}", "func (m *HealthCheck_HttpHealthCheck) Validate() error {\n\treturn m.validate(false)\n}", "func HostCheck(host string, hostWhitelist []string, handler http.Handler) http.Handler {\n\treturn hostCheck(apiVersion1, host, hostWhitelist, handler)\n}", "func (c *HTTPChecker) Run(ctx *context.Context) pkg.Results {\n\tvar results pkg.Results\n\tfor _, conf := range ctx.Canary.Spec.HTTP {\n\t\tresults = append(results, c.Check(ctx, conf)...)\n\t}\n\treturn results\n}", "func (c *Check) Health(ctx context.Context, w http.ResponseWriter, r *http.Request) error {\n\tctx, span := trace.StartSpan(ctx, \"handlers.Check.Health\")\n\tdefer span.End()\n\n\tvar health struct {\n\t\tStatus string `json:\"status\"`\n\t}\n\n\t// Check if the database is ready.\n\tif err := database.StatusCheck(ctx, c.db); err != nil {\n\n\t\t// If the database is not ready we will tell the client and use a 500\n\t\t// status. 
Do not respond by just returning an error because further up in\n\t\t// the call stack will interpret that as an unhandled error.\n\t\thealth.Status = \"db not ready\"\n\t\treturn web.Respond(ctx, w, health, http.StatusInternalServerError)\n\t}\n\n\thealth.Status = \"ok\"\n\treturn web.Respond(ctx, w, health, http.StatusOK)\n}", "func (c *Client) Healthcheck(ctx context.Context) (res string, err error) {\n\tvar ires interface{}\n\tires, err = c.HealthcheckEndpoint(ctx, nil)\n\tif err != nil {\n\t\treturn\n\t}\n\treturn ires.(string), nil\n}", "func healthCheckHandler(w http.ResponseWriter, r *http.Request) {\n\tw.WriteHeader(http.StatusOK)\n}", "func HealthCheck(w http.ResponseWriter, r *http.Request) {\n\t// Setando um header http de resposta\n\tw.Header().Set(\"content-type\", \"application/json\")\n\n\t// Gerando um objeto customizado à partir de um map, e o convertendo em json\n\tresponse, _ := json.Marshal(map[string]interface{}{\n\t\t\"status\": \"up\",\n\t})\n\n\t// Write escreve o conteúdo do slice de bytes no corpo da resposta\n\tw.Write(response)\n\t// WriteHeader seta o status code da resposta. É importante frisar que ele só pode ser chamado\n\t// uma única vez no contexto da resposta. Chamadas subsequentes são ignoradas, portanto convém\n\t// chamar essa função quando você estiver prestes a retornar do handler\n\tw.WriteHeader(http.StatusOK)\n\treturn\n}", "func (d *Datasource) CheckHealth(ctx context.Context, req *backend.CheckHealthRequest) (*backend.CheckHealthResult, error) {\n\tres := &backend.CheckHealthResult{}\n\tlog.DefaultLogger.Debug(\"CheckHealth called\")\n\tconfig, err := models.LoadSettings(req.PluginContext)\n\n\tif err != nil {\n\t\tres.Status = backend.HealthStatusError\n\t\tres.Message = \"Unable to load settings\"\n\t\tlog.DefaultLogger.Debug(err.Error())\n\t\treturn res, nil\n\t}\n\n\tclient, err := NewGoogleClient(ctx, *config)\n\tif err != nil {\n\t\tres.Status = backend.HealthStatusError\n\t\tres.Message = \"Unable to create client\"\n\t\tlog.DefaultLogger.Debug(err.Error())\n\t\treturn res, nil\n\t}\n\n\terr = client.TestClient()\n\tif err != nil {\n\t\tres.Status = backend.HealthStatusError\n\t\tres.Message = \"Permissions check failed\"\n\t\tlog.DefaultLogger.Debug(err.Error())\n\t\treturn res, nil\n\t}\n\n\tres.Status = backend.HealthStatusOk\n\tres.Message = \"Success\"\n\treturn res, nil\n}", "func (s *Server) HealthCheckHandler(w http.ResponseWriter, r *http.Request) {\n\ttime.Sleep(time.Duration(s.Config.Delay) * time.Second)\n\tstatus := 200\n\tif !s.Config.Healthy {\n\t\tstatus = 500\n\t}\n\tw.WriteHeader(status)\n\tlog.Info(\"host: \", r.Host, \" uri: \", r.RequestURI, \" status: \", status)\n\n}", "func HealthCheck(c echo.Context) error {\n\treturn c.String(http.StatusOK, \"WORKING!\")\n}", "func TestCheckAPI(t *testing.T) {\n\tenv := newTestEnv(t)\n\tdefer env.Shutdown()\n\tbaseURL, err := env.builder.BuildBaseURL()\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error building base url: %v\", err)\n\t}\n\n\tresp, err := http.Get(baseURL)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error issuing request: %v\", err)\n\t}\n\tdefer resp.Body.Close()\n\n\tcheckResponse(t, \"issuing api base check\", resp, http.StatusOK)\n\tcheckHeaders(t, resp, http.Header{\n\t\t\"Content-Type\": []string{\"application/json\"},\n\t\t\"Content-Length\": []string{\"2\"},\n\t\t\"Gitlab-Container-Registry-Version\": []string{strings.TrimPrefix(version.Version, \"v\")},\n\t\t\"Gitlab-Container-Registry-Features\": []string{version.ExtFeatures},\n\t})\n\n\tp, err := 
ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error reading response body: %v\", err)\n\t}\n\n\tif string(p) != \"{}\" {\n\t\tt.Fatalf(\"unexpected response body: %v\", string(p))\n\t}\n}", "func HealthCheck(w http.ResponseWriter, r *http.Request) {\n\tdbUp := DBClient.Check()\n\n\tif dbUp {\n\t\tdata, _ := json.Marshal(healthCheckResponse{Status: \"UP\"})\n\t\twriteJSONResponse(w, http.StatusOK, data)\n\t} else {\n\t\tdata, _ := json.Marshal(healthCheckResponse{Status: \"Database not accessible\"})\n\t\twriteJSONResponse(w, http.StatusServiceUnavailable, data)\n\t}\n}", "func (m *BackendHttpchk) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateMethod(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateURI(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateVersion(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func NewHTTPCheck(name, endpoint string) (Check, error) {\n\tep, err := url.Parse(endpoint)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\thc := &httpCheck{\n\t\tcheck: newCheck(name, ep.Hostname(), CheckTypeHTTP),\n\t\tURL: ep.Path,\n\t}\n\n\tif ep.Scheme == \"https\" {\n\t\thc.Encryption = true\n\t}\n\n\tif ep.User != nil {\n\t\tif ep.User.Username() != \"\" {\n\t\t\thc.Auth = ep.User.Username()\n\t\t}\n\n\t\tif pass, ok := ep.User.Password(); ok {\n\t\t\thc.Auth = hc.Auth + \":\" + pass\n\t\t}\n\t}\n\n\tif ep.Port() != \"\" {\n\t\thc.Port, err = strconv.Atoi(ep.Port())\n\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t} else if ep.Scheme == \"https\" {\n\t\thc.Port = 443\n\t}\n\n\treturn hc, nil\n}", "func checkVaultHealth() (error) {\n\n // execute ping request to docker socket\n response, err := http.Head(vaultUrl(\"sys/health\"))\n\n // fail if an error occurs during transport\n if err != nil {\n return fmt.Errorf(\"Failed to connect to vault at %s\", err)\n }\n\n // fail if vault did not respond with 200 response code\n if response.StatusCode != 200 {\n return fmt.Errorf(\"Found unhealthy or sealed vault at %s\", config.VaultAddr)\n }\n\n return nil\n}" ]
[ "0.70563394", "0.69536537", "0.68326074", "0.6758303", "0.6734101", "0.6721828", "0.67087394", "0.66680324", "0.6665401", "0.6655878", "0.66553265", "0.6563348", "0.65447074", "0.6539294", "0.6532304", "0.65218896", "0.64806414", "0.64593005", "0.6453134", "0.6451852", "0.6426893", "0.64227945", "0.64079136", "0.6394679", "0.6387456", "0.6386088", "0.63580143", "0.63502973", "0.63431", "0.6311733", "0.6309855", "0.6304916", "0.62763166", "0.6266246", "0.6263876", "0.62623346", "0.625153", "0.6247353", "0.6246269", "0.6245059", "0.61908656", "0.6190665", "0.61879265", "0.6161913", "0.61568135", "0.6150033", "0.61321324", "0.611107", "0.611107", "0.6109571", "0.6104357", "0.6099306", "0.60927474", "0.6086685", "0.6067856", "0.605681", "0.60492945", "0.6047457", "0.6044159", "0.6023502", "0.6016843", "0.60071564", "0.6000549", "0.5989797", "0.5978472", "0.59781843", "0.59735185", "0.5958947", "0.59482497", "0.59391373", "0.59292555", "0.5928623", "0.592445", "0.592063", "0.5912692", "0.5911762", "0.5901681", "0.5894496", "0.58926874", "0.5890208", "0.5888505", "0.58879733", "0.58835787", "0.58809894", "0.58799326", "0.5878396", "0.5862704", "0.58554417", "0.5844612", "0.58414125", "0.5829199", "0.58132094", "0.58056664", "0.58023", "0.58015215", "0.57994205", "0.5786588", "0.57832474", "0.5775935", "0.577072" ]
0.76647353
0
Initialize the generator from a seed
func (m *MT) Seed(theseed int64) {
	// Lower 32bit
	seed := uint32(theseed)
	for m.lp = 0; m.lp < 624; m.lp++ {
		m.Key[m.lp] = seed
		seed = (uint32(1812433253)*(seed^(seed>>30)) + m.lp + 1) & 0xffffffff
		//m.arrayL[m.lp] = 0x6c078965*(m.arrayL[m.lp-1]^(m.arrayL[m.lp-1]>>30)) + m.lp
	}
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func Seed(seed int64) { globalRand.Seed(seed) }", "func (s CrSource) Seed(int64) {}", "func (rng *Rng) Seed(seed int64) {\n\trng.State = uint64(seed)\n}", "func (gen *CMWC) Seed(seed int64) {\n\tgen.lock.Lock()\n\tdefer gen.lock.Unlock()\n\n\tgen.c = cmwcC\n\ts := uint32(seed)\n\tfor i := 0; i < 4096; i++ {\n\t\ts = s ^ s<<uint(i%63)\n\t\tgen.state[i] = s ^ uint32(seed)\n\t}\n}", "func Seed(seed int64) {\n\tr.Seed(seed)\n}", "func (r *Rand) Seed(s int64) {\n\t*r = Rand(uint64(s))\n}", "func (g *Generator) Seed(seed int64) {\n\tif _, err := g.hash.Write(int64ToBytes(seed)); err != nil {\n\t\tpanic(err)\n\t}\n\n\tif err := g.reseed(); err != nil {\n\t\tpanic(err)\n\t}\n}", "func Init(seed int) {\n\tif seed <= 0 {\n\t\tseed = int(time.Now().Unix())\n\t}\n\trand.Seed(int64(seed))\n}", "func Seed (seed int64) {\n\tlocalRand.Seed(seed)\n}", "func (r *IncrementRand) Seed(seed int64) {\n\tr.seed = seed\n\tr.value = seed\n}", "func (r *OrgBucketID) Seed(seed int64) {\n\tr.m.Lock()\n\tr.src = rand.New(rand.NewSource(seed))\n\tr.m.Unlock()\n}", "func (rng *splitMix64Source) Seed(seed int64) {\n\trng.state = uint64(seed)\n}", "func (r *ConstantRand) Seed(seed int64) {\n\tr.seed = seed\n}", "func (R *RAND) Seed(rawlen int, raw []byte) { /* initialise from at least 128 byte string of raw random entropy */\n\tvar b [4]byte\n\tsh := NewHASH256()\n\tR.pool_ptr = 0\n\n\tfor i := 0; i < rand_NK; i++ {\n\t\tR.ira[i] = 0\n\t}\n\tif rawlen > 0 {\n\t\tfor i := 0; i < rawlen; i++ {\n\t\t\tsh.Process(raw[i])\n\t\t}\n\t\tdigest := sh.Hash()\n\n\t\t/* initialise PRNG from distilled randomness */\n\n\t\tfor i := 0; i < 8; i++ {\n\t\t\tb[0] = digest[4*i]\n\t\t\tb[1] = digest[4*i+1]\n\t\t\tb[2] = digest[4*i+2]\n\t\t\tb[3] = digest[4*i+3]\n\t\t\tR.sirand(pack(b))\n\t\t}\n\t}\n\tR.fill_pool()\n}", "func Initialisieren (keim int64) {\n\trand.Seed (keim)\n}", "func (src *MT19937_64) Seed(seed uint64) {\n\tsrc.mt[0] = seed\n\tfor src.mti = 1; src.mti < mt19937_64NN; src.mti++ {\n\t\tsrc.mt[src.mti] = (6364136223846793005*(src.mt[src.mti-1]^(src.mt[src.mti-1]>>62)) + src.mti)\n\t}\n}", "func (pcg *PCGSource) Seed(seed uint64) {\n\tpcg.low = seed\n\tpcg.high = seed // TODO: What is right?\n}", "func init() {\n\t\n\t\t// note:\n\t\t// Each time you set the same seed, you get the same sequence\n\t\t// You have to set the seed only once\n\t\t// you simply call Intn to get the next random integer\n\t\trand.Seed(time.Now().UTC().UnixNano())\n\t}", "func Seed(seed int64) {\n\tif seed == 0 {\n\t\trand.Seed(time.Now().UTC().UnixNano())\n\t} else {\n\t\trand.Seed(seed)\n\t}\n}", "func (r *Random) initGenrand(s uint32) {\n\tvar mti uint32\n\tmt := &r.state\n\tmt[0] = s\n\tfor mti = 1; mti < cN; mti++ {\n\t\t/* See Knuth TAOCP Vol2. 3rd Ed. P.106 for multiplier. */\n\t\t/* In the previous versions, MSBs of the seed affect */\n\t\t/* only MSBs of the array mt[]. 
*/\n\t\t/* 2002/01/09 modified by Makoto Matsumoto */\n\t\tmt[mti] = uint32(1812433253)*(mt[mti-1]^(mt[mti-1]>>30)) + mti\n\t}\n\tr.index = mti\n}", "func (p *PCG64) Seed(low, high uint64) {\n\tp.low = low\n\tp.high = high\n}", "func init() {\n\trand.Seed(time.Now().UTC().UnixNano())\n}", "func init() {\n\trand.Seed(time.Now().UTC().UnixNano())\n}", "func init() {\n\trand.Seed(time.Now().UTC().UnixNano())\n}", "func init() {\n\trand.Seed(time.Now().UTC().UnixNano())\n}", "func (p *Pcg128) Seed(stateHigh, stateLow uint64) {\n\tp.lcg_128_seed(stateHigh, stateLow)\n}", "func init() {\n\trand.Seed(time.Now().UnixNano())\n}", "func init() {\n\trand.Seed(time.Now().UnixNano())\n}", "func init() {\n\trand.Seed(time.Now().UnixNano())\n}", "func init() {\n\trand.Seed(time.Now().UnixNano())\n}", "func init() {\n\trand.Seed(time.Now().UnixNano())\n}", "func init() {\n\trand.Seed(time.Now().UnixNano())\n}", "func init() {\n\trand.Seed(time.Now().UnixNano())\n}", "func init() {\n\tvar b [8]byte\n\t_, err := crand.Read(b[:])\n\tif err != nil {\n\t\tpanic(err.Error())\n\t}\n\tvar seed = int64(binary.LittleEndian.Uint64(b[:]))\n\trand.Seed(seed)\n}", "func init() {\n\t// As rand.Seed() expect an int64 as input,\n\t// convert the time to unix nano before passing it to the function.\n\trand.Seed(time.Now().UnixNano())\n}", "func (h *Hash) initSeed() {\n\tif h.seed.s == 0 {\n\t\tseed := MakeSeed()\n\t\th.seed = seed\n\t\th.state = seed\n\t}\n}", "func init() {\n\trand.Seed(time.Now().UnixNano()) // initialize rand module\n}", "func init() {\n\t//seed := uint64(0x9E3779B97F4A7C15) // goldenRatio\n\tseed := time.Now().UnixNano()\n\trand.Seed(seed)\n\tfmt.Printf(\"seed: 0x%x\\n\", seed)\n}", "func (genA *GeneticAlgorithm) SetSeed(seed int64) {\n\tgenA.RandomEngine = rand.New(rand.NewSource(seed))\n}", "func (r *ISAAC) Seed(seed int64) {\n\tr.randrsl = padSeed(uint64(seed))\n\tr.randInit()\n}", "func (sp *SuperSpin) Seed(seed int64) {\n\tsp.seed = seed\n}", "func (r *lockedRandSource) Seed(seed int64) {\n\tr.lock.Lock()\n\tr.src.Seed(seed)\n\tr.lock.Unlock()\n}", "func NewGenerator(seed int64) *Generator {\n\tg := &Generator{\n\t\tseed: seed,\n\t\tnoise: opensimplex.New(seed),\n\t}\n\n\treturn g\n}", "func (baseModel *BaseModel) SetSeed(seed int64) {\n baseModel.Rng = rand.New(rand.NewSource(seed)).Float64\n baseModel.RngSet = true\n}", "func (mt *MT19937) seed(val uint32) {\n\tmt.index = n\n\tmt.state[0] = val\n\tfor i := 1; i < (n - 1); i++ {\n\t\tmt.state[i] = f*(mt.state[i-1]^(mt.state[i-1]>>(w-2))) + uint32(i)\n\t}\n}", "func New(seed int64) *Faker {\n\t// If passing 0 create crypto safe int64 for initial seed number\n\tif seed == 0 {\n\t\tbinary.Read(crand.Reader, binary.BigEndian, &seed)\n\t}\n\n\treturn &Faker{Rand: rand.New(&lockedSource{src: rand.NewSource(seed).(rand.Source64)})}\n}", "func (u Universe) Seed() {\n\tfor i := 0; i < (width * height / 4); i++ {\n\t\tu.Set(rand.Intn(width), rand.Intn(height), true)\n\t}\n}", "func (m *Model) SetSeed(seed int64) {}", "func seedRandomizer() {\n var b [8]byte\n _, err := crypto_rand.Read(b[:])\n if err != nil {\n panic(\"cannot seed math/rand package with cryptographically secure random number generator\")\n }\n math_rand.Seed(int64(binary.LittleEndian.Uint64(b[:])))\n}", "func New(seed int64) *rand.Rand {\n\treturn rand.New(NewSource(seed))\n}", "func init() {\n\t// Seed the random number generator.\n\trand.Seed(time.Now().UnixNano())\n}", "func newGenerator(h hash.Hash, seed []byte) generator {\n\tif h == nil {\n\t\th = sha256.New()\n\t}\n\tb := 
h.Size()\n\tg := generator{\n\t\tkey: make([]byte, b),\n\t\tcounter: make([]byte, 16),\n\t\tmaxBytesPerRequest: (1 << 15) * b,\n\t\ttemp: make([]byte, b),\n\t\th: h,\n\t}\n\tif len(seed) != 0 {\n\t\t_, _ = g.Write(seed)\n\t}\n\treturn g\n}", "func (u Universe) Seed() {\n\t//random numbers - using math/rand lib rand.Intn()\n\t//25% of cells in Universe\n\tpercentageOfCells := int(float64(len(u)*len(u[0])) * 0.25)\n\tfor i := 0; i <= percentageOfCells; i++ {\n\t\tu[rand.Intn(15)][rand.Intn(80)] = true\n\t}\n\t//\n}", "func (x *SplitMix64) Seed(seed int64) {\n\t*x = SplitMix64(seed)\n}", "func (r *lockedSource) Seed(seed int64) {\n\tr.mut.Lock()\n\tr.src.Seed(seed)\n\tr.mut.Unlock()\n}", "func (f *Fortuna) Seed(seed int64) {\n\tif err := f.AddRandomEvent(0, 0, int64ToBytes(seed)); err != nil {\n\t\tpanic(err)\n\t}\n}", "func (w *Wallet) InitSeed(s *aklib.DBConfig, pwd []byte) error {\n\tseed := make([]byte, 32)\n\tif _, err := rand.Read(seed); err != nil {\n\t\tpanic(err)\n\t}\n\tw.EncSeed = address.EncryptSeed(seed, pwd)\n\treturn w.put(s)\n}", "func New(seed int64) Rand {\n\treturn Rand{\n\t\tRand: rand.New(NewLockedSource64(rand.NewSource(seed))),\n\t}\n}", "func (p *prng) init(seed string) {\n\thv := sha512.Sum512([]byte(seed))\n\tcopy(p.buf[:], hv[:])\n\tp.ptr = 0\n}", "func NewGenerator(seed uint64, delay int) Generator {\n\tprng := rand.New(mt19937.New())\n\tprng.Seed(int64(seed))\n\treturn &generator{\n\t\tseed: seed,\n\t\tdelay: delay,\n\t\tprng: prng,\n\t}\n}", "func RandomizerInit() Randomizer {\n\t//initialize our starting seed and our letter dictionary\n\tr := Randomizer{\n\t\ttime.Now().Unix(),\n\t\t[52]string{\"A\", \"B\", \"C\", \"D\", \"E\", \"F\", \"G\", \"H\", \"I\", \"J\", \"K\", \"L\", \"M\", \"N\", \"O\", \"P\", \"Q\", \"R\", \"S\", \"T\", \"U\", \"V\", \"W\", \"X\", \"Y\", \"Z\", \"a\", \"b\", \"c\", \"d\", \"e\", \"f\", \"g\", \"h\", \"i\", \"j\", \"k\", \"l\", \"m\", \"n\", \"o\", \"p\", \"q\", \"r\", \"s\", \"t\", \"u\", \"v\", \"w\", \"x\", \"y\", \"z\"},\n\t}\n\n\treturn r\n}", "func (rng *Tunafish) Seed() ([]byte, error) {\n\tif !rng.Initialised() {\n\t\treturn nil, ErrNotInitialised\n\t}\n\n\tvar p = make([]byte, SeedFileLength)\n\t_, err := io.ReadFull(rng, p)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn p, nil\n}", "func getSeed() {\n\tpid := os.Getpid()\n\tvar pid64 int64\n\tpid64 = int64(pid)\n\ttimeNow := time.Now().UTC().UnixNano()\n\tseedInt := pid64 + timeNow\n\trand.Seed(seedInt)\n}", "func NewGenerator() Generator {\n\treturn Generator{\n\t\tcurrentState: rand.Intn(30),\n\t}\n}", "func TestSeed(t *testing.T) {\n\tzeroBytes := make([]byte, 32)\n\tsecurityDomain := []byte(\"test\")\n\tr := NewPRNG(securityDomain)\n\tr.Reseed(zeroBytes)\n}", "func Seed(n int) {\n\tdatastore.Seed(n)\n}", "func NewGenerator(h hash.Hash, seed []byte) io.ReadWriter {\n\tg := newGenerator(h, seed)\n\treturn &g\n}", "func NewSeed() Seed {\n\treturn SeedFromEntropy(frand.Entropy128())\n}", "func randWithSeed(max int) (rnd int) {\n\ttime.Sleep(1)\n\trand.Seed(time.Now().UnixNano())\n\trnd = rand.Intn(max)\n\treturn\n}", "func (board *Board) Seed() {\n\tr := rand.New(rand.NewSource(time.Now().UnixNano()))\n\n\ttimes := 0\n\n\tfor times == 0 {\n\t\ttimes = r.Intn(board.Rows * board.Columns)\n\t}\n\n\tfor t := 0; t < times; t++ {\n\t\ti, j := rand.Intn(board.Rows), rand.Intn(board.Columns)\n\n\t\tboard.Cells[i][j].SetAlive(true)\n\t}\n}", "func GenerateSeed(rand io.Reader) (string, error) {\n\tif rand == nil {\n\t\trand = cryptorand.Reader\n\t}\n\tbytes := make([]byte, 
32)\n\tif _, err := rand.Read(bytes); err != nil {\n\t\treturn \"\", err\n\t}\n\treturn hex.EncodeToString(bytes), nil\n}", "func (sg *SeedGenerator) Next() (*Seed, error) {\n\tseed := Seed{}\n\t_, err := rand.Read(seed[:])\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"[ SeedGenerator::Next ]\")\n\t}\n\n\treturn &seed, nil\n}", "func randSeed() uint32 {\n\treturn uint32(time.Now().UnixNano() + int64(os.Getpid()))\n}", "func (h *Hash) Seed() Seed {\n\th.initSeed()\n\treturn h.seed\n}", "func NewSeed(seed int64) SkipList {\n\treturn NewSeedEps(seed, eps)\n}", "func (rng *RandomNumberGenerator) SetSeed(seed int64) {\n\trng.seed = seed\n}", "func seedRandom() error {\n\t/* Get an int64 from the CSPRNG */\n\tb := make([]byte, 8)\n\t_, err := crand.Read(b)\n\tif nil != err {\n\t\treturn err\n\t}\n\ti := binary.LittleEndian.Uint64(b)\n\n\t/* Use it to seed the PRNG */\n\tmrand.Seed(int64(i))\n\n\treturn nil\n}", "func (g *Game) SeedRand() {\n\n\t//Find the center (approximate for odd height or width) Cell of the board\n\txMid := g.cols / 2\n\tyMid := g.rows / 2\n\n\t//TEMP placeholder for actual random number generator\n\trand := []int{0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1}\n\n\t//Iterate over a 4x4 square around the center Cell\n\ti := 0\n\tfor y := yMid - 1; y < yMid+3; y++ {\n\t\tfor x := xMid - 1; x < xMid+3; x++ {\n\t\t\tif rand[i] == 1 {\n\t\t\t\tg.state[y][x].Alive = !g.state[y][x].Alive\n\t\t\t}\n\t\t\ti++\n\t\t}\n\t}\n\n\t//Update the copy\n\tfor y := 0; y < int(g.rows); y++ {\n\t\tfor x := 0; x < int(g.cols); x++ {\n\t\t\tg.board[y][x] = g.state[y+1][x+1]\n\t\t}\n\t}\n\n\treturn\n}", "func NewRandFromSeed(seed []byte) *rand.Rand {\n\thash := sha256.Sum256(seed)\n\treturn rand.New(rand.NewSource(int64(binary.BigEndian.Uint64(hash[8:]))))\n}", "func Randomisieren () {\n\trand.Seed(time.Now().UnixNano())\n}", "func seedDice(seed uint64) {\n\trng := pcg.NewPCG64()\n\trng.Seed(rng.Random(), rng.Random(), rng.Random(), rng.Random())\n\trng.Seed(seed, DiceSeq, seed*seed, DiceSeq+1)\n\tdice.SetDefaultRandomness(rng)\n}", "func InitGenRand64(seed uint64) {\n\tC.init_genrand64(C.ulonglong(seed))\n}", "func (b *Handler) Seed() []byte {\n\tb.lock.RLock()\n\tdefer b.lock.RUnlock()\n\n\tcpy := make([]byte, len(b.seed))\n\tcopy(cpy, b.seed)\n\n\treturn cpy\n}", "func New(seed uint64) nhash.Hash64 {\n\ts := n(seed)\n\treturn s\n}", "func (this *XXHash64) SetSeed(seed uint64) {\n\tthis.seed = seed\n}", "func SeedPRNG() {\n\tb := make([]byte, 8)\n\t_, err := crand.Read(b)\n\tif nil != err {\n\t\tpanic(fmt.Sprintf(\"Rand: %v\", err))\n\t}\n\trand.Seed(int64(binary.LittleEndian.Uint64(b)))\n}", "func newDetermRand(seed []byte) io.Reader {\n\treturn &determRand{next: seed}\n}", "func (m *Model) Initialize(seed uint64) {\n\tr := rand.NewLockedRand(seed)\n\teps := m.Config.InitEps / mat.Float(m.Config.DimSeq)\n\tinitializers.Uniform(m.Proj.W.Value(), -eps, eps, r)\n\tinitializers.Constant(m.Proj.B.Value(), 1)\n}", "func Init() {\n\trand.Seed(time.Now().Unix())\n}", "func (kp *FromAddress) Seed() (string, error) {\n\treturn \"\", ErrNoSeed\n}", "func (rng *splitMix64Source) Seed64(seed uint64) {\n\trng.state = seed\n}", "func NewGenerator(opts ...Option) *Generator {\n\tg := &Generator{}\n\n\tfor _, opt := range opts {\n\t\topt.apply(g)\n\t}\n\n\t// Default time source\n\tif g.clock == nil {\n\t\tg.clock = &systemClock{}\n\t}\n\n\t// Default entropy source\n\tif g.entropy == nil {\n\t\tg.entropy = ulid.Monotonic(rand.New(rand.NewSource(g.clock.Now().UnixNano())), 0)\n\t}\n\n\treturn g\n}", 
"func SetSeed() error {\n\t// 为math\n\tseedByte, err := walletRand.GenerateSeedWithStrengthAndKeyLen(walletRand.KeyStrengthHard, walletRand.KeyLengthInt64)\n\tif err != nil {\n\t\treturn err\n\t}\n\tbytesBuffer := bytes.NewBuffer(seedByte)\n\tvar seed int64\n\tbinary.Read(bytesBuffer, binary.BigEndian, &seed)\n\trand.Seed(seed)\n\n\treturn nil\n}", "func sourceMathRand() {\n\tseed, _ := dice.CryptoInt64()\n\tdice.Source = rand.New(rand.NewSource(seed))\n}", "func New(seed int64) rand.Source64 {\n\tx := new(SplitMix64)\n\tx.Seed(seed)\n\treturn x\n}", "func GenerateNewSeed(Read func(buf []byte) (n int, err error)) ([]byte, error) {\n\tvar seed []byte\n\n\tif Read == nil {\n\t\tRead = rand.Read\n\t}\n\n\tfor {\n\t\t// random seed\n\t\tseed = make([]byte, 64)\n\n\t\t_, err := Read(seed)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// Ensure the seed can be used for generating a BLS keypair.\n\t\t// If not, we retry.\n\t\t_, err = generateKeys(seed)\n\t\tif err == nil {\n\t\t\tbreak\n\t\t}\n\n\t\tif err != io.EOF {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treturn seed, nil\n}", "func NewEventGenerator(seed int64) *EventGenerator {\n\treturn &EventGenerator{\n\t\tr: rand.New(rand.NewSource(seed)),\n\t\tg: gen.NewGenerator(seed + 1),\n\t}\n}", "func New(seedName string) (seed model.Seed) {\n\tswitch seedName {\n\tcase \"acorn\":\n\t\tseed = Acorn\n\tcase \"blinker\":\n\t\tseed = Blinker\n\tcase \"diehard\":\n\t\tseed = DieHard\n\tcase \"glider\":\n\t\tseed = Glider\n\tcase \"rpentomino\":\n\t\tseed = RPentomino\n\tdefault:\n\t\tpanic(\"unknown seed\")\n\t}\n\treturn\n}", "func GenStrategyRandom(typ Type, seed int) interface{} {\n\treturn newRandomNumber(typ)\n}", "func New() Randomizer {\n\treturn &randomizer{seededRand: mrand.New(mrand.NewSource(time.Now().UTC().UnixNano()))} //nolint:gosec //like it\n}" ]
[ "0.7424726", "0.73450845", "0.70709896", "0.70261484", "0.701139", "0.69750804", "0.69515306", "0.6946966", "0.69348735", "0.6905683", "0.69012666", "0.68930334", "0.6877657", "0.6824542", "0.6807385", "0.68068874", "0.6796626", "0.6760957", "0.67509663", "0.66529834", "0.661328", "0.65454555", "0.65454555", "0.65454555", "0.65454555", "0.64798015", "0.64773643", "0.64773643", "0.64773643", "0.64773643", "0.64773643", "0.64773643", "0.64773643", "0.64729255", "0.64679796", "0.64471334", "0.64404505", "0.63465255", "0.6346505", "0.6342012", "0.6303129", "0.624794", "0.6235616", "0.62301284", "0.6225549", "0.62013197", "0.61167383", "0.60926884", "0.608983", "0.60786027", "0.6069972", "0.6065348", "0.6047299", "0.60386187", "0.60276747", "0.60225", "0.599943", "0.59551775", "0.59380674", "0.5934623", "0.5925542", "0.5920551", "0.59191424", "0.5877219", "0.5877198", "0.5874037", "0.5869135", "0.5827092", "0.5815588", "0.580495", "0.58008856", "0.5793767", "0.57877845", "0.5786967", "0.578532", "0.5731154", "0.5720892", "0.5716691", "0.5692926", "0.56895137", "0.5686353", "0.56679404", "0.56643987", "0.5656709", "0.56426084", "0.5630735", "0.5603068", "0.5580271", "0.5572534", "0.5566897", "0.55595577", "0.55571496", "0.5546005", "0.5538865", "0.5519288", "0.5515567", "0.5513173", "0.5496691", "0.54862225", "0.5484496" ]
0.6204893
45
Given a string, this tests variants of buffer conversion: string with trailing 0's, string exactly filling the slice passed to CFieldString (simulating an exactly-full field), and first character (exactly filling the field).
func teststring(t *testing.T, s string) {
	buf := toint8(s)
	r := kstat.CFieldString(buf[:])
	if r != s {
		t.Fatalf("full buf mismatch: %q vs %q", s, r)
	}
	r = kstat.CFieldString(buf[:len(s)])
	if r != s {
		t.Fatalf("exact buf mismatch: %q vs %q", s, r)
	}
	r = kstat.CFieldString(buf[:len(s)+1])
	if r != s {
		t.Fatalf("string + one null mismatch: %q vs %q", s, r)
	}
	if len(s) > 1 {
		r = kstat.CFieldString(buf[:1])
		if r != s[:1] {
			t.Fatalf("first character mismatch: %q vs %q", s[:1], r)
		}
	}
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func TestCFieldString(t *testing.T) {\n\tteststring(t, \"this is a test string\")\n\tteststring(t, \"\")\n\tbuf := toint8(\"abc\\x00def\")\n\tr := kstat.CFieldString(buf[:])\n\tif r != \"abc\" {\n\t\tt.Fatalf(\"embedded null not properly handled: %q\", r)\n\t}\n}", "func cString(b []byte) string {\n\tn := 0\n\tfor n < len(b) && b[n] != 0 {\n\t\tn++\n\t}\n\treturn string(b[0:n])\n}", "func cstring(b []byte) string {\n\tvar i int\n\tfor i = 0; i < len(b) && b[i] != 0; i++ {\n\t}\n\treturn string(b[:i])\n}", "func TestStringWidthSlow(t *testing.T) {\n\tfor n := 1; n < 4; n++ {\n\t\tbz := make([]byte, n)\n\t\tfor {\n\t\t\twidth1 := widthOf(string(bz))\n\t\t\twidth2 := widthOfSlow(string(bz))\n\t\t\tif width1 == 0 {\n\t\t\t\tif isRepeatedWZJ(bz) {\n\t\t\t\t\t// these bytes encode one or more U+200D WZJ as UTF8.\n\t\t\t\t} else {\n\t\t\t\t\trequire.Fail(t, fmt.Sprintf(\"unexpected zero width string for bytes %X\", bz))\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\trequire.True(t, 0 < width1, \"got zero width for bytes %X\", bz)\n\t\t\t}\n\t\t\trequire.Equal(t, width1, width2)\n\t\t\tif !incBuffer(bz) {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n}", "func (suite *RunePartTestSuite) TestReadToZeroLengthBuffer() {\n\tpart := runePart{runeVal: 'a'}\n\tbuff := make([]byte, 0, 0)\n\tcount, err := part.Read(buff)\n\tsuite.Nil(err)\n\tsuite.Equal(0, count)\n\tsuite.Equal(\"\", string(buff))\n}", "func FromNTString(buf []byte) string {\n\tfor i := 0; i < len(buf); i++ {\n\t\tif buf[i] == 0 {\n\t\t\treturn string(buf[0:i])\n\t\t}\n\t}\n\treturn string(buf)\n}", "func TestStringZeroForNotEmptyString(t *testing.T) {\n\n\t// Arrange.\n\n\ts := \"Hello utilities\"\n\n\t// Act.\n\n\tresult := IsZero(s)\n\n\t// Assert.\n\n\tassert.False(t, result)\n}", "func fillString(t *testing.T, testname string, b *Builder, s string, n int, fus string) string {\n\tcheckRead(t, testname+\" (fill 1)\", b, s)\n\tfor ; n > 0; n-- {\n\t\tm, err := b.WriteString(fus)\n\t\tif m != len(fus) {\n\t\t\tt.Errorf(testname+\" (fill 2): m == %d, expected %d\", m, len(fus))\n\t\t}\n\t\tif err != nil {\n\t\t\tt.Errorf(testname+\" (fill 3): err should always be nil, found err == %s\", err)\n\t\t}\n\t\ts += fus\n\t\tcheckRead(t, testname+\" (fill 4)\", b, s)\n\t}\n\treturn s\n}", "func slicetostr(buf []uint8) string {\n\tend := 0\n\tfor i := range buf {\n\t\tend = i\n\t\tif buf[i] == 0 {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn string(buf[:end])\n}", "func testFCString(t testing.TB) {\n\tvar line = \"9900000100000007000000010000000000100000Contact Name 55586755520 \"\n\tr := NewReader(strings.NewReader(line))\n\tr.line = line\n\terr := r.parseFileControl()\n\tif err != nil {\n\t\tt.Errorf(\"%T: %s\", err, err)\n\t\tlog.Fatal(err)\n\t}\n\trecord := r.File.Control\n\tif record.String() != line {\n\t\tt.Errorf(\"\\nStrings do not match %s\\n %s\", line, record.String())\n\t}\n}", "func fillString(retunString string, toLength int) string {\n\tfor {\n\t\tlengtString := len(retunString)\n\t\tif lengtString < toLength {\n\t\t\tretunString = retunString + \":\"\n\t\t\tcontinue\n\t\t}\n\t\tbreak\n\t}\n\treturn retunString\n}", "func fillString(retunString string, toLength int) string {\n\tfor {\n\t\tlengtString := len(retunString)\n\t\tif lengtString < toLength {\n\t\t\tretunString = retunString + \":\"\n\t\t\tcontinue\n\t\t}\n\t\tbreak\n\t}\n\treturn retunString\n}", "func fillString(retunString string, toLength int) string {\n\tfor {\n\t\tlengtString := len(retunString)\n\t\tif lengtString < toLength {\n\t\t\tretunString = retunString + 
\":\"\n\t\t\tcontinue\n\t\t}\n\t\tbreak\n\t}\n\treturn retunString\n}", "func TestString(t *testing.T) {\n\tvar s string\n\tt.Log(s)\n\ts = \"hello\"\n\tt.Log(len(s))\n\t// s[1] = '3' it's only read type\n\ts = \"\\xE4\\xB8\\xA5\"\n\n\tc := []rune(s)\n\tprint(len(c))\n\tt.Logf(\"zhong unicode %x\", c[0])\n\tprint(s)\n}", "func ztString(bytes []byte) string {\n\tfor i, b := range bytes {\n\t\tif b == 0 {\n\t\t\treturn string(bytes[:i])\n\t\t}\n\t}\n\treturn \"\"\n}", "func safeString(str string) string {\n\tif len(str) > 0 && str[len(str)-1] != '\\x00' {\n\t\tstr = str + \"\\x00\"\n\t} else if len(str) == 0 {\n\t\tstr = \"\\x00\"\n\t}\n\treturn str\n}", "func safeString(str string) string {\n\tif len(str) > 0 && str[len(str)-1] != '\\x00' {\n\t\tstr = str + \"\\x00\"\n\t} else if len(str) == 0 {\n\t\tstr = \"\\x00\"\n\t}\n\treturn str\n}", "func safeString(str string) string {\n\tif len(str) > 0 && str[len(str)-1] != '\\x00' {\n\t\tstr = str + \"\\x00\"\n\t} else if len(str) == 0 {\n\t\tstr = \"\\x00\"\n\t}\n\treturn str\n}", "func safeString(str string) string {\n\tif len(str) > 0 && str[len(str)-1] != '\\x00' {\n\t\tstr = str + \"\\x00\"\n\t} else if len(str) == 0 {\n\t\tstr = \"\\x00\"\n\t}\n\treturn str\n}", "func CheckStringZero(str string) bool {\n\treturn len(strings.TrimSpace(str)) == 0\n}", "func check(t *testing.T, testname string, buf *Buffer, s string) {\n\tbytes := buf.Bytes()\n\tstr := buf.String()\n\tif buf.Len() != len(bytes) {\n\t\tt.Errorf(\"%s: buf.Len() == %d, len(buf.Bytes()) == %d\", testname, buf.Len(), len(bytes))\n\t}\n\n\tif buf.Len() != len(str) {\n\t\tt.Errorf(\"%s: buf.Len() == %d, len(buf.String()) == %d\", testname, buf.Len(), len(str))\n\t}\n\n\tif buf.Len() != len(s) {\n\t\tt.Errorf(\"%s: buf.Len() == %d, len(s) == %d\", testname, buf.Len(), len(s))\n\t}\n\n\tif string(bytes) != s {\n\t\tt.Errorf(\"%s: string(buf.Bytes()) == %q, s == %q\", testname, string(bytes), s)\n\t}\n}", "func cleanBytesToString(s []byte) string {\n\treturn strings.SplitN(string(s), \"\\000\", 2)[0]\n}", "func ParseString(buf []byte) (string, int, error) {\n\tif len(buf) < 1 {\n\t\treturn \"\", 0, errors.New(\"empty byte slice\")\n\t}\n\tpos := 0\n\n\tif buf[0]>>4 == 0x8 {\n\t\t// tiny string\n\t\treturn ParseTinyString(buf)\n\t} else if buf[0] < 0xd0 || buf[0] > 0xd2 {\n\t\treturn \"\", 0, errors.New(\"slice doesn't look like valid string\")\n\t}\n\n\t// how many bytes is the encoding for the string length?\n\treadAhead := int(1 << int(buf[pos]&0xf))\n\tpos++\n\n\t// decode the amount of bytes to read to get the string length\n\tsizeBytes := buf[pos : pos+readAhead]\n\tsizeBytes = append(make([]byte, 8), sizeBytes...)\n\tpos = pos + readAhead\n\n\t// decode the actual string length\n\tsize := int(binary.BigEndian.Uint64(sizeBytes[len(sizeBytes)-8:]))\n\treturn string(buf[pos : pos+size]), pos + size, nil\n}", "func ReadString(data []byte) (string, int64) {\n\tif len(data) == 0 {\n\t\treturn \"\", 0\n\t}\n\tnullidx := bytes.IndexByte(data, 0)\n\tif nullidx == -1 {\n\t\tdata = append(data, 0)\n\t\tnullidx = len(data) - 1\n\t}\n\tdata = Pad(data[:nullidx+1])\n\treturn string(bytes.TrimRight(data, \"\\x00\")), int64(len(data))\n}", "func First(n int, s string) (out string, ok bool) {\n\tif n <= len(s) {\n\t\tout, ok = UnsafeFirst(n, s), true\n\t}\n\treturn\n}", "func TestFCString(t *testing.T) {\n\ttestFCString(t)\n}", "func StringFillToExact(out string, lineLen int) string {\n\tif len(out) < lineLen {\n\t\tout = out + strings.Repeat(\" \", lineLen - len(out))\n\t} else if len(out) > 
lineLen {\n\t\tout = out[:lineLen]\n\t}\n\treturn out\n}", "func (util *stringUtil) FillZero(num string, digit int) string {\n\tfor fillNum := len(num) - digit; fillNum > 0; fillNum-- {\n\t\tnum = \"0\" + num\n\t}\n\treturn num\n}", "func FillString(message string, size int) string {\n\tmissingPositions := size - len(message)\n\treturn message + strings.Repeat(PADDING_CHARACTER, missingPositions)\n}", "func UnsafeFirst(n int, str string) (output string) {\n\toutput = str[0:n]\n\treturn\n}", "func TestString(t *testing.T) {\n\ttests := []struct {\n\t\tin *journey.Stepper\n\t\texpect string\n\t}{\n\t\t{\n\t\t\tin: journey.NewStepper(),\n\t\t\texpect: \"0000\",\n\t\t},\n\t\t{\n\t\t\tin: &journey.Stepper{\n\t\t\t\tSteps: []uint32{20},\n\t\t\t\tI: 0,\n\t\t\t},\n\t\t\texpect: \"0020\",\n\t\t},\n\t\t{\n\t\t\tin: &journey.Stepper{\n\t\t\t\tSteps: []uint32{10, 100, 1000},\n\t\t\t\tI: 2,\n\t\t\t},\n\t\t\texpect: \"0010_0100_1000\",\n\t\t},\n\t}\n\n\tfor i, test := range tests {\n\t\tgot := test.in.String()\n\t\tif got != test.expect {\n\t\t\tt.Errorf(\"%d - expect String to be equal %s, but got %s\", i, test.expect, got)\n\t\t}\n\t}\n\n}", "func TestStringWidthRandom(t *testing.T) {\n\tmax := 10 * 1024 * 1024\n\tfor i := 0; i < max; i++ {\n\t\tif i%(max/80) == 0 {\n\t\t\tfmt.Print(\".\")\n\t\t}\n\t\tbz := libs.RandBytes(12)\n\t\twidth1 := widthOf(string(bz))\n\t\twidth2 := widthOfSlow(string(bz))\n\t\tif width1 == 0 {\n\t\t\tif isRepeatedWZJ(bz) {\n\t\t\t\t// these bytes encode one or more U+200D WZJ as UTF8.\n\t\t\t} else {\n\t\t\t\trequire.Fail(t, \"unexpected zero width string\")\n\t\t\t}\n\t\t} else {\n\t\t\trequire.True(t, 0 < width1, \"got zero width for bytes %X\", bz)\n\t\t}\n\t\trequire.Equal(t, width2, width1,\n\t\t\t\"want %d but got %d the slow way: %X\",\n\t\t\twidth1, width2, bz)\n\t}\n}", "func TestHeader(t *testing.T) {\n\n\thdr := Header{\"MyHdr1\", []byte(\"a string\")}\n\tif hdr.String() != \"MyHdr1=\\\"a string\\\"\" {\n\t\tt.Errorf(\"Unexpected: %s\", hdr.String())\n\t}\n\n\thdr = Header{\"MyHdr2\", []byte(\"a longer string that will be truncated right here <-- so you wont see this part.\")}\n\tif hdr.String() != \"MyHdr2=\\\"a longer string that will be truncated right here \\\"(30 more bytes)\" {\n\t\tt.Errorf(\"Unexpected: %s\", hdr.String())\n\t}\n\n\thdr = Header{\"MyHdr3\", []byte{1, 2, 3, 4}}\n\tif hdr.String() != \"MyHdr3=\\\"\\\\x01\\\\x02\\\\x03\\\\x04\\\"\" {\n\t\tt.Errorf(\"Unexpected: %s\", hdr.String())\n\t}\n\n}", "func HaveNULL(str string) bool {\n\tif str == \"\" {\n\t\treturn false\n\t}\n\n\tfor _, ch := range str {\n\t\tif ch == 0 {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}", "func TrimBuffToString(bytes []byte) string {\n\tfor i, b := range bytes {\n\t\tif b == 0 {\n\t\t\treturn string(bytes[:i])\n\t\t}\n\t}\n\treturn string(bytes)\n}", "func testIATBHString(t testing.TB) {\n\tvar line = \"5220 FF3 US123456789 IATTRADEPAYMTCADUSD180621 1231380100000001\"\n\tr := NewReader(strings.NewReader(line))\n\tr.line = line\n\tif err := r.parseIATBatchHeader(); err != nil {\n\t\tt.Errorf(\"%T: %s\", err, err)\n\t}\n\trecord := r.IATCurrentBatch.GetHeader()\n\n\tif v := record.String(); v != line {\n\t\tt.Errorf(\"Strings do not match:\\n v=%q\\nline=%q\", v, line) // vertically aligned\n\t}\n}", "func FullWidth(str string) bool {\n\tif len(str) == 0 {\n\t\treturn true\n\t}\n\treturn rxFullWidth.MatchString(str)\n}", "func TestStringAdjustmentVariableLength(t *testing.T) {\n\tvar line = \"{8600}\"\n\tr := NewReader(strings.NewReader(line))\n\tr.line = 
line\n\n\terr := r.parseAdjustment()\n\texpected := r.parseError(NewTagMinLengthErr(10, len(r.line))).Error()\n\trequire.EqualError(t, err, expected)\n\n\tline = \"{8600}01CRDTUSD1234.56 NNN\"\n\tr = NewReader(strings.NewReader(line))\n\tr.line = line\n\n\terr = r.parseAdjustment()\n\trequire.ErrorContains(t, err, r.parseError(NewTagMaxLengthErr(errors.New(\"\"))).Error())\n\n\tline = \"{8600}01CRDTUSD1234.56****\"\n\tr = NewReader(strings.NewReader(line))\n\tr.line = line\n\n\terr = r.parseAdjustment()\n\trequire.ErrorContains(t, err, r.parseError(NewTagMaxLengthErr(errors.New(\"\"))).Error())\n\n\tline = \"{8600}01CRDTUSD1234.56*\"\n\tr = NewReader(strings.NewReader(line))\n\tr.line = line\n\n\terr = r.parseAdjustment()\n\trequire.Equal(t, err, nil)\n}", "func DecodeStringExactly(s string, l int) ([]byte, error) {\n\tb, err := hex.DecodeString(s)\n\tif err == nil {\n\t\tif len(b) != l {\n\t\t\terr = ErrDecodeLen\n\t\t}\n\t}\n\treturn b, err\n}", "func MustLenString(str string, min, max int) LenString {\n\ts := LenString{\n\t\tstr: str,\n\t\tmin: min,\n\t\tmax: max,\n\t}\n\terr := s.Validate()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn s\n}", "func (suite *RunePartTestSuite) TestReadTo1ByteBuffer() {\n\tpart := runePart{runeVal: 'a'}\n\tbuff := make([]byte, 1, 1)\n\tcount, err := part.Read(buff)\n\tsuite.Nil(err)\n\tsuite.Equal(1, count)\n\tsuite.Equal(\"a\", string(buff))\n}", "func First(n int, s string) (first string, ok bool) {\n\tif n > len(s) || n <= 0 {\n\t\treturn \"\", false\n\t}\n\treturn s[:n], true\n}", "func IsFullWidth(str string) bool {\n\tif IsNull(str) {\n\t\treturn true\n\t}\n\treturn rxFullWidth.MatchString(str)\n}", "func (i *DeserializerV1) ReadStringAt(p []byte, begin int) (string, int, error) {\n\tvar ans []byte\n\n\tvar idx = begin\n\n\t// find string length\n\t// b16 b8 <string-value>\n\tl := int(p[idx])<<8 + int(p[idx+1])\n\tidx++\n\n\tans = make([]byte, l)\n\n\tfor k := 0; k < l; k++ {\n\t\tidx++\n\t\tans[k] = p[idx]\n\t}\n\n\treturn string(ans), idx, nil\n}", "func IsZeroFilled(b []byte) bool {\n\thdr := (*reflect.SliceHeader)((unsafe.Pointer)(&b))\n\tdata := unsafe.Pointer(hdr.Data)\n\tlength := hdr.Len\n\tif length == 0 {\n\t\treturn true\n\t}\n\n\tif uintptr(data)&0x07 != 0 {\n\t\t// the data is not aligned, fallback to a simple way\n\t\treturn isZeroFilledSimple(b)\n\t}\n\n\tdataEnd := uintptr(data) + uintptr(length)\n\tdataWordsEnd := uintptr(dataEnd) & ^uintptr(0x07)\n\t// example:\n\t//\n\t// 012345678901234567\n\t// wwwwwwwwWWWWWWWWtt : w -- word 0; W -- word 1; t -- tail\n\t// ^\n\t// |\n\t// +-- dataWordsEnd\n\tfor ; uintptr(data) < dataWordsEnd; data = unsafe.Pointer(uintptr(data) + 8) {\n\t\tif *(*uint64)(data) != 0 {\n\t\t\treturn false\n\t\t}\n\t}\n\tfor ; uintptr(data) < dataEnd; data = unsafe.Pointer(uintptr(data) + 1) {\n\t\tif *(*uint8)(data) != 0 {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func StringLength(str string, min int, max int) bool {\n\tslen := utf8.RuneCountInString(str)\n\treturn slen >= min && slen <= max\n}", "func validateString(fl validator.FieldLevel) bool {\n\tvar err error\n\n\tlimit := 255\n\tparam := strings.Split(fl.Param(), `:`)\n\tif len(param) > 0 {\n\t\tlimit, err = strconv.Atoi(param[0])\n\t\tif err != nil {\n\t\t\tlimit = 255\n\t\t}\n\t}\n\n\tif lengthOfString := utf8.RuneCountInString(fl.Field().String()); lengthOfString > limit {\n\t\treturn false\n\t}\n\n\treturn true\n}", "func checkString(alph map[rune]int, input string) bool {\n\tfor _, rune := range input {\n\t\talph[rune]--\n\t\tif 
alph[rune] < 0 {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}", "func ValidPrintString(s string) bool {\n\tp := *(*unsafe.Pointer)(unsafe.Pointer(&s))\n\tn := uintptr(len(s))\n\n\tfor n >= 8 {\n\t\tif hasLess64(*(*uint64)(p), 0x20) || hasMore64(*(*uint64)(p), 0x7e) {\n\t\t\treturn false\n\t\t}\n\t\tp = unsafe.Pointer(uintptr(p) + 8)\n\t\tn -= 8\n\t}\n\n\tif n >= 4 {\n\t\tif hasLess32(*(*uint32)(p), 0x20) || hasMore32(*(*uint32)(p), 0x7e) {\n\t\t\treturn false\n\t\t}\n\t\tp = unsafe.Pointer(uintptr(p) + 4)\n\t\tn -= 4\n\t}\n\n\tvar x uint32\n\tswitch n {\n\tcase 3:\n\t\tx = 0x20000000 | uint32(*(*uint16)(p)) | uint32(*(*uint8)(unsafe.Pointer(uintptr(p) + 2)))<<16\n\tcase 2:\n\t\tx = 0x20200000 | uint32(*(*uint16)(p))\n\tcase 1:\n\t\tx = 0x20202000 | uint32(*(*uint8)(p))\n\tdefault:\n\t\treturn true\n\t}\n\treturn !(hasLess32(x, 0x20) || hasMore32(x, 0x7e))\n}", "func Valid(s string) bool {\n\tn := removeSpace(s)\n\tl := make([]byte, len(n))\n\t// fmt.Printf(\"%s --> %s, len: %d\", string(l), n, len(l))\n\todd := len(n) & 1\n\tif len(l) <= 1 {\n\t\treturn false\n\t}\n\tp := 0\n\tresult := 0\n\tfor x := 0; x < len(n); x++ {\n\t\tnumber := rune(n[x])\n\t\ti, err := strconv.Atoi(string(number))\n\t\tif err != nil {\n\t\t\treturn false\n\t\t}\n\n\t\tif err == nil {\n\t\t\tif (p+odd)%2 == 0 {\n\t\t\t\ti = i * 2\n\t\t\t\tif i > 9 {\n\t\t\t\t\ti = i - 9\n\t\t\t\t}\n\t\t\t}\n\t\t\td := strconv.Itoa(i)\n\t\t\tl[p] = d[0]\n\t\t\tp++\n\t\t}\n\t\tresult += i\n\t}\n\tl = l[:p]\n\n\treturn result%2 == 0\n\n}", "func WriteString(data []byte, str string, stype string_t, pos *int, l int) {\n switch stype {\n case NULLSTR:\n checkSize(len(data[*pos:]), len(str))\n // Write the string and then terminate with 0x00 byte.\n copy(data[*pos:], str)\n checkSize(len(data[*pos:]), len(str) + 1)\n *pos += len(str)\n data[*pos] = 0x00\n *pos++\n\n case LENENCSTR:\n // Write the encoded length.\n WriteLenEncInt(data, uint64(len(str)), pos)\n // Then write the string as a FIXEDSTR.\n WriteString(data, str, FIXEDSTR, pos, l)\n\n case FIXEDSTR:\n\n checkSize(len(data[*pos:]), l)\n // Pads the string with 0's to fill the specified length l.\n copy(data[*pos:*pos+l], str)\n *pos += l\n\n case EOFSTR:\n\n checkSize(len(data[*pos:]), len(str))\n // Copies the string into the data.\n *pos += copy(data[*pos:], str)\n }\n}", "func resolveBlankPaddedChar(s string, t *types.T) string {\n\tif t.Oid() == oid.T_bpchar {\n\t\t// Pad spaces on the right of the string to make it of length specified in\n\t\t// the type t.\n\t\treturn fmt.Sprintf(\"%-*v\", t.Width(), s)\n\t}\n\treturn s\n}", "func First(n int, str string) (output string, ok bool) {\n\tif n > len(str) {\n\t\treturn\n\t}\n\tok = true\n\toutput = str[0:n]\n\treturn\n}", "func isMsgPackString(b byte) bool {\n\treturn (0xbf&b) == b || b == 0xd9 || b == 0xda || b == 0xdb\n}", "func Test_invalidBitStrings(t *testing.T) {\n\n\tbs1 := &e2sm_rc_pre_v2.BitString{\n\t\tValue: []byte{0xab, 0xbc},\n\t\t//Value: []byte{0xab, 0xbc, 0x00}, // - correct set of bytes to satisfy byte length constraint\n\t\tLen: 22,\n\t}\n\t_, err := newBitString(bs1)\n\t//assert.NilError(t, err)\n\tassert.ErrorContains(t, err, \"bytes are required for length\")\n\n\tbs2 := &e2sm_rc_pre_v2.BitString{\n\t\tValue: []byte{0xab, 0xbc, 0xcf},\n\t\t//Value: []byte{0xab, 0xbc, 0xcc}, // - a correct set of bytes to satisfy octet-alignment constraint\n\t\tLen: 22,\n\t}\n\t_, err = newBitString(bs2)\n\t//assert.NilError(t, err)\n\tassert.ErrorContains(t, err, \"bit string is NOT octet-aligned\")\n\n\t// A valid 
bit string with proper length and proper octet alignment\n\t// 25 bits need 4 bytes to be encoded successfully\n\t// 4 bytes is 32 bits, 32-25 = 7 (unused bits)\n\t// BitString.Value byte array should be shifted on 7 (unused) bits to the left to be Octet-aligned and satisfy APER encoding rules\n\tbs3 := &e2sm_rc_pre_v2.BitString{\n\t\tValue: []byte{0x09, 0xAB, 0xCD, 0x80},\n\t\tLen: 25,\n\t}\n\tbsC, err := newBitString(bs3)\n\tassert.NilError(t, err)\n\n\tprotoBitString, err := decodeBitString(bsC)\n\tassert.NilError(t, err)\n\t//assert.Assert(t, protoBitString != nil)\n\tassert.Equal(t, int(protoBitString.Len), 25, \"unexpected bit string length\")\n\tassert.DeepEqual(t, protoBitString.Value, []byte{0x09, 0xab, 0xcd, 0x80})\n}", "func String(input []byte, startBitPos int, numOfBits int) (result string, resultPtr *string, err error) {\n\tif Len(input)-startBitPos < numOfBits {\n\t\treturn \"\", nil, errors.New(\"Input is less than \" + string(numOfBits) + \" bits\")\n\t}\n\n\ttmpArr, _, err := SubBits(input, startBitPos, numOfBits)\n\tresult = string(tmpArr)\n\treturn result, &result, err\n}", "func foobar10(s string) bool {\n\treturn len(s) == 0\n}", "func First(n int, s string) (first string, ok bool) {\n\tif len(s) < n {\n\t\treturn \"\", false\n\t}\n\treturn s[0:n], true\n}", "func UnsafeFirst(n int, s string) string {\n\treturn s[0:n]\n}", "func ReadString(data []byte, stype string_t, pos *int, l int) string {\n buf := bytes.NewBuffer(data[*pos:])\n switch stype {\n case NULLSTR:\n line, err := buf.ReadBytes(byte(0x00))\n \t\tif err != nil {\n \t\t log.Fatal(err)\n \t\t}\n *pos += len(line)\n return string(line)\n case LENENCSTR:\n n := ReadLenEncInt(data, pos)\n if n == 0 {\n break\n }\n buf.ReadByte()\n temp := make([]byte, n)\n n2, err := buf.Read(temp)\n if err != nil {\n log.Fatal(err)\n } else if n2 != n {\n log.Fatal(fmt.Sprintf(\"Read %d, expected %d\", n2, n))\n }\n *pos += n\n return string(temp)\n case FIXEDSTR, EOFSTR:\n temp := make([]byte, l)\n n2, err := buf.Read(temp)\n if err != nil {\n log.Fatal(err)\n } else if n2 != l {\n log.Fatal(fmt.Sprintf(\"Read %d, expected %d\", n2, l))\n }\n *pos += l\n return string(temp)\n }\n return \"\"\n}", "func VariableWidth(str string) bool {\n\tif len(str) == 0 {\n\t\treturn true\n\t}\n\treturn rxHalfWidth.MatchString(str) && rxFullWidth.MatchString(str)\n}", "func Valid(s string) bool { return Convert(s) == s && s != \"\" }", "func UnsafeFirst(n int, s string) string {\n\treturn s[:n]\n}", "func (f genHelperDecoder) DecStringZC(v []byte) string { return f.d.stringZC(v) }", "func UnsafeFirst(n int, s string) (out string) {\n\treturn s[:n]\n}", "func String(s string) bool {\n\tfor i := 0; i < len(s); i++ {\n\t\tif b := s[i]; b < '0' || b > '9' {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func isZero(buffer []byte) bool {\n\tfor i := range buffer {\n\t\tif buffer[i] != 0 {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func (rr *Reader) ReadSimpleString(dst []byte) ([]byte, error) {\n\tif err := rr.expect(TypeSimpleString); err != nil {\n\t\treturn nil, err\n\t}\n\treturn rr.readLine(dst)\n}", "func TestFilterString(t *testing.T) {\n\tt.Parallel()\n\tvar tests = []struct {\n\t\ts []string\n\t\texpected []string\n\t}{\n\t\t{[]string{\"foo\", \"bar\", \"baz\"}, []string{\"bar\", \"baz\"}},\n\t\t{[]string{\"foo\", \"\\u0062\\u0061\\u0072\", \"baz\"}, []string{\"bar\", \"baz\"}},\n\t\t{[]string{\"a\", \"ab\", \"abc\"}, []string{}},\n\t\t{[]string{}, []string{}},\n\t}\n\tfor _, test := range tests {\n\t\tactual := 
primitives.FilterString(test.s, func(s string) bool {\n\t\t\treturn strings.HasPrefix(s, \"ba\")\n\t\t})\n\t\tassert.True(t, primitives.EqSlices(&actual, &test.expected), \"Expected FilterString(%q, fn) to be %q, got %v\", test.s, test.expected, actual)\n\t}\n}", "func (p *Stream) ReadString() (string, *base.Error) {\n\t// empty string\n\tv := p.readFrame[p.readIndex]\n\tif v == 128 {\n\t\tif p.CanRead() {\n\t\t\tp.gotoNextReadByteUnsafe()\n\t\t\treturn \"\", nil\n\t\t}\n\t} else if v > 128 && v < 191 {\n\t\tstrLen := int(v - 128)\n\t\tif p.isSafetyReadNBytesInCurrentFrame(strLen + 2) {\n\t\t\tif p.readFrame[p.readIndex+strLen+1] == 0 {\n\t\t\t\tb := p.readFrame[p.readIndex+1 : p.readIndex+strLen+1]\n\t\t\t\tif isUTF8Bytes(b) {\n\t\t\t\t\tp.readIndex += strLen + 2\n\t\t\t\t\treturn string(b), nil\n\t\t\t\t}\n\t\t\t}\n\t\t} else if p.hasNBytesToRead(strLen + 2) {\n\t\t\treadStart := p.GetReadPos()\n\t\t\tb := make([]byte, strLen)\n\t\t\tcopyBytes := copy(b, p.readFrame[p.readIndex+1:])\n\t\t\tp.readIndex += copyBytes + 1\n\t\t\tif p.readIndex == streamBlockSize {\n\t\t\t\tp.readSeg++\n\t\t\t\tp.readFrame = *(p.frames[p.readSeg])\n\t\t\t\tp.readIndex = copy(b[copyBytes:], p.readFrame)\n\t\t\t}\n\t\t\tif p.readFrame[p.readIndex] == 0 && isUTF8Bytes(b) {\n\t\t\t\tp.gotoNextReadByteUnsafe()\n\t\t\t\treturn string(b), nil\n\t\t\t}\n\t\t\tp.SetReadPos(readStart)\n\t\t}\n\t} else if v == 191 {\n\t\treadStart := p.GetReadPos()\n\t\tstrLen := -1\n\n\t\tif p.isSafetyReadNBytesInCurrentFrame(5) {\n\t\t\tb := p.readFrame[p.readIndex:]\n\t\t\tstrLen = int(uint32(b[1])|\n\t\t\t\t(uint32(b[2])<<8)|\n\t\t\t\t(uint32(b[3])<<16)|\n\t\t\t\t(uint32(b[4])<<24)) - 6\n\t\t\tp.readIndex += 5\n\t\t} else if p.hasNBytesToRead(5) {\n\t\t\tb := p.readNBytesCrossFrameUnsafe(5)\n\t\t\tstrLen = int(uint32(b[1])|\n\t\t\t\t(uint32(b[2])<<8)|\n\t\t\t\t(uint32(b[3])<<16)|\n\t\t\t\t(uint32(b[4])<<24)) - 6\n\t\t}\n\n\t\tif strLen > 62 {\n\t\t\tif p.isSafetyReadNBytesInCurrentFrame(strLen + 1) {\n\t\t\t\tif p.readFrame[p.readIndex+strLen] == 0 {\n\t\t\t\t\tb := p.readFrame[p.readIndex : p.readIndex+strLen]\n\t\t\t\t\tif isUTF8Bytes(b) {\n\t\t\t\t\t\tp.readIndex += strLen + 1\n\t\t\t\t\t\treturn string(b), nil\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t} else if p.hasNBytesToRead(strLen + 1) {\n\t\t\t\tb := make([]byte, strLen)\n\t\t\t\treads := 0\n\t\t\t\tfor reads < strLen {\n\t\t\t\t\treadLen := copy(b[reads:], p.readFrame[p.readIndex:])\n\t\t\t\t\treads += readLen\n\t\t\t\t\tp.readIndex += readLen\n\t\t\t\t\tif p.readIndex == streamBlockSize {\n\t\t\t\t\t\tp.gotoNextReadFrameUnsafe()\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif p.readFrame[p.readIndex] == 0 && isUTF8Bytes(b) {\n\t\t\t\t\tp.gotoNextReadByteUnsafe()\n\t\t\t\t\treturn string(b), nil\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tp.SetReadPos(readStart)\n\t}\n\n\treturn \"\", base.ErrStream\n}", "func DecodeAndCheckStringToBytes(s string) ([]byte, HexLineType, uint32, error) {\n\tlenAs16 := uint16(len(s))\n\tconverted := ConvertBuffer(lenAs16, []byte(s))\n\tif converted == nil {\n\t\treturn nil, HexLineType(0), 0, errors.New(\"convert buffer failed\")\n\t}\n\tvar addr uint32\n\tlt, ok := ExtractLineType(converted)\n\tif !ok {\n\t\treturn nil, DataLine, 0, NewEncodeDecodeError(fmt.Sprintf(\"unable to extract line type from: %s\", s))\n\t}\n\tif lt == DataLine {\n\t\taddr = (uint32(converted[1]) * 256) + (uint32(converted[2]))\n\t}\n\tif ok := ValidBufferLength(lenAs16, converted); ok == false {\n\t\treturn nil, lt, addr, NewEncodeDecodeError(fmt.Sprintf(\"expected buffer length to be ok, but 
wasn't: %s\", s))\n\t}\n\tif ok := CheckChecksum(lenAs16, converted); ok == false {\n\t\treturn nil, lt, addr, NewEncodeDecodeError(fmt.Sprintf(\"expected checksum to be ok, but wasn't:%s\", s))\n\t}\n\treturn converted, lt, addr, nil\n}", "func removeZeroPadding(str string) string {\n\tvar buffer bytes.Buffer\n\tfoundNonZeroChar := false\n\tfor _, ch := range str {\n\t\tif string(ch) == \"0\" && !foundNonZeroChar {\n\t\t\tcontinue\n\t\t}\n\t\tfoundNonZeroChar = true\n\t\tbuffer.WriteString(string(ch))\n\t}\n\treturn buffer.String()\n}", "func readString(data []byte) string {\n\t// Find the index of first 0x00.\n\ti := bytes.IndexByte(data, byte(0x00))\n\tif i == -1 {\n\t\t// If 0x00 is not found, return all the slice.\n\t\ti = len(data)\n\t}\n\treturn string(data[:i])\n}", "func StringLength(str string, params ...string) bool {\n\tif len(params) == 2 {\n\t\tstrLength := utf8.RuneCountInString(str)\n\t\tmin, _ := ToInt(params[0])\n\t\tmax, _ := ToInt(params[1])\n\t\treturn strLength >= int(min) && strLength <= int(max)\n\t}\n\n\treturn false\n}", "func consume(s *string, ch byte) (ok bool) {\n\tif *s == \"\" || (*s)[0] != ch {\n\t\treturn false\n\t}\n\t*s = (*s)[1:]\n\treturn true\n}", "func StringToBytes(str string) []byte {\n\t// Empty strings may not allocate a backing array, so we have to check first\n\tif len(str) == 0 {\n\t\t// It makes sense to return a non-nil empty byte slice since we're passing in a non-nil (although empty) string\n\t\treturn []byte{}\n\t}\n\treturn (*[0x7fff0000]byte)(unsafe.Pointer(\n\t\t(*reflect.StringHeader)(unsafe.Pointer(&str)).Data),\n\t)[:len(str):len(str)]\n}", "func nonemptyString(s string) *string {\n\tif s == \"\" {\n\t\treturn nil\n\t}\n\treturn &s\n}", "func packString(Data string, pad *scratchpad) {\n\t// Create the initial allocation and define the header.\n\ta := make([]byte, 5)\n\ta[0] = 'm'\n\n\t// Write the length.\n\tntohl32(uint32(len(Data)), a, 1)\n\n\t// Append the header.\n\tpad.endAppend(a...)\n\n\t// Append the data.\n\tpad.endAppend([]byte(Data)...)\n}", "func packPUcharString(p *C.uchar) (raw string) {\n\tif p != nil && *p != 0 {\n\t\th := (*stringHeader)(unsafe.Pointer(&raw))\n\t\th.Data = uintptr(unsafe.Pointer(p))\n\t\tfor *p != 0 {\n\t\t\tp = (*C.uchar)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + 1)) // p++\n\t\t}\n\t\th.Len = int(uintptr(unsafe.Pointer(p)) - h.Data)\n\t}\n\treturn\n}", "func (q *QQwry) readString(offset uint32) []byte {\n\tq.setOffset(int64(offset))\n\tdata := make([]byte, 0, 30)\n\tbuf := make([]byte, 1)\n\tfor {\n\t\tbuf = q.readData(1)\n\t\tif buf[0] == 0 {\n\t\t\tbreak\n\t\t}\n\t\tdata = append(data, buf[0])\n\t}\n\treturn data\n}", "func probablyV0Base58Addr(s string) bool {\n\t// Ensure the length is one of the possible values for supported version 0\n\t// addresses.\n\tif len(s) != 35 && len(s) != 53 {\n\t\treturn false\n\t}\n\n\t// The modified base58 alphabet used by Decred for version 0 addresses is:\n\t// 123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz\n\tfor _, r := range s {\n\t\tif r < '1' || r > 'z' ||\n\t\t\tr == 'I' || r == 'O' || r == 'l' ||\n\t\t\t(r > '9' && r < 'A') || (r > 'Z' && r < 'a') {\n\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}", "func (suite *IntPartTestSuite) TestReadToZeroLengthBuffer() {\n\tpart, _ := newIntPartFromString(\"9\")\n\tbuff := make([]byte, 0, 0)\n\tcount, _ := part.Read(buff)\n\tsuite.Equal(0, count)\n}", "func zeroFill(prefix string, width int, suffix string) string {\n\treturn prefix + strings.Repeat(\"0\", width-len(suffix)) + suffix\n}", "func 
zeroFill(prefix string, width int, suffix string) string {\n\treturn prefix + strings.Repeat(\"0\", width-len(suffix)) + suffix\n}", "func zeroFill(prefix string, width int, suffix string) string {\n\treturn prefix + strings.Repeat(\"0\", width-len(suffix)) + suffix\n}", "func charsToString(ca [65]int8) string {\n\ts := make([]byte, len(ca))\n\tvar lens int\n\tfor ; lens < len(ca); lens++ {\n\t\tif ca[lens] == 0 {\n\t\t\tbreak\n\t\t}\n\t\ts[lens] = uint8(ca[lens])\n\t}\n\treturn string(s[0:lens])\n}", "func First(n int, digits string) (first string, ok bool) {\n\tif n > len(digits) {\n\t\treturn \"\", false\n\t}\n\treturn UnsafeFirst(n, digits), true\n}", "func IsZeroFilled(b []byte) bool {\n\treturn isZeroFilledSimple(b)\n}", "func (e *Encoder) PrependUtf8(str string) (uint64, error) {\n\tif e == nil {\n\t\treturn 0, errors.New(errors.KsiInvalidArgumentError)\n\t}\n\n\tvar (\n\t\tstrLen = uint64(len(str))\n\t\tbufLen = uint64(len(e.buffer))\n\t)\n\t// Verify buffer capacity.\n\tif (e.position+uint64(1) < (strLen + 1)) || bufLen <= e.position {\n\t\treturn 0, errors.New(errors.KsiBufferOverflow).AppendMessage(\"Buffer to serialize string is too small.\")\n\t}\n\n\te.buffer[e.position] = 0\n\tc := uint64(copy(e.buffer[e.position-strLen:], str))\n\te.position -= c + 1\n\n\treturn c + 1, nil\n}", "func cStr(str string) (cstr *byte, free func()) {\n\tbs := []byte(str)\n\tif len(bs) == 0 || bs[len(bs)-1] != 0 {\n\t\tbs = append(bs, 0)\n\t}\n\treturn &bs[0], func() {\n\t\truntime.KeepAlive(bs)\n\t\tbs = nil\n\t}\n}", "func DecodeAndCheckStringToBytes(s string) ([]byte, HexLineType, uint32, error) {\n\tlenAs16 := uint16(len(s))\n\tconverted := ConvertBuffer(lenAs16, []byte(s))\n\tvar addr uint32\n\tlt, ok := ExtractLineType(converted)\n\tif !ok {\n\t\treturn nil, DataLine, 0, NewEncodeDecodeError(fmt.Sprintf(\"unable to extract line type from: %s\", s))\n\t}\n\tif lt == DataLine {\n\t\taddr = (uint32(converted[1]) * 256) + (uint32(converted[2]))\n\t}\n\tif ok := ValidBufferLength(lenAs16, converted); ok == false {\n\t\treturn nil, lt, addr, NewEncodeDecodeError(fmt.Sprintf(\"expected buffer length to be ok, but wasn't: %s\", s))\n\t}\n\tif ok := CheckChecksum(lenAs16, converted); ok == false {\n\t\treturn nil, lt, addr, NewEncodeDecodeError(fmt.Sprintf(\"expected checksum to be ok, but wasn't:%s\", s))\n\t}\n\treturn converted, lt, addr, nil\n}", "func First(n int, s string) (string, bool) {\n\tif n > len(s) {\n\t\treturn \"\", false\n\t}\n\n\treturn s[:n], true\n}", "func has0xPrefix(str string) bool {\n\treturn len(str) >= 2 && str[0] == '0' && (str[1] == 'x' || str[1] == 'X')\n}", "func TestZeroLength(t *testing.T) {\n\tkey1, err := NewFixedLengthKeyFromReader(os.Stdin, 0)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer key1.Wipe()\n\tif key1.data != nil {\n\t\tt.Error(\"Fixed length key from reader contained data\")\n\t}\n\n\tkey2, err := NewKeyFromReader(bytes.NewReader(nil))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer key2.Wipe()\n\tif key2.data != nil {\n\t\tt.Error(\"Key from empty reader contained data\")\n\t}\n}", "func pad(b *bytes.Buffer, str string) {\n\tif b.Len() == 0 {\n\t\treturn\n\t}\n\tb.WriteString(str)\n}", "func UnsafeFirst(n int, digits string) string {\n\treturn digits[0:n]\n}", "func appendStringComplex(dst []byte, s string, i int) []byte {\n\tstart := 0\n\tfor i < len(s) {\n\t\tb := s[i]\n\t\tif b >= utf8.RuneSelf {\n\t\t\tr, size := utf8.DecodeRuneInString(s[i:])\n\t\t\tif r == utf8.RuneError && size == 1 {\n\t\t\t\t// In case of error, first append previous 
simple characters to\n\t\t\t\t// the byte slice if any and append a remplacement character code\n\t\t\t\t// in place of the invalid sequence.\n\t\t\t\tif start < i {\n\t\t\t\t\tdst = append(dst, s[start:i]...)\n\t\t\t\t}\n\t\t\t\tdst = append(dst, `\\ufffd`...)\n\t\t\t\ti += size\n\t\t\t\tstart = i\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\ti += size\n\t\t\tcontinue\n\t\t}\n\t\tif b >= 0x20 && b <= 0x7e && b != '\\\\' && b != '\"' {\n\t\t\ti++\n\t\t\tcontinue\n\t\t}\n\t\t// We encountered a character that needs to be encoded.\n\t\t// Let's append the previous simple characters to the byte slice\n\t\t// and switch our operation to read and encode the remainder\n\t\t// characters byte-by-byte.\n\t\tif start < i {\n\t\t\tdst = append(dst, s[start:i]...)\n\t\t}\n\t\tswitch b {\n\t\tcase '\"', '\\\\':\n\t\t\tdst = append(dst, '\\\\', b)\n\t\tcase '\\b':\n\t\t\tdst = append(dst, '\\\\', 'b')\n\t\tcase '\\f':\n\t\t\tdst = append(dst, '\\\\', 'f')\n\t\tcase '\\n':\n\t\t\tdst = append(dst, '\\\\', 'n')\n\t\tcase '\\r':\n\t\t\tdst = append(dst, '\\\\', 'r')\n\t\tcase '\\t':\n\t\t\tdst = append(dst, '\\\\', 't')\n\t\tdefault:\n\t\t\tdst = append(dst, '\\\\', 'u', '0', '0', hex[b>>4], hex[b&0xF])\n\t\t}\n\t\ti++\n\t\tstart = i\n\t}\n\tif start < len(s) {\n\t\tdst = append(dst, s[start:]...)\n\t}\n\treturn dst\n}", "func UnsafeFirst(n int, s string) string {\n\treturn s[:n] // fingers in ears, eyes closed, \"la la la la la ...\"\n}", "func TestStringSenderToReceiverVariableLength(t *testing.T) {\n\tvar line = \"{7072}\"\n\tr := NewReader(strings.NewReader(line))\n\tr.line = line\n\n\terr := r.parseSenderToReceiver()\n\trequire.Nil(t, err)\n\n\tline = \"{7072} NNN\"\n\tr = NewReader(strings.NewReader(line))\n\tr.line = line\n\n\terr = r.parseSenderToReceiver()\n\trequire.ErrorContains(t, err, r.parseError(NewTagMaxLengthErr(errors.New(\"\"))).Error())\n\n\tline = \"{7072}**************\"\n\tr = NewReader(strings.NewReader(line))\n\tr.line = line\n\n\terr = r.parseSenderToReceiver()\n\trequire.ErrorContains(t, err, r.parseError(NewTagMaxLengthErr(errors.New(\"\"))).Error())\n\n\tline = \"{7072}*\"\n\tr = NewReader(strings.NewReader(line))\n\tr.line = line\n\n\terr = r.parseSenderToReceiver()\n\trequire.Equal(t, err, nil)\n}", "func ParseCString(byteSlice []byte) string {\n\tstrIndex := strings.Index(string(byteSlice), \"\\x00\")\n\tstringValue := string(byteSlice[:strIndex])\n\treturn stringValue\n}" ]
[ "0.71062356", "0.61365825", "0.5933366", "0.5675437", "0.5614959", "0.55958104", "0.5578647", "0.55673254", "0.54978114", "0.5477876", "0.5457079", "0.5457079", "0.5457079", "0.5429246", "0.53562295", "0.5353006", "0.5353006", "0.5353006", "0.5353006", "0.53007644", "0.52962834", "0.5260698", "0.52143425", "0.51773053", "0.5176751", "0.51728725", "0.5160319", "0.51590073", "0.51399016", "0.50388", "0.50368035", "0.5024129", "0.50232095", "0.5016048", "0.50128186", "0.5007546", "0.5007232", "0.4999451", "0.49973577", "0.49944547", "0.49868926", "0.49804717", "0.4976304", "0.49630478", "0.49623364", "0.49576578", "0.4948827", "0.49475986", "0.49434328", "0.49427435", "0.49306062", "0.49236864", "0.49223492", "0.49155205", "0.49131584", "0.49124235", "0.49077758", "0.49058923", "0.49008515", "0.48984423", "0.4888983", "0.48888472", "0.4888095", "0.48794717", "0.48726502", "0.48703933", "0.48690245", "0.48613694", "0.48530003", "0.48313606", "0.48253077", "0.48171675", "0.48104262", "0.48088014", "0.48040587", "0.47845885", "0.4783151", "0.47824383", "0.47820944", "0.47808155", "0.47791943", "0.4778878", "0.4774444", "0.4774444", "0.4774444", "0.47673452", "0.47668007", "0.47651652", "0.47632167", "0.47577894", "0.4751501", "0.4750586", "0.47468156", "0.47412503", "0.4737612", "0.47250196", "0.47234675", "0.4715803", "0.47145128", "0.47068414" ]
0.7072567
1
This function is sufficiently tricky that I want to test it directly, including some torture tests.
func TestCFieldString(t *testing.T) {
	teststring(t, "this is a test string")
	teststring(t, "")
	buf := toint8("abc\x00def")
	r := kstat.CFieldString(buf[:])
	if r != "abc" {
		t.Fatalf("embedded null not properly handled: %q", r)
	}
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func TestGetNone4A(t *testing.T) {\n}", "func TestEmptyPrewrite4A(t *testing.T) {\n}", "func TestGetEmpty4A(t *testing.T) {\n}", "func TestSinglePrewrite4A(t *testing.T) {\n}", "func almostOkayFunction() {\n\treturn nil\n}", "func TestPrewriteMultiple4A(t *testing.T) {\n}", "func TestGetValueTs4A(t *testing.T) {\n}", "func TestGetValue4A(t *testing.T) {\n}", "func TestPrewriteWritten4A(t *testing.T) {\n}", "func teststring(t *testing.T, s string) {\n\tbuf := toint8(s)\n\tr := kstat.CFieldString(buf[:])\n\tif r != s {\n\t\tt.Fatalf(\"full buf mismatch: %q vs %q\", s, r)\n\t}\n\tr = kstat.CFieldString(buf[:len(s)])\n\tif r != s {\n\t\tt.Fatalf(\"exact buf mismatch: %q vs %q\", s, r)\n\t}\n\tr = kstat.CFieldString(buf[:len(s)+1])\n\tif r != s {\n\t\tt.Fatalf(\"string + one null mismatch: %q vs %q\", s, r)\n\t}\n\tif len(s) > 1 {\n\t\tr = kstat.CFieldString(buf[:1])\n\t\tif r != s[:1] {\n\t\t\tt.Fatalf(\"first character mismatch: %q vs %q\", s[:1], r)\n\t\t}\n\t}\n}", "func TestPrewriteWrittenNoConflict4A(t *testing.T) {\n}", "func TestCommitMissingPrewrite4a(t *testing.T) {\n}", "func check(t *testing.T, testname string, buf *Buffer, s string) {\n\tbytes := buf.Bytes()\n\tstr := buf.String()\n\tif buf.Len() != len(bytes) {\n\t\tt.Errorf(\"%s: buf.Len() == %d, len(buf.Bytes()) == %d\", testname, buf.Len(), len(bytes))\n\t}\n\n\tif buf.Len() != len(str) {\n\t\tt.Errorf(\"%s: buf.Len() == %d, len(buf.String()) == %d\", testname, buf.Len(), len(str))\n\t}\n\n\tif buf.Len() != len(s) {\n\t\tt.Errorf(\"%s: buf.Len() == %d, len(s) == %d\", testname, buf.Len(), len(s))\n\t}\n\n\tif string(bytes) != s {\n\t\tt.Errorf(\"%s: string(buf.Bytes()) == %q, s == %q\", testname, string(bytes), s)\n\t}\n}", "func TestHardCoded(t *testing.T) {\n\tc1, err := SigCompress(sig1big)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tt.Logf(\"big1:\\n%x\\ncom1:\\n%x\\n\", sig1big, c1)\n\n\tc2, err := SigCompress(sig2big)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tt.Logf(\"big2:\\n%x\\ncom2:\\n%x\\n\", sig2big, c2)\n\n\tc3, err := SigCompress(sig3big)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tt.Logf(\"big3:\\n%x\\ncom3:\\n%x\\n\", sig3big, c3)\n\n\tr1 := SigDecompress(c1)\n\tt.Logf(\"dec1:\\n%x\\n\", r1)\n\n\tr2 := SigDecompress(c2)\n\tt.Logf(\"dec1:\\n%x\\n\", r2)\n\n\tr3 := SigDecompress(c3)\n\tt.Logf(\"dec1:\\n%x\\n\", r3)\n\n}", "func TestPrewriteOverwrite4A(t *testing.T) {\n}", "func testWrapUnwrapEqual(wrappingKey *Key, secretKey *Key) error {\n\tdata, err := Wrap(wrappingKey, secretKey)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tsecret, err := Unwrap(wrappingKey, data)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer secret.Wipe()\n\n\tif !bytes.Equal(secretKey.data, secret.data) {\n\t\treturn fmt.Errorf(\"Got %x after wrap/unwrap with w=%x and s=%x\",\n\t\t\tsecret.data, wrappingKey.data, secretKey.data)\n\t}\n\treturn nil\n}", "func ExampleMustSqueezeTrytes() {}", "func TestPrewriteLocked4A(t *testing.T) {\n}", "func TestCommitConflictRepeat4A(t *testing.T) {\n}", "func testLenHash(b []byte) uint {\n\treturn uint(len(b))\n}", "func testFullCycleFilesystem(t *testing.T) {\n\tt.Log(\"TODO\")\n}", "func TestCommitConflictRace4A(t *testing.T) {\n}", "func Test_AreEqualSlices_one_shorter(t *testing.T) {\n // create two equal slices\n a := []byte{ 0xDE, 0xAD, 0xBE, 0xEF }\n b := []byte{ 0xDE, 0xAD, 0xBE }\n //\tmake test -> log failure but continue testing\n if AreEqualSlices(a,b) { t.Error(\"different length slices determined equal\") }\n}", "func TestUidForUsername(t *testing.T) {\n\tuid, err := 
uidForUsername(\"root\")\n\tif err != nil {\n\t\tt.Errorf(\"uidForUsername(\\\"root\\\") returned error: %v\", err)\n\t\treturn\n\t}\n\tif uid != 0 {\n\t\tt.Errorf(\"uidForUsername(\\\"root\\\") returned %d, should be 0\", uid)\n\t}\n\n\t_, err = uidForUsername(\"asdfASDFxxx999\")\n\tif err == nil {\n\t\tt.Errorf(\"uidForUsername(\\\"asdfASDFxxx\\\") did not return error\")\n\t\treturn\n\t}\n}", "func TestIsValidBucketName(t *testing.T) {\n\ttestCases := []struct {\n\t\tbucketName string\n\t\tshouldPass bool\n\t}{\n\t\t// cases which should pass the test.\n\t\t// passing in valid bucket names.\n\t\t{\"lol\", true},\n\t\t{\"1-this-is-valid\", true},\n\t\t{\"1-this-too-is-valid-1\", true},\n\t\t{\"this.works.too.1\", true},\n\t\t{\"1234567\", true},\n\t\t{\"123\", true},\n\t\t{\"s3-eu-west-1.amazonaws.com\", true},\n\t\t{\"ideas-are-more-powerful-than-guns\", true},\n\t\t{\"testbucket\", true},\n\t\t{\"1bucket\", true},\n\t\t{\"bucket1\", true},\n\t\t// cases for which test should fail.\n\t\t// passing invalid bucket names.\n\t\t{\"------\", false},\n\t\t{\"$this-is-not-valid-too\", false},\n\t\t{\"contains-$-dollar\", false},\n\t\t{\"contains-^-carrot\", false},\n\t\t{\"contains-$-dollar\", false},\n\t\t{\"contains-$-dollar\", false},\n\t\t{\"......\", false},\n\t\t{\"\", false},\n\t\t{\"a\", false},\n\t\t{\"ab\", false},\n\t\t{\".starts-with-a-dot\", false},\n\t\t{\"ends-with-a-dot.\", false},\n\t\t{\"ends-with-a-dash-\", false},\n\t\t{\"-starts-with-a-dash\", false},\n\t\t{\"THIS-BEINGS-WITH-UPPERCASe\", false},\n\t\t{\"tHIS-ENDS-WITH-UPPERCASE\", false},\n\t\t{\"ThisBeginsAndEndsWithUpperCase\", false},\n\t\t{\"una ñina\", false},\n\t\t{\"lalalallalallalalalallalallalala-theString-size-is-greater-than-64\", false},\n\t}\n\n\tfor i, testCase := range testCases {\n\t\tisValidBucketName := IsValidBucketName(testCase.bucketName)\n\t\tif testCase.shouldPass && !isValidBucketName {\n\t\t\tt.Errorf(\"Test case %d: Expected \\\"%s\\\" to be a valid bucket name\", i+1, testCase.bucketName)\n\t\t}\n\t\tif !testCase.shouldPass && isValidBucketName {\n\t\t\tt.Errorf(\"Test case %d: Expected bucket name \\\"%s\\\" to be invalid\", i+1, testCase.bucketName)\n\t\t}\n\t}\n}", "func TestPointerTypes(t *testing.T) {\n\n}", "func TestSpec_GetBaseType(t *testing.T) {\n\tcode := `\npackage test\nfunc main() {\n var a int\n\tb := &a\n\t_ = b\n}`\n\ts := NewSpec(code)\n\ts.SearchKind = SearchAll\n\t//fmt.Println(s.GetBaseType(\"b\"))\n\tif s.GetBaseType(\"a\") == nil && s.GetBaseType(\"b\").String() == \"int\" {\n\t} else {\n\t\tt.Error(`test failed`)\n\t}\n}", "func TestPtrMagic(t *testing.T) {\n\ttests := []struct {\n\t\tname string\n\t\tdomain string\n\t\toutput string\n\t\tfail bool\n\t}{\n\t\t// Magic IPv4:\n\t\t{\"1.2.3.4\", \"3.2.1.in-addr.arpa\", \"4\", false},\n\t\t{\"1.2.3.4\", \"2.1.in-addr.arpa\", \"4.3\", false},\n\t\t{\"1.2.3.4\", \"1.in-addr.arpa\", \"4.3.2\", false},\n\n\t\t// No magic IPv4:\n\t\t{\"1\", \"2.3.4.in-addr.arpa\", \"1\", false},\n\t\t{\"1.2\", \"3.4.in-addr.arpa\", \"1.2\", false},\n\t\t{\"1.2.3\", \"4.in-addr.arpa\", \"1.2.3\", false},\n\t\t{\"1.2.3.4\", \"in-addr.arpa\", \"1.2.3.4\", false}, // Not supported, but it works.\n\n\t\t// Magic IPv6:\n\t\t{\"1\", \"0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa\", \"1\", false},\n\t\t{\"1.0\", \"0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa\", \"1.0\", false},\n\t\t{\"1.0.0\", \"0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa\", \"1.0.0\", false},\n\t\t{\"1.0.0.0\", 
\"0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa\", \"1.0.0.0\", false},\n\t\t{\"1.0.0.0.0\", \"0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa\", \"1.0.0.0.0\", false},\n\t\t{\"1.0.0.0.0.0\", \"0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa\", \"1.0.0.0.0.0\", false},\n\t\t{\"1.0.0.0.0.0.0\", \"0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa\", \"1.0.0.0.0.0.0\", false},\n\t\t{\"1.0.0.0.0.0.0.0\", \"0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa\", \"1.0.0.0.0.0.0.0\", false},\n\t\t{\"1.0.0.0.0.0.0.0.0\", \"0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa\", \"1.0.0.0.0.0.0.0.0\", false},\n\t\t{\"1.0.0.0.0.0.0.0.0.0\", \"0.0.0.0.0.0.0.0.0.0.0.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa\", \"1.0.0.0.0.0.0.0.0.0\", false},\n\t\t{\"1.0.0.0.0.0.0.0.0.0.0\", \"0.0.0.0.0.0.0.0.0.0.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa\", \"1.0.0.0.0.0.0.0.0.0.0\", false},\n\t\t{\"1.0.0.0.0.0.0.0.0.0.0.0\", \"0.0.0.0.0.0.0.0.0.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa\", \"1.0.0.0.0.0.0.0.0.0.0.0\", false},\n\t\t{\"1.0.0.0.0.0.0.0.0.0.0.0.0\", \"0.0.0.0.0.0.0.0.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa\", \"1.0.0.0.0.0.0.0.0.0.0.0.0\", false},\n\t\t{\"1.0.0.0.0.0.0.0.0.0.0.0.0.0\", \"0.0.0.0.0.0.0.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa\", \"1.0.0.0.0.0.0.0.0.0.0.0.0.0\", false},\n\n\t\t// RFC2317 (Classless)\n\t\t// 172.20.18.160/27 is .160 - .191:\n\t\t{\"172.20.18.159\", \"160/27.18.20.172.in-addr.arpa\", \"\", true},\n\t\t{\"172.20.18.160\", \"160/27.18.20.172.in-addr.arpa\", \"160\", false},\n\t\t{\"172.20.18.191\", \"160/27.18.20.172.in-addr.arpa\", \"191\", false},\n\t\t{\"172.20.18.192\", \"160/27.18.20.172.in-addr.arpa\", \"\", true},\n\n\t\t// If it doesn't end in .arpa, the magic is disabled:\n\t\t{\"1.2.3.4\", \"example.com\", \"1.2.3.4\", false},\n\t\t{\"1\", \"example.com\", \"1\", false},\n\t\t{\"1.0.0.0\", \"example.com\", \"1.0.0.0\", false},\n\t\t{\"1.0.0.0.0.0.0.0\", \"example.com\", \"1.0.0.0.0.0.0.0\", false},\n\n\t\t// User manually reversed addresses:\n\t\t{\"1.1.1.1.in-addr.arpa.\", \"1.1.in-addr.arpa\", \"1.1\", false},\n\t\t{\"1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa.\",\n\t\t\t\"0.2.ip6.arpa\", \"1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.8.b.d.0.1.0\", false},\n\n\t\t// Error cases:\n\t\t{\"1.1.1.1.in-addr.arpa.\", \"2.2.in-addr.arpa\", \"\", true},\n\t\t{\"1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa.\", \"9.9.ip6.arpa\", \"\", true},\n\t\t{\"3.3.3.3\", \"4.4.in-addr.arpa\", \"\", true},\n\t\t{\"2001:db8::1\", \"9.9.ip6.arpa\", \"\", true},\n\n\t\t// These should be errors but we don't check for them at this time:\n\t\t// {\"blurg\", \"3.4.in-addr.arpa\", \"blurg\", true},\n\t\t// {\"1\", \"3.4.in-addr.arpa\", \"1\", true},\n\t}\n\tfor _, tst := range tests {\n\t\tt.Run(fmt.Sprintf(\"%s %s\", tst.name, tst.domain), func(t *testing.T) {\n\t\t\to, errs := PtrNameMagic(tst.name, tst.domain)\n\t\t\tif errs != nil && !tst.fail {\n\t\t\t\tt.Errorf(\"Got error but expected none (%v)\", errs)\n\t\t\t} else if errs == nil && tst.fail {\n\t\t\t\tt.Errorf(\"Expected error but got none (%v)\", o)\n\t\t\t} else if errs == nil && o != tst.output {\n\t\t\t\tt.Errorf(\"Got (%v) expected (%v)\", o, tst.output)\n\t\t\t}\n\t\t})\n\t}\n}", "func TestMultiplePrewrites4A(t *testing.T) {\n}", "func checkRead(t *testing.T, testname string, b *Builder, s string) {\n\tbytes := b.Bytes()\n\tstr := b.String()\n\tif b.Len() != len(str) {\n\t\tt.Errorf(\"%s: b.Len() == %d, len(b.String()) == %d\", testname, b.Len(), 
len(str))\n\t}\n\tif string(bytes) != s {\n\t\tt.Errorf(\"%s: string(b.Bytes()) == %q, s == %q\", testname, string(bytes), s)\n\t}\n}", "func TestAckInstalledApplicationListDuplicateRegression(t *testing.T) {\n\n}", "func getValidPrefix(s string, base int64) string {\n\ttrace_util_0.Count(_util_00000, 47)\n\tvar (\n\t\tvalidLen int\n\t\tupper rune\n\t)\n\tswitch {\n\tcase base >= 2 && base <= 9:\n\t\ttrace_util_0.Count(_util_00000, 51)\n\t\tupper = rune('0' + base)\n\tcase base <= 36:\n\t\ttrace_util_0.Count(_util_00000, 52)\n\t\tupper = rune('A' + base - 10)\n\tdefault:\n\t\ttrace_util_0.Count(_util_00000, 53)\n\t\treturn \"\"\n\t}\n\ttrace_util_0.Count(_util_00000, 48)\nLoop:\n\tfor i := 0; i < len(s); i++ {\n\t\ttrace_util_0.Count(_util_00000, 54)\n\t\tc := rune(s[i])\n\t\tswitch {\n\t\tcase unicode.IsDigit(c) || unicode.IsLower(c) || unicode.IsUpper(c):\n\t\t\ttrace_util_0.Count(_util_00000, 55)\n\t\t\tc = unicode.ToUpper(c)\n\t\t\tif c < upper {\n\t\t\t\ttrace_util_0.Count(_util_00000, 58)\n\t\t\t\tvalidLen = i + 1\n\t\t\t} else {\n\t\t\t\ttrace_util_0.Count(_util_00000, 59)\n\t\t\t\t{\n\t\t\t\t\tbreak Loop\n\t\t\t\t}\n\t\t\t}\n\t\tcase c == '+' || c == '-':\n\t\t\ttrace_util_0.Count(_util_00000, 56)\n\t\t\tif i != 0 {\n\t\t\t\ttrace_util_0.Count(_util_00000, 60)\n\t\t\t\tbreak Loop\n\t\t\t}\n\t\tdefault:\n\t\t\ttrace_util_0.Count(_util_00000, 57)\n\t\t\tbreak Loop\n\t\t}\n\t}\n\ttrace_util_0.Count(_util_00000, 49)\n\tif validLen > 1 && s[0] == '+' {\n\t\ttrace_util_0.Count(_util_00000, 61)\n\t\treturn s[1:validLen]\n\t}\n\ttrace_util_0.Count(_util_00000, 50)\n\treturn s[:validLen]\n}", "func helper(t *testing.T, expectedErr interface{}, code string) {\n\t_, err := getExportedFunctions(\"\", code)\n\tif err == nil {\n\t\tt.Error(\"Expected error, got nil!\")\n\t}\n\n\texpectedErrType := reflect.TypeOf(expectedErr)\n\tactualErrType := reflect.TypeOf(err)\n\n\tif actualErrType != expectedErrType {\n\t\tt.Error(\"Expected error\", expectedErrType.String(), \"but got\",\n\t\t\tactualErrType.String(), \"with message\", err)\n\t}\n}", "func getHelperMethodIfNeeded(functionName string, indent string) (string, bool) {\n\tswitch functionName {\n\tcase \"filebase64\":\n\t\treturn `private static string ReadFileBase64(string path) {\n\t\treturn Convert.ToBase64String(System.Text.Encoding.UTF8.GetBytes(File.ReadAllText(path)));\n\t}`, true\n\tcase \"filebase64sha256\":\n\t\treturn `private static string ComputeFileBase64Sha256(string path) {\n\t\tvar fileData = System.Text.Encoding.UTF8.GetBytes(File.ReadAllText(path));\n\t\tvar hashData = SHA256.Create().ComputeHash(fileData);\n\t\treturn Convert.ToBase64String(hashData);\n\t}`, true\n\tcase \"sha1\":\n\t\treturn `private static string ComputeSHA1(string input) {\n\t\treturn BitConverter.ToString(\n\t\t\tSHA1.Create().ComputeHash(Encoding.UTF8.GetBytes(input))\n\t\t).Replace(\"-\",\"\").ToLowerInvariant());\n\t}`, true\n\tcase \"notImplemented\":\n\t\treturn fmt.Sprintf(`\n%sstatic object NotImplemented(string errorMessage) \n%s{\n%s throw new System.NotImplementedException(errorMessage);\n%s}`, indent, indent, indent, indent), true\n\tdefault:\n\t\treturn \"\", false\n\t}\n}", "func isBadVersion(version int) bool{\n return false\n}", "func goodFunction() {\n\treturn nil\n}", "func exactFunction() {\n\treturn nil\n}", "func (suite *FileSourceInternalTestSuite) TestUniqueForUniqueValues() {\n\tinput := []int{1, 2}\n\tresut := unique(input)\n\tassert.Len(suite.T(), resut, 2)\n}", "func TestRoundTrips(t *testing.T) {\n var totalSize int = 0\n\n // 
figure out total buffer size\n for _, v := range testStr {\n totalSize += v\n }\n\n for _ = range testByte {\n totalSize += BYTE_SIZE\n }\n\n for _ = range testUint32 {\n totalSize += UINT32_SIZE\n }\n\n for _ = range testUint64 {\n totalSize += UINT64_SIZE\n }\n\n cursor := 0\n buffer := make([]byte, totalSize)\n\n // write all data to buffer\n for k, _ := range testStr {\n WriteString(k, buffer, &cursor)\n }\n\n for i := range testByte {\n WriteByte(testByte[i], buffer, &cursor)\n }\n\n for i := range testUint32 {\n WriteUint32(testUint32[i], buffer, &cursor)\n }\n\n for i := range testUint64 {\n WriteUint64(testUint64[i], buffer, &cursor)\n }\n\n if cursor != totalSize {\n t.Fatalf(\n \"Cursor doesn't match measured size of data: %v vs %v\",\n cursor,\n totalSize,\n )\n }\n\n // retrieve all data from buffer and check round trip values\n cursor = 0\n\n for k, _ := range testStr {\n val, err := ReadString(buffer, &cursor)\n if err != nil {\n t.Fatal(err)\n }\n if !strEq(val, k) {\n t.Fatalf(\"Values don't match: %v != %v\", val, k)\n }\n }\n\n for i := range testByte {\n val, err := ReadByte(buffer, &cursor)\n if err != nil {\n t.Fatal(err)\n }\n if val != testByte[i] {\n t.Fatalf(\"Values don't match: %v != %v\", val, testByte[i])\n }\n }\n\n for i := range testUint32 {\n val, err := ReadUint32(buffer, &cursor)\n if err != nil {\n t.Fatal(err)\n }\n if val != testUint32[i] {\n t.Fatalf(\"Values don't match: %v != %v\", val, testUint32[i])\n }\n }\n\n for i := range testUint64 {\n val, err := ReadUint64(buffer, &cursor)\n if err != nil {\n t.Fatal(err)\n }\n if val != testUint64[i] {\n t.Fatalf(\"Values don't match: %v != %v\", val, testUint64[i])\n }\n }\n\n log.Println(\"TestRoundTrips: passed\")\n}", "func TestFunctionsEqual(t *testing.T) {\n\tcases := []struct {\n\t\tf, g interface{}\n\t}{\n\t\t{Second, func(x, y int32) int32 { return y }},\n\t\t{StringLen, func(s string) int { return len(s) }},\n\t\t{SliceLen, func(s []int) int { return len(s) }},\n\t\t{SliceCap, func(s []int) int { return cap(s) }},\n\t\t{ArrayThree, func(a [7]uint64) uint64 { return a[3] }},\n\t\t{FieldByte, func(s Struct) byte { return s.Byte }},\n\t\t{FieldInt8, func(s Struct) int8 { return s.Int8 }},\n\t\t{FieldUint16, func(s Struct) uint16 { return s.Uint16 }},\n\t\t{FieldInt32, func(s Struct) int32 { return s.Int32 }},\n\t\t{FieldUint64, func(s Struct) uint64 { return s.Uint64 }},\n\t\t{FieldFloat32, func(s Struct) float32 { return s.Float32 }},\n\t\t{FieldFloat64, func(s Struct) float64 { return s.Float64 }},\n\t\t{FieldStringLen, func(s Struct) int { return len(s.String) }},\n\t\t{FieldSliceCap, func(s Struct) int { return cap(s.Slice) }},\n\t\t{FieldArrayTwoBTwo, func(s Struct) byte { return s.Array[2].B[2] }},\n\t\t{FieldArrayOneC, func(s Struct) uint16 { return s.Array[1].C }},\n\t\t{FieldComplex64Imag, func(s Struct) float32 { return imag(s.Complex64) }},\n\t\t{FieldComplex128Real, func(s Struct) float64 { return real(s.Complex128) }},\n\t}\n\tfor _, c := range cases {\n\t\tif err := quick.CheckEqual(c.f, c.g, nil); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t}\n}", "func TestNumericPrimitives(t *testing.T) {\n\tfor i := uint64(0); i < 1e6; i += 111 {\n\t\to := old()\n\t\to.EncodeVarint(i)\n\t\ty, n := DecodeVarint(o.Bytes())\n\t\tif y != i || n != len(o.Bytes()) {\n\t\t\tt.Fatal(\"DecodeVarint(EncodeVarint(x))!=x\")\n\t\t}\n\t\tx, e := o.DecodeVarint()\n\t\tif e != nil {\n\t\t\tt.Fatal(\"DecodeVarint\")\n\t\t}\n\t\tif x != i {\n\t\t\tt.Fatal(\"varint decode fail:\", i, x)\n\t\t}\n\n\t\to = 
old()\n\t\to.EncodeFixed32(i)\n\t\tx, e = o.DecodeFixed32()\n\t\tif e != nil {\n\t\t\tt.Fatal(\"decFixed32\")\n\t\t}\n\t\tif x != i {\n\t\t\tt.Fatal(\"fixed32 decode fail:\", i, x)\n\t\t}\n\n\t\to = old()\n\t\to.EncodeFixed64(i * 1234567)\n\t\tx, e = o.DecodeFixed64()\n\t\tif e != nil {\n\t\t\tt.Error(\"decFixed64\")\n\t\t\tbreak\n\t\t}\n\t\tif x != i*1234567 {\n\t\t\tt.Error(\"fixed64 decode fail:\", i*1234567, x)\n\t\t\tbreak\n\t\t}\n\n\t\to = old()\n\t\ti32 := int32(i - 12345)\n\t\to.EncodeZigzag32(uint64(i32))\n\t\tx, e = o.DecodeZigzag32()\n\t\tif e != nil {\n\t\t\tt.Fatal(\"DecodeZigzag32\")\n\t\t}\n\t\t// NOTE WELL google's DecodeZigzag32() returns a signed 32-bit inside an unsigned 64-bit (bits 32-63 are 0).\n\t\t// My DecodeZigzag32() returns a signed 64-bit. So this comparison is slightly different than in the proto/all_test.go\n\t\t// (mine is different because unlike google I might be casting the result to an 'int', as well as 'int32', and 'int'\n\t\t// might turn out to be 64 bits)\n\t\tif x != uint64(i32) {\n\t\t\tt.Fatal(\"zigzag32 decode fail:\", i32, x)\n\t\t}\n\n\t\to = old()\n\t\ti64 := int64(i - 12345)\n\t\to.EncodeZigzag64(uint64(i64))\n\t\tx, e = o.DecodeZigzag64()\n\t\tif e != nil {\n\t\t\tt.Fatal(\"DecodeZigzag64\")\n\t\t}\n\t\tif x != uint64(i64) {\n\t\t\tt.Fatal(\"zigzag64 decode fail:\", i64, x)\n\t\t}\n\t}\n}", "func TestIsCompatibility(t *testing.T) {\n\tmatrix := make(map[int]map[int][3]bool)\n\tfor i1 := 0; i1 < 3; i1++ {\n\t\tmatrix[i1] = make(map[int][3]bool)\n\t\tfor i2 := 0; i2 < 3; i2++ {\n\t\t\tmatrix[i1][i2] = [3]bool{false, false, false}\n\t\t}\n\t}\n\n\t// index 0 - Jettison with an error code\n\terr1 := errors.New(\"err1\", errors.WithCode(\"ERR_1\"))\n\terr2 := errors.Wrap(err1, \"err2\")\n\tel := []error{nil, err1, err2}\n\tfor i1, e1 := range el {\n\t\tfor i2, e2 := range el {\n\t\t\trow := matrix[i1][i2]\n\t\t\trow[0] = errors.Is(e1, e2)\n\t\t\tmatrix[i1][i2] = row\n\t\t}\n\t}\n\n\t// index 1 - Jettison without an error code\n\terr1 = errors.New(\"err1\")\n\terr2 = errors.Wrap(err1, \"err2\")\n\tel = []error{nil, err1, err2}\n\tfor i1, e1 := range el {\n\t\tfor i2, e2 := range el {\n\t\t\trow := matrix[i1][i2]\n\t\t\trow[1] = errors.Is(e1, e2)\n\t\t\tmatrix[i1][i2] = row\n\t\t}\n\t}\n\n\t// index 3 - golang.org/x/exp/errors\n\terr1 = xerrors.New(\"err1\")\n\terr2 = xerrors.Errorf(\"err2: %w\", err1)\n\tel = []error{nil, err1, err2}\n\tfor i1, e1 := range el {\n\t\tfor i2, e2 := range el {\n\t\t\trow := matrix[i1][i2]\n\t\t\trow[2] = xerrors.Is(e1, e2)\n\t\t\tmatrix[i1][i2] = row\n\t\t}\n\t}\n\n\t// Each row in the compability matrix should have identical entries.\n\tfor i1, submatrix := range matrix {\n\t\tfor i2, row := range submatrix {\n\t\t\tassert.Equal(t, row[0], row[1], fmt.Sprintf(\"matrix[%d][%d] - jettison_code == jettison_no_code\", i1, i2))\n\t\t\tassert.Equal(t, row[1], row[2], fmt.Sprintf(\"matrix[%d][%d] - jettison_no_code == xerrors\", i1, i2))\n\t\t}\n\t}\n}", "func FuncChangeRet() bool { return false }", "func dumbAssert(val bool) {\n\tif val {\n\t\treturn\n\t}\n\tvar pc [5]uintptr // more than 1 just in case CallersFrames needs them to account for inlined functions\n\tcallers := runtime.Callers(2, pc[:])\n\tif callers == 0 {\n\t\tpanic(\"failed assertion, can't get runtime stack\")\n\t}\n\tframes := runtime.CallersFrames(pc[:])\n\tfor {\n\t\tframe, more := frames.Next()\n\t\tif !more {\n\t\t\tbreak\n\t\t}\n\t\tif frame.Func == nil {\n\t\t\tcontinue\n\t\t}\n\t\tif frame.File == \"\" || frame.Line == 0 
{\n\t\t\tpanic(fmt.Sprintf(\"failed assertion in %q (line number unknown)\", frame.Function))\n\t\t}\n\t\tline := readLineFromFile(frame.File, frame.Line)\n\t\tmessage := fmt.Sprintf(\"failed assertion in %s:%d\", frame.File, frame.Line)\n\t\tif line != \"\" {\n\t\t\tmessage += \"\\n\\n>>> \" + line + \"\\n\"\n\t\t}\n\t\tpanic(message)\n\t}\n\tpanic(\"failed assertion, and can't get caller info\")\n}", "func TestEmptyCommit4A(t *testing.T) {\n}", "func TestSubsequencePatternMatching() {\n\tfmt.Println(subsequencePatternMatchingDpFast(\"baxmx\", \"ax\"))\n\tfmt.Println(subsequencePatternMatchingDpFast(\"tomorrow\", \"tor\"))\n}", "func TestToOneSet(t *testing.T) {}", "func TestToOneSet(t *testing.T) {}", "func TestToOneSet(t *testing.T) {}", "func TestToOneSet(t *testing.T) {}", "func (suite *FileSourceInternalTestSuite) TestUniqueForDuplicateValues() {\n\tinput := []int{1, 1}\n\tresut := unique(input)\n\tassert.Len(suite.T(), resut, 1)\n}", "func TestGetVersions4A(t *testing.T) {\n}", "func Test_AreEqualSlices_unequal(t *testing.T) {\n // create two equal slices\n a := []byte{ 0xDE, 0xAD, 0xBE, 0xEF }\n b := []byte{ 0xCA, 0xFE, 0xBA, 0xBE }\n // make test -> log failure but continue testing\n if AreEqualSlices(a,b) { t.Error(\"unequal slices determined equal\") }\n}", "func main() {\n\t// fmt.Println(isValidRow([]byte{8, 3, 3, '.', 7, '.', '.', '.', '.'}))\n}", "func TestMyFuncProcessing(t *testing.T) {\n\ttables := []struct {\n\t\tmyString string\n\t\tnullList []string\n\t\tcoalList []string\n\t\tmyValString string\n\t\tmyValCoal string\n\t\tmyValNull string\n\t\tstringFunc string\n\t}{\n\t\t{\"lower\", []string{\"yo\", \"yo\"}, []string{\"random\", \"hello\", \"random\"}, \"LOWER\", \"random\", \"\", \"UPPER\"},\n\t\t{\"LOWER\", []string{\"null\", \"random\"}, []string{\"missing\", \"hello\", \"random\"}, \"lower\", \"hello\", \"null\", \"LOWER\"},\n\t}\n\tfor _, table := range tables {\n\t\tif table.coalList != nil {\n\t\t\tmyVal := processCoalNoIndex(table.coalList)\n\t\t\tif myVal != table.myValCoal {\n\t\t\t\tt.Error()\n\t\t\t}\n\t\t}\n\t\tif table.nullList != nil {\n\t\t\tmyVal := processNullIf(table.nullList)\n\t\t\tif myVal != table.myValNull {\n\t\t\t\tt.Error()\n\t\t\t}\n\t\t}\n\t\tmyVal := applyStrFunc(table.myString, table.stringFunc)\n\t\tif myVal != table.myValString {\n\t\t\tt.Error()\n\t\t}\n\n\t}\n}", "func getHelperMethodIfNeeded(functionName string, indent string) (string, bool) {\n\tswitch functionName {\n\tcase \"filebase64sha256\":\n\t\treturn `function computeFilebase64sha256(path string) string {\n\tconst fileData = Buffer.from(fs.readFileSync(path), 'binary')\n\treturn crypto.createHash('sha256').update(fileData).digest('hex')\n}`, true\n\tcase \"notImplemented\":\n\t\treturn fmt.Sprintf(\n\t\t\t`%sfunction notImplemented(message: string) {\n%s throw new Error(message);\n%s}`, indent, indent, indent), true\n\tcase \"singleOrNone\":\n\t\treturn fmt.Sprintf(\n\t\t\t`%sfunction singleOrNone<T>(elements: pulumi.Input<T>[]): pulumi.Input<T> {\n%s if (elements.length != 1) {\n%s throw new Error(\"singleOrNone expected input list to have a single element\");\n%s }\n%s return elements[0];\n%s}`, indent, indent, indent, indent, indent, indent), true\n\tdefault:\n\t\treturn \"\", false\n\t}\n}", "func (suite *FileSourceInternalTestSuite) TestUniqueForNil() {\n\tvar input []int\n\tresut := unique(input)\n\tassert.Len(suite.T(), resut, 0)\n}", "func TestUnitAcceptableVersion(t *testing.T) {\n\tinvalidVersions := []string{\n\t\t// ascii 
gibberish\n\t\t\"foobar\",\n\t\t\"foobar.0\",\n\t\t\"foobar.9\",\n\t\t\"0.foobar\",\n\t\t\"9.foobar\",\n\t\t\"foobar.0.0\",\n\t\t\"foobar.9.9\",\n\t\t\"0.foobar.0\",\n\t\t\"9.foobar.9\",\n\t\t\"0.0.foobar\",\n\t\t\"9.9.foobar\",\n\t\t// utf-8 gibberish\n\t\t\"世界\",\n\t\t\"世界.0\",\n\t\t\"世界.9\",\n\t\t\"0.世界\",\n\t\t\"9.世界\",\n\t\t\"世界.0.0\",\n\t\t\"世界.9.9\",\n\t\t\"0.世界.0\",\n\t\t\"9.世界.9\",\n\t\t\"0.0.世界\",\n\t\t\"9.9.世界\",\n\t\t// missing numbers\n\t\t\".\",\n\t\t\"..\",\n\t\t\"...\",\n\t\t\"0.\",\n\t\t\".1\",\n\t\t\"2..\",\n\t\t\".3.\",\n\t\t\"..4\",\n\t\t\"5.6.\",\n\t\t\".7.8\",\n\t\t\".9.0.\",\n\t}\n\tfor _, v := range invalidVersions {\n\t\terr := acceptableVersion(v)\n\t\tif _, ok := err.(invalidVersionError); err == nil || !ok {\n\t\t\tt.Errorf(\"acceptableVersion returned %q for version %q, but expected invalidVersionError\", err, v)\n\t\t}\n\t}\n\tinsufficientVersions := []string{\n\t\t// random small versions\n\t\t\"0\",\n\t\t\"00\",\n\t\t\"0000000000\",\n\t\t\"0.0\",\n\t\t\"0000000000.0\",\n\t\t\"0.0000000000\",\n\t\t\"0.0.0.0.0.0.0.0\",\n\t\t/*\n\t\t\t\"0.0.9\",\n\t\t\t\"0.0.999\",\n\t\t\t\"0.0.99999999999\",\n\t\t\t\"0.1.2\",\n\t\t\t\"0.1.2.3.4.5.6.7.8.9\",\n\t\t\t// pre-hardfork versions\n\t\t\t\"0.3.3\",\n\t\t\t\"0.3.9.9.9.9.9.9.9.9.9.9\",\n\t\t\t\"0.3.9999999999\",\n\t\t\t\"1.3.0\",\n\t\t*/\n\t}\n\tfor _, v := range insufficientVersions {\n\t\terr := acceptableVersion(v)\n\t\tif _, ok := err.(insufficientVersionError); err == nil || !ok {\n\t\t\tt.Errorf(\"acceptableVersion returned %q for version %q, but expected insufficientVersionError\", err, v)\n\t\t}\n\t}\n\tvalidVersions := []string{\n\t\tminimumAcceptablePeerVersion,\n\t\t\"1.3.7\",\n\t\t\"1.4.0\",\n\t\t\"1.6.0\",\n\t\t\"1.6.1\",\n\t\t\"1.9\",\n\t\t\"1.999\",\n\t\t\"1.9999999999\",\n\t\t\"2\",\n\t\t\"2.0\",\n\t\t\"2.0.0\",\n\t\t\"9\",\n\t\t\"9.0\",\n\t\t\"9.0.0\",\n\t\t\"9.9.9\",\n\t}\n\tfor _, v := range validVersions {\n\t\terr := acceptableVersion(v)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"acceptableVersion returned %q for version %q, but expected nil\", err, v)\n\t\t}\n\t}\n}", "func TestBrokenPrefix(t *testing.T) {\n\tsrc := [128]byte{}\n\tsrc[64] = 1\n\tdata := [10000][]byte{}\n\tfor i := range data {\n\t\tdata[i] = src[:]\n\t}\n\t// last 64 entries have a 1 in a pseudorandom position, breaking the\n\t// pattern\n\tfor i := 10000 - 64; i < 10000; i++ {\n\t\tdata[i] = src[64-((i*11)%64):]\n\t}\n\tforceRadix(BytesSlice(data[:]).Sort)\n\tif !BytesAreSorted(data[:]) {\n\t\tt.Errorf(\"broken-prefix data didn't sort\")\n\t}\n\n\tsrcStr := string(src[:])\n\tdataStr := [10000]string{}\n\tfor i := range dataStr {\n\t\tdataStr[i] = srcStr\n\t}\n\tfor i := 10000 - 64; i < 10000; i++ {\n\t\tdata[i] = src[64-((i*11)%64):]\n\t}\n\tforceRadix(StringSlice(dataStr[:]).Sort)\n\tif !StringsAreSorted(dataStr[:]) {\n\t\tt.Errorf(\"broken-prefix data didn't sort\")\n\t}\n}", "func TestRestOfInternalCode(t *testing.T) {\n\n\t// In this case unit testing will not help as we need to actually corever\n\t// this package with test. Because real functions hide under internal structures\n\t// which we do not expose, so our previous approach will no longer works.\n\t// Well it works but coverage does not detect that we are testing actual\n\t// implementation\n\n\t// In order to cover this part we will need to either pretend that we are\n\t// testing something or create real integration tests and ensure that mongod\n\t// process is running. In my case I will just fake my testing and do not use\n\t// assert. 
This way my test will pass either way\n\n\t// Create database context. I use real database, but it is possible to mock\n\t// database and configuration through interfaces.\n\tconf := config.GetConfig()\n\tclient, _ := databases.NewClient(conf)\n\tclient.StartSession()\n\n\tdb := databases.NewDatabase(conf, client)\n\tclient.Connect()\n\tdb.Client()\n\tvar result interface{}\n\t// because we do not care for actual results, we just quickly timeout the\n\t// call and we use incorrect call method\n\ttimeoutCtx, _ := context.WithTimeout(context.Background(), 1*time.Microsecond)\n\tdb.Collection(\"non-fake-existing-collection\").FindOne(timeoutCtx, \"incorrect-value\").Decode(&result)\n\n\t// insert and delete functions seems to panic instead of returning and error.\n\t// I did not investigate anything in this case as this is not our main goal.\n\t// Just define assert panic function and use this panicing function in it.\n\tvar mongoPanics assert.PanicTestFunc\n\n\tmongoPanics = func() {\n\t\tdb.Collection(\"non-fake-existing-collection\").InsertOne(timeoutCtx, result)\n\t}\n\tassert.Panics(t, mongoPanics)\n\n\tmongoPanics = func() {\n\t\tdb.Collection(\"non-fake-existing-collection\").DeleteOne(timeoutCtx, result)\n\t}\n\tassert.Panics(t, mongoPanics)\n\n\t// And it is done. We do not need to have mongo running and our code is\n\t// covered 100%. Well the actual implementation is faked, but it should be\n\t// tested via integration tests, not unit tests.\n\n}", "func TestSpec_MustGetValidType(t *testing.T) {\n\tcode := `\npackage haha\nimport \"fmt\"\nconst a = 1\nfunc main() {\n var b = 2.0\n type c struct {\n d string\n }\n fmt.Println(a, b)\n}`\n\ttestGet := func(s *Spec, v string) (r string) {\n\t\tdefer func() {\n\t\t\tif err := recover(); err != nil {\n\t\t\t\tr = fmt.Sprintf(\"%s\", err)\n\t\t\t}\n\t\t}()\n\t\ts.MustGetValidType(v)\n\t\treturn\n\t}\n\n\ts := NewSpec(code)\n\ts.SearchKind = SearchOnlyPackage\n\tif testGet(s, \"bool\") == \"find <bool> in code <\"+code+\"> failed\" &&\n\t\ttestGet(s, \"fmt\") == \"find <fmt> in code <\"+code+\"> failed\" &&\n\t\ts.MustGetValidType(\"a\").String() == \"untyped int\" &&\n\t\ttestGet(s, \"b\") == \"find <b> in code <\"+code+\"> failed\" {\n\t} else {\n\t\tt.Error(`test failed`)\n\t}\n\ts.SearchKind = SearchPackageAndUniverse\n\tif s.MustGetValidType(\"bool\").String() == \"bool\" &&\n\t\ttestGet(s, \"fmt\") == \"find <fmt> in code <\"+code+\"> failed\" &&\n\t\ts.MustGetValidType(\"a\").String() == \"untyped int\" &&\n\t\ttestGet(s, \"b\") == \"find <b> in code <\"+code+\"> failed\" {\n\t} else {\n\t\tt.Error(`test failed`)\n\t}\n\ts.SearchKind = SearchAll\n\tif s.MustGetValidType(\"bool\").String() == \"bool\" &&\n\t\ts.MustGetValidType(\"fmt\").String() == \"invalid type\" &&\n\t\ts.MustGetValidType(\"a\").String() == \"untyped int\" &&\n\t\ts.MustGetValidType(\"b\").String() == \"float64\" {\n\t} else {\n\t\tt.Error(`test failed`)\n\t}\n}", "func TestCountPalindromicSubstring() {\n\tfmt.Println(countPalindromicSubstring(\"abdbca\"))\n\tfmt.Println(countPalindromicSubstring(\"cddpd\"))\n\tfmt.Println(countPalindromicSubstring(\"pqr\"))\n\tfmt.Println(countPalindromicSubstring(\"qqq\"))\n}", "func testReader(r io.Reader, content []byte) error {\n\tif len(content) > 0 {\n\t\tn, err := r.Read(nil)\n\t\tif n != 0 || err != nil {\n\t\t\treturn fmt.Errorf(\"Read(0) = %d, %v, want 0, nil\", n, err)\n\t\t}\n\t}\n\n\tdata, err := io.ReadAll(&minByteReader{r: r, min: offsetSize})\n\tif err != nil {\n\t\treturn err\n\t}\n\tif !bytes.Equal(data, 
content) {\n\t\treturn fmt.Errorf(\"ReadAll(varied amounts) = %q\\n\\twant %q\", data, content)\n\t}\n\n\tn, err := r.Read(make([]byte, offsetSize))\n\tif n != 0 || err != io.EOF {\n\t\treturn fmt.Errorf(\"Read(offsetSize) at EOF = %v, %v, want 0, EOF\", n, err)\n\t}\n\n\treturn nil\n}", "func TestNoOp(t *testing.T) {}", "func TestManifestAPI_Get_UnknownSchema(t *testing.T) {}", "func TestGenString(t *testing.T) {\n\tif genStr := GenString(); genStr != \"hello world\" {\n\t\tt.Error(\"The generated string should be \\\"hello word\\\", got \", genStr)\n\t}\n}", "func notTested() string {\n\treturn \"This function isn't tested!\"\n}", "func internalStartsWith(str string, prefix string, ignoreCase bool) bool {\n\tif str == \"\" || prefix == \"\" {\n\t\treturn (str == \"\" && prefix == \"\")\n\t}\n\tif utf8.RuneCountInString(prefix) > utf8.RuneCountInString(str) {\n\t\treturn false\n\t}\n\tif ignoreCase {\n\t\treturn strings.HasPrefix(strings.ToLower(str), strings.ToLower(prefix))\n\t}\n\treturn strings.HasPrefix(str, prefix)\n}", "func TestOrdering(t *testing.T) {\n\t// setup\n\tmb := newMemBlobs()\n\tstrings := []string{\"zz\", \"fox\", \"elephant\", \"antilope\", \"frog\", \"zebra\", \"cocodrile\"}\n\t// exercise\n\tfor i, s := range strings {\n\t\tmb.insert(s)\n\t\tassert(sort.StringsAreSorted(mb.keynames), t, \"insert did not order %s into %v\", s, mb.keynames)\n\t\tassert(len(mb.keynames) == (i+1), t, \"expected growed size %d but got %d\", (i + 1), len(mb.keynames))\n\t}\n\tfor i, s := range strings {\n\t\tmb.extract(s)\n\t\tassert(sort.StringsAreSorted(mb.keynames), t, \"extract broke order removing %s from %v\", s, mb.keynames)\n\t\texpectedSize := len(strings) - i - 1\n\t\tassert(len(mb.keynames) == expectedSize, t,\n\t\t\t\"expected shrinked size %d but got %d\", expectedSize, len(mb.keynames))\n\t}\n\n}", "func TestBasic(t *testing.T) {\n\n\tq := \"This is the time for all good men to come to the aid of their country...\"\n\t//qq := []byte{\"xThis is the time for all good men to come to the aid of their country...\"}\n\t//qqq := []byte{\"xxThis is the time for all good men to come to the aid of their country...\"}\n\t//qqqq[] := []byte{\"xxxThis is the time for all good men to come to the aid of their country...\"}\n\n\tu := stu(q)\n\th1 := jenkins3.HashWordsLen(u, 13)\n\tfmt.Printf(\"%08x, %0x8, %08x\\n\", h1)\n\n\tb, c := uint32(0), uint32(0)\n\tc, b = jenkins3.HashString(\"\", c, b)\n\t//fmt.Printf(\"%08x, %08x\\n\", c, b)\n\tif c != 0xdeadbeef || b != 0xdeadbeef {\n\t\tt.Logf(\"c=0x%x != 0xdeadbeef || b=0x%x != 0xdeadbeef\\n\", c, b)\n\t\tt.FailNow()\n\t}\n\n\tb, c = 0xdeadbeef, 0\n\tc, b = jenkins3.HashString(\"\", c, b)\n\t//fmt.Printf(\"%08x, %08x\\n\", c, b)\t// bd5b7dde deadbeef\n\tif c != 0xbd5b7dde || b != 0xdeadbeef {\n\t\tt.Logf(\"c=0x%x != 0xbd5b7dde || b=0x%x != 0xdeadbeef\\n\", c, b)\n\t\tt.FailNow()\n\t}\n\n\tb, c = 0xdeadbeef, 0xdeadbeef\n\tc, b = jenkins3.HashString(\"\", c, b)\n\t//fmt.Printf(\"%08x, %08x\\n\", c, b)\t// 9c093ccd bd5b7dde\n\tif c != 0x9c093ccd || b != 0xbd5b7dde {\n\t\tt.Logf(\"c=0x%x != 0x9c093ccd || b=0x%x != 0xbd5b7dde\\n\", c, b)\n\t\tt.FailNow()\n\t}\n\n\tb, c = 0, 0\n\tc, b = jenkins3.HashString(\"Four score and seven years ago\", c, b)\n\t//fmt.Printf(\"%08x, %08x\\n\", c, b)\t// 17770551 ce7226e6\n\tif c != 0x17770551 || b != 0xce7226e6 {\n\t\tt.Logf(\"c=0x%x != 0x17770551 || b=0x%x != 0xce7226e6\\n\", c, b)\n\t\tt.FailNow()\n\t}\n\n\tb, c = 1, 0\n\tc, b = jenkins3.HashString(\"Four score and seven years ago\", c, 
b)\n\t//fmt.Printf(\"%08x, %08x\\n\", c, b)\t// e3607cae bd371de4\n\tif c != 0xe3607cae || b != 0xbd371de4 {\n\t\tt.Logf(\"c=0x%x != 0xe3607cae || b=0x%x != 0xbd371de4\\n\", c, b)\n\t\tt.FailNow()\n\t}\n\n\tb, c = 0, 1\n\tc, b = jenkins3.HashString(\"Four score and seven years ago\", c, b)\n\t//fmt.Printf(\"%08x, %08x\\n\", c, b)\t// cd628161 6cbea4b3\n\tif c != 0xcd628161 || b != 0x6cbea4b3 {\n\t\tt.Logf(\"c=0x%x != 0xcd628161 || b=0x%x != 0x6cbea4b3\\n\", c, b)\n\t\tt.FailNow()\n\t}\n\n}", "func main() {\n\tassertEqual(minAddToMakeValid(\"\"), 0)\n\tassertEqual(minAddToMakeValid(\"(\"), 1)\n\tassertEqual(minAddToMakeValid(\"((()(())))\"), 0)\n\tassertEqual(minAddToMakeValid(\"((()(()))\"), 1)\n\tassertEqual(minAddToMakeValid(\")))(((\"), 6)\n\n\t// online\n\tassertEqual(minAddToMakeValid(\"())\"), 1)\n\tassertEqual(minAddToMakeValid(\"(((\"), 3)\n\tassertEqual(minAddToMakeValid(\"()\"), 0)\n\tassertEqual(minAddToMakeValid(\"()))((\"), 4)\n}", "func differentUnderlyingStorage(x, y []byte) bool {\n\treturn &x[0:cap(x)][cap(x)-1] != &y[0:cap(y)][cap(y)-1]\n}", "func TestAliasingRecursionValid(t *testing.T) {\n\tp, errs := PrototypeString(`\n alias p = x {\n p\n }\n p`)\n\tgoutil.AssertNow(t, errs == nil, \"errs should be nil\")\n\tgoutil.AssertNow(t, p.elements[\"x\"] != nil, \"x is nil\")\n}", "func TestLengths(t *testing.T) {\n // strings\n for k, v := range testStr {\n val := LenString(k)\n if val != v {\n t.Fatalf(\"%v returned %v (expected %v)\", k, val, v)\n }\n }\n\n // bytes\n bVal := LenByte()\n if bVal != BYTE_SIZE {\n t.Fatalf(\"Byte returned %v (expected %v)\", bVal, 4)\n }\n\n // uints\n uval32 := LenUint32()\n if uval32 != UINT32_SIZE {\n t.Fatalf(\"Uint32 returned %v (expected %v)\", uval32, 4)\n }\n uval64 := LenUint64()\n if uval64 != UINT64_SIZE {\n t.Fatalf(\"Uint64 returned %v (expected %v)\", uval64, 8)\n }\n\n log.Println(\"TestLengths: passed\")\n}", "func TestHelperParseURL(t *testing.T) {\n\ttests := []struct {\n\t\turl string\n\t\texpectedURL string\n\t\terr error\n\t}{\n\t\t{url: \"foobar.docker.io\", expectedURL: \"//foobar.docker.io\"},\n\t\t{url: \"foobar.docker.io:2376\", expectedURL: \"//foobar.docker.io:2376\"},\n\t\t{url: \"//foobar.docker.io:2376\", expectedURL: \"//foobar.docker.io:2376\"},\n\t\t{url: \"http://foobar.docker.io:2376\", expectedURL: \"http://foobar.docker.io:2376\"},\n\t\t{url: \"https://foobar.docker.io:2376\", expectedURL: \"https://foobar.docker.io:2376\"},\n\t\t{url: \"https://foobar.docker.io:2376/some/path\", expectedURL: \"https://foobar.docker.io:2376/some/path\"},\n\t\t{url: \"https://foobar.docker.io:2376/some/other/path?foo=bar\", expectedURL: \"https://foobar.docker.io:2376/some/other/path\"},\n\t\t{url: \"/foobar.docker.io\", err: errors.New(\"no hostname in URL\")},\n\t\t{url: \"ftp://foobar.docker.io:2376\", err: errors.New(\"unsupported scheme: ftp\")},\n\t}\n\n\tfor _, te := range tests {\n\t\tu, err := Parse(te.url)\n\n\t\tif te.err == nil && err != nil {\n\t\t\tt.Errorf(\"Error: failed to parse URL %q: %s\", te.url, err)\n\t\t\tcontinue\n\t\t}\n\t\tif te.err != nil && err == nil {\n\t\t\tt.Errorf(\"Error: expected error %q, got none when parsing URL %q\", te.err, te.url)\n\t\t\tcontinue\n\t\t}\n\t\tif te.err != nil && err.Error() != te.err.Error() {\n\t\t\tt.Errorf(\"Error: expected error %q, got %q when parsing URL %q\", te.err, err, te.url)\n\t\t\tcontinue\n\t\t}\n\t\tif u != nil && u.String() != te.expectedURL {\n\t\t\tt.Errorf(\"Error: expected URL: %q, but got %q for URL: %q\", te.expectedURL, u.String(), 
te.url)\n\t\t}\n\t}\n}", "func TestSpec_MustGetValidTypeObject(t *testing.T) {\n\tcode := `\npackage haha\nimport \"fmt\"\nconst a = 1\nfunc main() {\n var b = 2.0\n type c struct {\n d string\n }\n fmt.Println(a, b)\n}`\n\ttestGet := func(s *Spec, v string) (r string) {\n\t\tdefer func() {\n\t\t\tif err := recover(); err != nil {\n\t\t\t\tr = fmt.Sprintf(\"%s\", err)\n\t\t\t}\n\t\t}()\n\t\ts.MustGetValidTypeObject(v)\n\t\treturn\n\t}\n\n\ts := NewSpec(code)\n\ts.SearchKind = SearchOnlyPackage\n\tif testGet(s, \"bool\") == \"find <bool> in code <\"+code+\"> failed\" &&\n\t\ttestGet(s, \"fmt\") == \"find <fmt> in code <\"+code+\"> failed\" &&\n\t\ts.MustGetValidTypeObject(\"a\").String() == \"const haha.a untyped int\" &&\n\t\ttestGet(s, \"b\") == \"find <b> in code <\"+code+\"> failed\" {\n\t} else {\n\t\tt.Error(`test failed`)\n\t}\n\ts.SearchKind = SearchPackageAndUniverse\n\tif s.MustGetValidTypeObject(\"bool\").String() == \"type bool\" &&\n\t\ttestGet(s, \"fmt\") == \"find <fmt> in code <\"+code+\"> failed\" &&\n\t\ts.MustGetValidTypeObject(\"a\").String() == \"const haha.a untyped int\" &&\n\t\ttestGet(s, \"b\") == \"find <b> in code <\"+code+\"> failed\" {\n\t} else {\n\t\tt.Error(`test failed`)\n\t}\n\ts.SearchKind = SearchAll\n\tif s.MustGetValidTypeObject(\"bool\").String() == \"type bool\" &&\n\t\ts.MustGetValidTypeObject(\"fmt\").String() == \"package fmt\" &&\n\t\ts.MustGetValidTypeObject(\"a\").String() == \"const haha.a untyped int\" &&\n\t\ts.MustGetValidTypeObject(\"b\").String() == \"var b float64\" {\n\t} else {\n\t\tt.Error(`test failed`)\n\t}\n}", "func TestAliasingRecursionInvalid(t *testing.T) {\n\t_, errs := PrototypeString(`alias x = x\n\t\tx`)\n\tgoutil.AssertNow(t, errs != nil, \"errs should not be nil\")\n}", "func testFailWithTweak(key *Key, data *metadata.WrappedKeyData, tweak []byte) error {\n\ttweak[0]++\n\tkey, err := Unwrap(key, data)\n\tif err == nil {\n\t\tkey.Wipe()\n\t}\n\ttweak[0]--\n\treturn err\n}", "func TestCheckSignatureEncoding(t *testing.T) {\n\tt.Parallel()\n\n\ttests := []struct {\n\t\tname string\n\t\tsig []byte\n\t\tisValid bool\n\t}{\n\t\t{\n\t\t\tname: \"valid signature\",\n\t\t\tsig: decodeHex(\"304402204e45e16932b8af514961a1d3a1a25\" +\n\t\t\t\t\"fdf3f4f7732e9d624c6c61548ab5fb8cd41022018152\" +\n\t\t\t\t\"2ec8eca07de4860a4acdd12909d831cc56cbbac46220\" +\n\t\t\t\t\"82221a8768d1d09\"),\n\t\t\tisValid: true,\n\t\t},\n\t\t{\n\t\t\tname: \"empty.\",\n\t\t\tsig: nil,\n\t\t\tisValid: false,\n\t\t},\n\t\t{\n\t\t\tname: \"bad magic\",\n\t\t\tsig: decodeHex(\"314402204e45e16932b8af514961a1d3a1a25\" +\n\t\t\t\t\"fdf3f4f7732e9d624c6c61548ab5fb8cd41022018152\" +\n\t\t\t\t\"2ec8eca07de4860a4acdd12909d831cc56cbbac46220\" +\n\t\t\t\t\"82221a8768d1d09\"),\n\t\t\tisValid: false,\n\t\t},\n\t\t{\n\t\t\tname: \"bad 1st int marker magic\",\n\t\t\tsig: decodeHex(\"304403204e45e16932b8af514961a1d3a1a25\" +\n\t\t\t\t\"fdf3f4f7732e9d624c6c61548ab5fb8cd41022018152\" +\n\t\t\t\t\"2ec8eca07de4860a4acdd12909d831cc56cbbac46220\" +\n\t\t\t\t\"82221a8768d1d09\"),\n\t\t\tisValid: false,\n\t\t},\n\t\t{\n\t\t\tname: \"bad 2nd int marker\",\n\t\t\tsig: decodeHex(\"304402204e45e16932b8af514961a1d3a1a25\" +\n\t\t\t\t\"fdf3f4f7732e9d624c6c61548ab5fb8cd41032018152\" +\n\t\t\t\t\"2ec8eca07de4860a4acdd12909d831cc56cbbac46220\" +\n\t\t\t\t\"82221a8768d1d09\"),\n\t\t\tisValid: false,\n\t\t},\n\t\t{\n\t\t\tname: \"short len\",\n\t\t\tsig: decodeHex(\"304302204e45e16932b8af514961a1d3a1a25\" +\n\t\t\t\t\"fdf3f4f7732e9d624c6c61548ab5fb8cd41022018152\" 
+\n\t\t\t\t\"2ec8eca07de4860a4acdd12909d831cc56cbbac46220\" +\n\t\t\t\t\"82221a8768d1d09\"),\n\t\t\tisValid: false,\n\t\t},\n\t\t{\n\t\t\tname: \"long len\",\n\t\t\tsig: decodeHex(\"304502204e45e16932b8af514961a1d3a1a25\" +\n\t\t\t\t\"fdf3f4f7732e9d624c6c61548ab5fb8cd41022018152\" +\n\t\t\t\t\"2ec8eca07de4860a4acdd12909d831cc56cbbac46220\" +\n\t\t\t\t\"82221a8768d1d09\"),\n\t\t\tisValid: false,\n\t\t},\n\t\t{\n\t\t\tname: \"long X\",\n\t\t\tsig: decodeHex(\"304402424e45e16932b8af514961a1d3a1a25\" +\n\t\t\t\t\"fdf3f4f7732e9d624c6c61548ab5fb8cd41022018152\" +\n\t\t\t\t\"2ec8eca07de4860a4acdd12909d831cc56cbbac46220\" +\n\t\t\t\t\"82221a8768d1d09\"),\n\t\t\tisValid: false,\n\t\t},\n\t\t{\n\t\t\tname: \"long Y\",\n\t\t\tsig: decodeHex(\"304402204e45e16932b8af514961a1d3a1a25\" +\n\t\t\t\t\"fdf3f4f7732e9d624c6c61548ab5fb8cd41022118152\" +\n\t\t\t\t\"2ec8eca07de4860a4acdd12909d831cc56cbbac46220\" +\n\t\t\t\t\"82221a8768d1d09\"),\n\t\t\tisValid: false,\n\t\t},\n\t\t{\n\t\t\tname: \"short Y\",\n\t\t\tsig: decodeHex(\"304402204e45e16932b8af514961a1d3a1a25\" +\n\t\t\t\t\"fdf3f4f7732e9d624c6c61548ab5fb8cd41021918152\" +\n\t\t\t\t\"2ec8eca07de4860a4acdd12909d831cc56cbbac46220\" +\n\t\t\t\t\"82221a8768d1d09\"),\n\t\t\tisValid: false,\n\t\t},\n\t\t{\n\t\t\tname: \"trailing crap\",\n\t\t\tsig: decodeHex(\"304402204e45e16932b8af514961a1d3a1a25\" +\n\t\t\t\t\"fdf3f4f7732e9d624c6c61548ab5fb8cd41022018152\" +\n\t\t\t\t\"2ec8eca07de4860a4acdd12909d831cc56cbbac46220\" +\n\t\t\t\t\"82221a8768d1d0901\"),\n\t\t\tisValid: false,\n\t\t},\n\t\t{\n\t\t\tname: \"X == N \",\n\t\t\tsig: decodeHex(\"30440220fffffffffffffffffffffffffffff\" +\n\t\t\t\t\"ffebaaedce6af48a03bbfd25e8cd0364141022018152\" +\n\t\t\t\t\"2ec8eca07de4860a4acdd12909d831cc56cbbac46220\" +\n\t\t\t\t\"82221a8768d1d09\"),\n\t\t\tisValid: false,\n\t\t},\n\t\t{\n\t\t\tname: \"X == N \",\n\t\t\tsig: decodeHex(\"30440220fffffffffffffffffffffffffffff\" +\n\t\t\t\t\"ffebaaedce6af48a03bbfd25e8cd0364142022018152\" +\n\t\t\t\t\"2ec8eca07de4860a4acdd12909d831cc56cbbac46220\" +\n\t\t\t\t\"82221a8768d1d09\"),\n\t\t\tisValid: false,\n\t\t},\n\t\t{\n\t\t\tname: \"Y == N\",\n\t\t\tsig: decodeHex(\"304402204e45e16932b8af514961a1d3a1a25\" +\n\t\t\t\t\"fdf3f4f7732e9d624c6c61548ab5fb8cd410220fffff\" +\n\t\t\t\t\"ffffffffffffffffffffffffffebaaedce6af48a03bb\" +\n\t\t\t\t\"fd25e8cd0364141\"),\n\t\t\tisValid: false,\n\t\t},\n\t\t{\n\t\t\tname: \"Y > N\",\n\t\t\tsig: decodeHex(\"304402204e45e16932b8af514961a1d3a1a25\" +\n\t\t\t\t\"fdf3f4f7732e9d624c6c61548ab5fb8cd410220fffff\" +\n\t\t\t\t\"ffffffffffffffffffffffffffebaaedce6af48a03bb\" +\n\t\t\t\t\"fd25e8cd0364142\"),\n\t\t\tisValid: false,\n\t\t},\n\t\t{\n\t\t\tname: \"0 len X\",\n\t\t\tsig: decodeHex(\"302402000220181522ec8eca07de4860a4acd\" +\n\t\t\t\t\"d12909d831cc56cbbac4622082221a8768d1d09\"),\n\t\t\tisValid: false,\n\t\t},\n\t\t{\n\t\t\tname: \"0 len Y\",\n\t\t\tsig: decodeHex(\"302402204e45e16932b8af514961a1d3a1a25\" +\n\t\t\t\t\"fdf3f4f7732e9d624c6c61548ab5fb8cd410200\"),\n\t\t\tisValid: false,\n\t\t},\n\t\t{\n\t\t\tname: \"extra R padding\",\n\t\t\tsig: decodeHex(\"30450221004e45e16932b8af514961a1d3a1a\" +\n\t\t\t\t\"25fdf3f4f7732e9d624c6c61548ab5fb8cd410220181\" +\n\t\t\t\t\"522ec8eca07de4860a4acdd12909d831cc56cbbac462\" +\n\t\t\t\t\"2082221a8768d1d09\"),\n\t\t\tisValid: false,\n\t\t},\n\t\t{\n\t\t\tname: \"extra S padding\",\n\t\t\tsig: decodeHex(\"304502204e45e16932b8af514961a1d3a1a25\" +\n\t\t\t\t\"fdf3f4f7732e9d624c6c61548ab5fb8cd41022100181\" +\n\t\t\t\t\"522ec8eca07de4860a4acdd12909d831cc56cbbac462\" 
+\n\t\t\t\t\"2082221a8768d1d09\"),\n\t\t\tisValid: false,\n\t\t},\n\t}\n\n\t// flags := ScriptVerifyStrictEncoding\n\tflags := StandardVerifyFlags\n\tfor _, test := range tests {\n\t\terr := TstCheckSignatureEncoding(test.sig, flags)\n\t\tif err != nil && test.isValid {\n\t\t\tt.Errorf(\"checkSignatureEncoding test '%s' failed \"+\n\t\t\t\t\"when it should have succeeded: %v\", test.name,\n\t\t\t\terr)\n\t\t} else if err == nil && !test.isValid {\n\t\t\tt.Errorf(\"checkSignatureEncooding test '%s' succeeded \"+\n\t\t\t\t\"when it should have failed\", test.name)\n\t\t}\n\t}\n}", "func TestInt(t *testing.T) {\n\tone := starlark.MakeInt(1)\n\n\tfor _, test := range []struct {\n\t\ti starlark.Int\n\t\twantInt64 string\n\t\twantUint64 string\n\t}{\n\t\t{starlark.MakeInt64(math.MinInt64).Sub(one), \"error\", \"error\"},\n\t\t{starlark.MakeInt64(math.MinInt64), \"-9223372036854775808\", \"error\"},\n\t\t{starlark.MakeInt64(-1), \"-1\", \"error\"},\n\t\t{starlark.MakeInt64(0), \"0\", \"0\"},\n\t\t{starlark.MakeInt64(1), \"1\", \"1\"},\n\t\t{starlark.MakeInt64(math.MaxInt64), \"9223372036854775807\", \"9223372036854775807\"},\n\t\t{starlark.MakeUint64(math.MaxUint64), \"error\", \"18446744073709551615\"},\n\t\t{starlark.MakeUint64(math.MaxUint64).Add(one), \"error\", \"error\"},\n\t} {\n\t\tgotInt64, gotUint64 := \"error\", \"error\"\n\t\tif i, ok := test.i.Int64(); ok {\n\t\t\tgotInt64 = fmt.Sprint(i)\n\t\t}\n\t\tif u, ok := test.i.Uint64(); ok {\n\t\t\tgotUint64 = fmt.Sprint(u)\n\t\t}\n\t\tif gotInt64 != test.wantInt64 {\n\t\t\tt.Errorf(\"(%s).Int64() = %s, want %s\", test.i, gotInt64, test.wantInt64)\n\t\t}\n\t\tif gotUint64 != test.wantUint64 {\n\t\t\tt.Errorf(\"(%s).Uint64() = %s, want %s\", test.i, gotUint64, test.wantUint64)\n\t\t}\n\t}\n}", "func TestMarshal(t *testing.T) {\n\tfor _, c := range []struct {\n\t\tname string\n\t\tfieldPairs []interface{}\n\t\t// errNeedle is \"\" if we expect no error. 
Otherwise, it is a string that\n\t\t// we expect to see in the resulting err.Error().\n\t\terrNeedle string\n\t}{\n\t\t{\n\t\t\t\"no fields\",\n\t\t\t[]interface{}{},\n\t\t\t\"\",\n\t\t},\n\t\t{\n\t\t\t\"simple\",\n\t\t\t[]interface{}{\"k0\", \"v0\"},\n\t\t\t\"\",\n\t\t},\n\t\t{\n\t\t\t\"mixed value types\",\n\t\t\t// Numeric types turn into float64s in JSON.\n\t\t\t[]interface{}{\"k0\", \"v0\", \"k1\", float64(1), \"k2\", true},\n\t\t\t\"\",\n\t\t},\n\t\t{\n\t\t\t\"odd field pairs\",\n\t\t\t[]interface{}{\"k0\", \"v0\", \"k1\"},\n\t\t\t\"even\",\n\t\t},\n\t\t{\n\t\t\t\"non-string key\",\n\t\t\t[]interface{}{0, \"v0\"},\n\t\t\t\"string\",\n\t\t},\n\t\t{\n\t\t\t\"duplicate keys\",\n\t\t\t[]interface{}{\"k0\", \"v0\", \"k0\", \"v1\"},\n\t\t\t\"duplicate\",\n\t\t},\n\t} {\n\t\tt.Run(c.name, func(t *testing.T) {\n\t\t\tmarshalOK := c.errNeedle == \"\"\n\t\t\ts, err := Marshal(c.name, c.fieldPairs)\n\t\t\tif got, want := err == nil, marshalOK; got != want {\n\t\t\t\tt.Fatalf(\"got %v, want %v\", got, want)\n\t\t\t}\n\t\t\tif !marshalOK {\n\t\t\t\tif !strings.Contains(err.Error(), c.errNeedle) {\n\t\t\t\t\tt.Errorf(\"error %q does not contain expected substring %q\", err.Error(), c.errNeedle)\n\t\t\t\t}\n\t\t\t\treturn\n\t\t\t}\n\t\t\tvar m map[string]interface{}\n\t\t\terr = json.Unmarshal([]byte(s), &m)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"unmarshaling failed: %v\", err)\n\t\t\t}\n\t\t\t// The +1 is for the eventType.\n\t\t\tif got, want := len(m), (len(c.fieldPairs)/2)+1; got != want {\n\t\t\t\tt.Errorf(\"got %v, want %v\", got, want)\n\t\t\t}\n\t\t\ttyp, ok := m[eventTypeFieldKey]\n\t\t\tif ok {\n\t\t\t\tif got, want := typ, c.name; got != want {\n\t\t\t\t\tt.Errorf(\"got %v, want %v\", got, want)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tt.Errorf(\"eventType field not marshaled\")\n\t\t\t}\n\t\t\tfor i := 0; i < len(c.fieldPairs); i++ {\n\t\t\t\tkey := c.fieldPairs[i].(string)\n\t\t\t\ti++\n\t\t\t\tvalue := c.fieldPairs[i]\n\t\t\t\tmvalue, ok := m[key]\n\t\t\t\tif !ok {\n\t\t\t\t\tt.Errorf(\"field with key %q not marshaled\", key)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tif got, want := mvalue, value; got != want {\n\t\t\t\t\tt.Errorf(\"got %v(%T), want %v(%T)\", got, got, want, want)\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}", "func assertStack(t *testing.T, tc end2endTest, s *types.State) {\n\t// Oops, cannot check stack size like this, initialized at a size of 10...\n\t/*if lEx, lAc := len(tc.stack), len(s.Stack); lEx != lAc {\n\t\tt.Errorf(\"%s: expected %d stack size, got %d\", tc.name, lEx, lAc)\n\t} else {*/\n\tori := \"stack\"\n\t// Same stack size, check values\n\tfor i, vEx := range tc.stack {\n\t\tif i == 0 && vEx == nil {\n\t\t\t// Ignore if expected is nil at position 0 (will be the startup function)\n\t\t\tcontinue\n\t\t}\n\t\tvAc := s.Stack[i]\n\t\ttc.context = fmt.Sprintf(\"%s.%d\", ori, i)\n\t\tassertValues(t, tc, vEx, vAc)\n\t}\n\t//}\n\n\tif tc.top != s.Top {\n\t\tt.Errorf(\"%s: expected %d top-of-stack value, got %d\", tc.name, tc.top, s.Top)\n\t}\n}", "func TestReadValidStlFile(t *testing.T) {\n\tt.Error(\"Not yet implemented\")\n}", "func TestCallFunc_arguments(t *testing.T) {\n\n}", "func TestGetSource(t *testing.T) {\n\tcurrentSource := func() string { return getSource() }\n\tgotSource := currentSource()\n\t// Hard coded line number, 32, in the \"expectedSource\" value\n\texpectedSource := \"[namespace-lock_test.go:33:TestGetSource()]\"\n\tif gotSource != expectedSource {\n\t\tt.Errorf(\"expected : %s, got : %s\", expectedSource, gotSource)\n\t}\n}", "func 
TestMySizeFunction(t *testing.T) {\n\ttables := []struct {\n\t\tmyRecord []string\n\t\texpected int64\n\t}{\n\t\t{[]string{\"test1\", \"test2\", \"test3\", \"test4\", \"test5\"}, 30},\n\t}\n\tfor _, table := range tables {\n\t\tif processSize(table.myRecord) != table.expected {\n\t\t\tt.Error()\n\t\t}\n\n\t}\n}", "func TestSingleCommit4A(t *testing.T) {\n}", "func TestLeafSimilarTrees(t *testing.T) {\n\n}", "func TestUFormat(t *testing.T) {\n\tvar golden = []struct {\n\t\tf function\n\t\tname, serial string\n\t}{\n\t\t{bringUp, \"STARTDT_ACT\", \"680407000000\"},\n\t\t{bringUpOK, \"STARTDT_CON\", \"68040b000000\"},\n\t\t{bringDown, \"STOPDT_ACT\", \"680413000000\"},\n\t\t{bringDownOK, \"STOPDT_CON\", \"680423000000\"},\n\t\t{keepAlive, \"TESTFR_ACT\", \"680443000000\"},\n\t\t{keepAliveOK, \"TESTFR_CON\", \"680483000000\"},\n\t\t{bringUp | bringDown, \"<illegal 0x14>\", \"680417000000\"},\n\t}\n\n\tfor _, gold := range golden {\n\t\tu := newFunc(gold.f)\n\n\t\tif got := u.Format(); got != uFrame {\n\t\t\tt.Errorf(\"%s(%s): got %c-frame\", gold.serial, gold.name, got)\n\t\t}\n\t\tif got := u.Function().String(); got != gold.name {\n\t\t\tt.Errorf(\"%s(%s): got function %q\", gold.serial, gold.name, got)\n\t\t}\n\t\tif got := hex.EncodeToString(u[:]); !strings.HasPrefix(got, gold.serial) {\n\t\t\tt.Errorf(\"%s(%s): got serial 0x%s\", gold.serial, gold.name, got)\n\t\t}\n\n\t\tvar buf bytes.Buffer\n\t\tif n, err := u.Marshal(&buf, 0); n != 6 || err != nil {\n\t\t\tt.Errorf(\"%s(%s): marshall returned (%d, %#v)\", gold.serial, gold.name, n, err)\n\t\t}\n\t\tif got := hex.EncodeToString(buf.Bytes()); got != gold.serial {\n\t\t\tt.Errorf(\"%s(%s): marshalled 0x%s\", gold.serial, gold.name, got)\n\t\t}\n\t}\n}", "func TestGetPartSizeFromIdx(t *testing.T) {\n\t// Create test cases\n\ttestCases := []struct {\n\t\ttotalSize int64\n\t\tpartSize int64\n\t\tpartIndex int\n\t\texpectedSize int64\n\t}{\n\t\t// Total size is zero\n\t\t{0, 10, 1, 0},\n\t\t// part size 2MiB, total size 4MiB\n\t\t{4 * humanize.MiByte, 2 * humanize.MiByte, 1, 2 * humanize.MiByte},\n\t\t{4 * humanize.MiByte, 2 * humanize.MiByte, 2, 2 * humanize.MiByte},\n\t\t{4 * humanize.MiByte, 2 * humanize.MiByte, 3, 0},\n\t\t// part size 2MiB, total size 5MiB\n\t\t{5 * humanize.MiByte, 2 * humanize.MiByte, 1, 2 * humanize.MiByte},\n\t\t{5 * humanize.MiByte, 2 * humanize.MiByte, 2, 2 * humanize.MiByte},\n\t\t{5 * humanize.MiByte, 2 * humanize.MiByte, 3, 1 * humanize.MiByte},\n\t\t{5 * humanize.MiByte, 2 * humanize.MiByte, 4, 0},\n\t}\n\n\tfor i, testCase := range testCases {\n\t\ts, err := calculatePartSizeFromIdx(context.Background(), testCase.totalSize, testCase.partSize, testCase.partIndex)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Test %d: Expected to pass but failed. 
%s\", i+1, err)\n\t\t}\n\t\tif err == nil && s != testCase.expectedSize {\n\t\t\tt.Errorf(\"Test %d: The calculated part size is incorrect: expected = %d, found = %d\\n\", i+1, testCase.expectedSize, s)\n\t\t}\n\t}\n\n\ttestCasesFailure := []struct {\n\t\ttotalSize int64\n\t\tpartSize int64\n\t\tpartIndex int\n\t\terr error\n\t}{\n\t\t// partSize is 0, returns error.\n\t\t{10, 0, 1, errPartSizeZero},\n\t\t// partIndex is 0, returns error.\n\t\t{10, 1, 0, errPartSizeIndex},\n\t\t// Total size is -1, returns error.\n\t\t{-2, 10, 1, errInvalidArgument},\n\t}\n\n\tfor i, testCaseFailure := range testCasesFailure {\n\t\t_, err := calculatePartSizeFromIdx(context.Background(), testCaseFailure.totalSize, testCaseFailure.partSize, testCaseFailure.partIndex)\n\t\tif err == nil {\n\t\t\tt.Errorf(\"Test %d: Expected to failed but passed. %s\", i+1, err)\n\t\t}\n\t\tif err != nil && err != testCaseFailure.err {\n\t\t\tt.Errorf(\"Test %d: Expected err %s, but got %s\", i+1, testCaseFailure.err, err)\n\t\t}\n\t}\n}", "func TestMyInfoProtocolFunctions(t *testing.T) {\n\toptions := &Options{\n\t\tHasHeader: true,\n\t\tRecordDelimiter: \"\\n\",\n\t\tFieldDelimiter: \",\",\n\t\tComments: \"\",\n\t\tName: \"S3Object\", // Default table name for all objects\n\t\tReadFrom: bytes.NewReader([]byte(\"name1,name2,name3,name4\" + \"\\n\" + \"5,is,a,string\" + \"\\n\" + \"random,random,stuff,stuff\")),\n\t\tCompressed: \"\",\n\t\tExpression: \"\",\n\t\tOutputFieldDelimiter: \",\",\n\t\tStreamSize: 20,\n\t}\n\ts3s, err := NewInput(options)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tmyVal, _ := s3s.createStatXML()\n\tmyOtherVal, _ := s3s.createProgressXML()\n\n\ttables := []struct {\n\t\tpayloadStatMsg string\n\t\tpayloadProgressMsg string\n\t\texpectedStat int\n\t\texpectedProgress int\n\t}{\n\t\t{myVal, myOtherVal, 233, 243},\n\t}\n\tfor _, table := range tables {\n\t\tvar currBuf = &bytes.Buffer{}\n\t\tif len(s3s.writeStatMessage(table.payloadStatMsg, currBuf).Bytes()) != table.expectedStat {\n\t\t\tt.Error()\n\t\t}\n\t\tcurrBuf.Reset()\n\t\tif len(s3s.writeProgressMessage(table.payloadProgressMsg, currBuf).Bytes()) != table.expectedProgress {\n\t\t\tt.Error()\n\t\t}\n\t}\n}", "func isBound(pfn unsafe.Pointer, fn string) string {\n\tinc := \" \"\n\tif pfn != nil {\n\t\tinc = \"+\"\n\t}\n\treturn fmt.Sprintf(\" [%s] %s\", inc, fn)\n}", "func TestSizeof(t *testing.T) {\n\tconst _64bit = ^uint(0)>>32 != 0\n\n\tvar tests = []struct {\n\t\tval any // type as a value\n\t\t_32bit uintptr // size on 32bit platforms\n\t\t_64bit uintptr // size on 64bit platforms\n\t}{\n\t\t// Types\n\t\t{Basic{}, 16, 32},\n\t\t{Array{}, 16, 24},\n\t\t{Slice{}, 8, 16},\n\t\t{Struct{}, 24, 48},\n\t\t{Pointer{}, 8, 16},\n\t\t{Tuple{}, 12, 24},\n\t\t{Signature{}, 28, 56},\n\t\t{Union{}, 12, 24},\n\t\t{Interface{}, 44, 88},\n\t\t{Map{}, 16, 32},\n\t\t{Chan{}, 12, 24},\n\t\t{Named{}, 56, 104},\n\t\t{TypeParam{}, 28, 48},\n\t\t{term{}, 12, 24},\n\n\t\t// Objects\n\t\t{PkgName{}, 48, 88},\n\t\t{Const{}, 48, 88},\n\t\t{TypeName{}, 40, 72},\n\t\t{Var{}, 44, 80},\n\t\t{Func{}, 44, 80},\n\t\t{Label{}, 44, 80},\n\t\t{Builtin{}, 44, 80},\n\t\t{Nil{}, 40, 72},\n\n\t\t// Misc\n\t\t{Scope{}, 44, 88},\n\t\t{Package{}, 40, 80},\n\t\t{_TypeSet{}, 28, 56},\n\t}\n\tfor _, test := range tests {\n\t\tgot := reflect.TypeOf(test.val).Size()\n\t\twant := test._32bit\n\t\tif _64bit {\n\t\t\twant = test._64bit\n\t\t}\n\t\tif got != want {\n\t\t\tt.Errorf(\"unsafe.Sizeof(%T) = %d, want %d\", test.val, got, want)\n\t\t}\n\t}\n}", "func 
TestOrganizingContainerFirstGivenCase(t *testing.T) {\n\n\tsize := int32(2)\n\tcontainer := [][]int32{{1, 1}, {1, 1}}\n\texpected := \"Possible\"\n\n\tresult := organizingContainers(container, size)\n\n\tif result != expected {\n\t\tt.Errorf(\"Organizing container first case was incorrect, got: %s, want: %s.\", result, expected)\n\t}\n}", "func TestComplexTypes(t *testing.T) {\n\n}", "func (da *DoubleArray) _decideBaseOffset(firstChars []uint8, existsTerminator bool, offset uint8, rootIndex uint32, baseSearchOffset uint32) (uint32, uint32) {\n for {\n if baseSearchOffset >= uint32(len(da.Base)) {\n da._resizeDoubleArray()\n }\n if da.Check[baseSearchOffset] == 0 {\n break\n }\n baseSearchOffset++\n }\n var baseOffset uint32\n if baseSearchOffset <= charIndexCount + 2 {\n baseOffset = 2\n } else {\n baseOffset = baseSearchOffset - charIndexCount\n }\n for {\n if baseOffset + charIndexCount >= uint32(len(da.Base)) {\n da._resizeDoubleArray()\n }\n if !da._checkCollision(firstChars, existsTerminator, baseOffset) {\n // 衝突しない場合\n var i uint32\n for i = 1; i < charIndexCount; i++ {\n if firstChars[i] != 0 {\n da.Check[baseOffset + i] = rootIndex\n }\n }\n if existsTerminator {\n da.Check[baseOffset + charIndexCount] = rootIndex\n }\n\t\t\t//daCount++\n\t\t\t//if daCount % 1000 == 0 {\n\t\t\t//\tfmt.Printf(\"DEBUG decideBaseOffset %d %d %d\\n\", daCount, baseOffset, baseSearchOffset)\n\t\t\t//}\n return baseOffset, baseSearchOffset\n }\n baseOffset++\n }\n}", "func TestAssertEqualBytes(t *testing.T) {\n\tdata := []byte{9, 9, 1, 1, 1, 9, 9}\n\tassertBytesEqual(t, data, data, \"Self\")\n\tassertBytesEqual(t, data[1:4], data[1:4], \"Self\")\n\tassertBytesEqual(t, []byte{1, 1}, []byte{1, 1}, \"Simple match\")\n\tassertBytesEqual(t, []byte{1, 2, 3}, []byte{1, 2, 3}, \"content mismatch\")\n\tassertBytesEqual(t, []byte{1, 1, 1}, data[2:5], \"slice match\")\n}", "func TestCommitOverwrite4A(t *testing.T) {\n}", "func TestEx009(t *testing.T) {\n\n\tinput := []string{\"Hello world\", \"Practice makes perfect\"}\n\twant := []string{\"HELLO WORLD\", \"PRACTICE MAKES PERFECT\"}\n\n\tgot := Ex009(input)\n\n\tif !reflect.DeepEqual(got, want) {\n\t\tt.Errorf(\"Ex009() = %v, want %v\", got, want)\n\t}\n}", "func TestHashOrder(t *testing.T) {\n\ttestCases := []struct {\n\t\tobjectName string\n\t\thashedOrder []int\n\t}{\n\t\t// cases which should pass the test.\n\t\t// passing in valid object name.\n\t\t{\"object\", []int{14, 15, 16, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13}},\n\t\t{\"The Shining Script <v1>.pdf\", []int{16, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}},\n\t\t{\"Cost Benefit Analysis (2009-2010).pptx\", []int{15, 16, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14}},\n\t\t{\"117Gn8rfHL2ACARPAhaFd0AGzic9pUbIA/5OCn5A\", []int{3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 1, 2}},\n\t\t{\"SHØRT\", []int{11, 12, 13, 14, 15, 16, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10}},\n\t\t{\"There are far too many object names, and far too few bucket names!\", []int{15, 16, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14}},\n\t\t{\"a/b/c/\", []int{3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 1, 2}},\n\t\t{\"/a/b/c\", []int{6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 1, 2, 3, 4, 5}},\n\t\t{string([]byte{0xff, 0xfe, 0xfd}), []int{15, 16, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14}},\n\t}\n\n\t// Tests hashing order to be consistent.\n\tfor i, testCase := range testCases {\n\t\thashedOrder := hashOrder(testCase.objectName, 16)\n\t\tif !reflect.DeepEqual(testCase.hashedOrder, hashedOrder) {\n\t\t\tt.Errorf(\"Test case 
%d: Expected \\\"%v\\\" but failed \\\"%v\\\"\", i+1, testCase.hashedOrder, hashedOrder)\n\t\t}\n\t}\n\n\t// Tests hashing order to fail for when order is '-1'.\n\tif hashedOrder := hashOrder(\"This will fail\", -1); hashedOrder != nil {\n\t\tt.Errorf(\"Test: Expect \\\"nil\\\" but failed \\\"%#v\\\"\", hashedOrder)\n\t}\n\n\tif hashedOrder := hashOrder(\"This will fail\", 0); hashedOrder != nil {\n\t\tt.Errorf(\"Test: Expect \\\"nil\\\" but failed \\\"%#v\\\"\", hashedOrder)\n\t}\n}", "func TestGetStackFramePanicsOnLargeSkip(t *testing.T) {\n\tdefer func() {\n\t\tif r := recover(); r == nil {\n\t\t\tt.Error(\"The code did not panic\")\n\t\t}\n\t}()\n\n\tgetStackFrame(1024)\n}" ]
[ "0.58128554", "0.5600196", "0.5483316", "0.54533887", "0.53604615", "0.5296184", "0.5257959", "0.52424866", "0.5234794", "0.5217169", "0.51338357", "0.5128234", "0.50885046", "0.50372916", "0.50313133", "0.5021976", "0.50210726", "0.50012547", "0.4976728", "0.49484867", "0.49470112", "0.49395767", "0.48993504", "0.48874468", "0.48855922", "0.48663172", "0.4862494", "0.48540965", "0.4853781", "0.48303378", "0.48283765", "0.48259562", "0.4819555", "0.4817978", "0.48173264", "0.4816915", "0.48144206", "0.48132545", "0.48096505", "0.47994107", "0.4783619", "0.47740337", "0.47721437", "0.47710946", "0.4766907", "0.47577733", "0.47563154", "0.47563154", "0.47563154", "0.47563154", "0.4754849", "0.47450683", "0.4742857", "0.47419158", "0.4738432", "0.4732173", "0.4731986", "0.47292504", "0.472838", "0.4725444", "0.47249064", "0.47237834", "0.47228596", "0.47185996", "0.47148374", "0.47082916", "0.4703136", "0.46946067", "0.46917742", "0.46915746", "0.46862042", "0.46802834", "0.4678288", "0.467426", "0.46715504", "0.4668588", "0.46682635", "0.46652064", "0.4646371", "0.46460423", "0.46460298", "0.4645558", "0.46422854", "0.46414894", "0.46400216", "0.46396726", "0.46385616", "0.4634918", "0.46218857", "0.461894", "0.46180803", "0.46176884", "0.46168384", "0.4613383", "0.4611469", "0.46015882", "0.4596192", "0.45908841", "0.45853934", "0.4582812", "0.45796448" ]
0.0
-1
Deprecated: Use Request.ProtoReflect.Descriptor instead.
func (*Request) Descriptor() ([]byte, []int) { return file_imgCode_proto_rawDescGZIP(), []int{0} }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (*ModifyRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_engine_proto_rawDescGZIP(), []int{10}\n}", "func (*PatchCollectorsRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{161}\n}", "func (*AddPeerRequest) Descriptor() ([]byte, []int) {\n\treturn file_github_com_yahuizhan_dappley_metrics_go_api_rpc_pb_rpc_proto_rawDescGZIP(), []int{8}\n}", "func (*WebhookRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_dialogflow_v2beta1_webhook_proto_rawDescGZIP(), []int{0}\n}", "func (*GetRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_comments_proto_rawDescGZIP(), []int{3}\n}", "func (*GetPeerInfoRequest) Descriptor() ([]byte, []int) {\n\treturn file_github_com_yahuizhan_dappley_metrics_go_api_rpc_pb_rpc_proto_rawDescGZIP(), []int{6}\n}", "func (*GetServiceRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_appengine_v1_appengine_proto_rawDescGZIP(), []int{6}\n}", "func (*ListenRequest) Descriptor() ([]byte, []int) {\n\treturn file_faultinjector_proto_rawDescGZIP(), []int{8}\n}", "func (*Request) Descriptor() ([]byte, []int) {\n\treturn file_helloworld_helloworld_proto_rawDescGZIP(), []int{2}\n}", "func (*UpdateRequest) Descriptor() ([]byte, []int) {\n\treturn file_teams_v1_teams_proto_rawDescGZIP(), []int{5}\n}", "func (*Request) Descriptor() ([]byte, []int) {\n\treturn file_msgs_msgs_proto_rawDescGZIP(), []int{14}\n}", "func (*DeleteRequest) Descriptor() ([]byte, []int) {\n\treturn file_dictybase_api_jsonapi_request_proto_rawDescGZIP(), []int{7}\n}", "func (*DescribeRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_engine_proto_rawDescGZIP(), []int{4}\n}", "func (x *fastReflection_QueryParamsRequest) Descriptor() protoreflect.MessageDescriptor {\n\treturn md_QueryParamsRequest\n}", "func (*UpdatePermissionRequest) Descriptor() ([]byte, []int) {\n\treturn file_pkg_role_pb_request_proto_rawDescGZIP(), []int{9}\n}", "func (*DiagnoseRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_api_proto_rawDescGZIP(), []int{16}\n}", "func (*PaqueteRequest) Descriptor() ([]byte, []int) {\n\treturn file_helloworld_helloworld_proto_rawDescGZIP(), []int{2}\n}", "func (*WatchRequestTypeProto) Descriptor() ([]byte, []int) {\n\treturn file_raft_proto_rawDescGZIP(), []int{25}\n}", "func (*CMsgLoadedRequest) Descriptor() ([]byte, []int) {\n\treturn file_steam_htmlmessages_proto_rawDescGZIP(), []int{46}\n}", "func (*Request) Descriptor() ([]byte, []int) {\n\treturn file_api_protobuf_spec_example_example_proto_rawDescGZIP(), []int{1}\n}", "func (*CodeLensRequest) Descriptor() ([]byte, []int) {\n\treturn file_protocol_rpc_rpc_proto_rawDescGZIP(), []int{163}\n}", "func (*UpdateRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_contact_proto_rawDescGZIP(), []int{12}\n}", "func (*UpdateRequest) Descriptor() ([]byte, []int) {\n\treturn file_recordwants_proto_rawDescGZIP(), []int{6}\n}", "func (*FeedbackRequest) Descriptor() ([]byte, []int) {\n\treturn file_ssn_dataservice_v1_dataservice_proto_rawDescGZIP(), []int{10}\n}", "func (*Request) Descriptor() ([]byte, []int) {\n\treturn file_pkg_smgrpc_smgrpc_proto_rawDescGZIP(), []int{0}\n}", "func (*RequestPresentationRequest) Descriptor() ([]byte, []int) {\n\treturn file_messages_proto_rawDescGZIP(), []int{0}\n}", "func (*ListenRequest) Descriptor() ([]byte, []int) {\n\treturn file_threads_proto_rawDescGZIP(), []int{46}\n}", "func (*MetricsRequest) Descriptor() ([]byte, []int) {\n\treturn 
file_ssn_dataservice_v1_dataservice_proto_rawDescGZIP(), []int{11}\n}", "func (*ProxyRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_url_proto_rawDescGZIP(), []int{5}\n}", "func (*LogMessageRequest) Descriptor() ([]byte, []int) {\n\treturn file_protocol_rpc_rpc_proto_rawDescGZIP(), []int{59}\n}", "func (*Request) Descriptor() ([]byte, []int) {\n\treturn file_internal_services_profile_proto_profile_proto_rawDescGZIP(), []int{0}\n}", "func (*ProxyRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_sample_proto_rawDescGZIP(), []int{4}\n}", "func (*ValidateRequest) Descriptor() ([]byte, []int) {\n\treturn file_protobuf_clusrun_proto_rawDescGZIP(), []int{17}\n}", "func (*CMsgClientToGCPlayerStatsRequest) Descriptor() ([]byte, []int) {\n\treturn file_dota_gcmessages_client_proto_rawDescGZIP(), []int{143}\n}", "func (*TelemetryRequest) Descriptor() ([]byte, []int) {\n\treturn file_automate_gateway_api_telemetry_telemetry_proto_rawDescGZIP(), []int{0}\n}", "func (*DebugInstanceRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_appengine_v1_appengine_proto_rawDescGZIP(), []int{19}\n}", "func (*PatchAnnotationsRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{4}\n}", "func (*Request) Descriptor() ([]byte, []int) {\n\treturn file_Trd_ModifyOrder_proto_rawDescGZIP(), []int{2}\n}", "func (*UpdateConversationRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_threads_proto_rawDescGZIP(), []int{8}\n}", "func (*EndpointRequest) Descriptor() ([]byte, []int) {\n\treturn file_messages_proto_rawDescGZIP(), []int{13}\n}", "func (*GeneratedRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_auth_proto_rawDescGZIP(), []int{0}\n}", "func (*GetCollectorRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{163}\n}", "func (*CMsgGCPlayerInfoRequest) Descriptor() ([]byte, []int) {\n\treturn file_dota_gcmessages_client_proto_rawDescGZIP(), []int{117}\n}", "func (*QueryPermissionRequest) Descriptor() ([]byte, []int) {\n\treturn file_pkg_permission_pb_request_proto_rawDescGZIP(), []int{0}\n}", "func (*GetRequest) Descriptor() ([]byte, []int) {\n\treturn file_dictybase_api_jsonapi_request_proto_rawDescGZIP(), []int{0}\n}", "func (*ChangeRequest) Descriptor() ([]byte, []int) {\n\treturn file_authorization_proto_rawDescGZIP(), []int{0}\n}", "func (*Request) Descriptor() ([]byte, []int) {\n\treturn file_protobuf_newfindmaxpb_newfindmaxpb_proto_rawDescGZIP(), []int{0}\n}", "func ProtoFromDescriptor(d protoreflect.Descriptor) proto.Message {\n\tswitch d := d.(type) {\n\tcase protoreflect.FileDescriptor:\n\t\treturn ProtoFromFileDescriptor(d)\n\tcase protoreflect.MessageDescriptor:\n\t\treturn ProtoFromMessageDescriptor(d)\n\tcase protoreflect.FieldDescriptor:\n\t\treturn ProtoFromFieldDescriptor(d)\n\tcase protoreflect.OneofDescriptor:\n\t\treturn ProtoFromOneofDescriptor(d)\n\tcase protoreflect.EnumDescriptor:\n\t\treturn ProtoFromEnumDescriptor(d)\n\tcase protoreflect.EnumValueDescriptor:\n\t\treturn ProtoFromEnumValueDescriptor(d)\n\tcase protoreflect.ServiceDescriptor:\n\t\treturn ProtoFromServiceDescriptor(d)\n\tcase protoreflect.MethodDescriptor:\n\t\treturn ProtoFromMethodDescriptor(d)\n\tdefault:\n\t\t// WTF??\n\t\tif res, ok := d.(DescriptorProtoWrapper); ok {\n\t\t\treturn res.AsProto()\n\t\t}\n\t\treturn nil\n\t}\n}", "func (*AddRequest) Descriptor() ([]byte, []int) {\n\treturn file_grpc_calculator_proto_calc_proto_rawDescGZIP(), []int{0}\n}", "func 
(*OriginalDetectIntentRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_dialogflow_v2beta1_webhook_proto_rawDescGZIP(), []int{2}\n}", "func (*AddRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_calculate_proto_rawDescGZIP(), []int{3}\n}", "func (*Request) Descriptor() ([]byte, []int) {\n\treturn file_example_proto_rawDescGZIP(), []int{1}\n}", "func (*DescribePermissionRequest) Descriptor() ([]byte, []int) {\n\treturn file_pkg_role_pb_request_proto_rawDescGZIP(), []int{6}\n}", "func (*GetRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_user_proto_rawDescGZIP(), []int{2}\n}", "func (*GetRequest) Descriptor() ([]byte, []int) {\n\treturn file_grpc_exercicio_proto_rawDescGZIP(), []int{3}\n}", "func (*DeleteRequest) Descriptor() ([]byte, []int) {\n\treturn file_teams_v1_teams_proto_rawDescGZIP(), []int{10}\n}", "func (*RelationshipRequest) Descriptor() ([]byte, []int) {\n\treturn file_dictybase_api_jsonapi_request_proto_rawDescGZIP(), []int{3}\n}", "func (*ModelControlRequest) Descriptor() ([]byte, []int) {\n\treturn file_grpc_service_proto_rawDescGZIP(), []int{4}\n}", "func (*DeleteMicroRequest) Descriptor() ([]byte, []int) {\n\treturn file_pkg_micro_pb_request_proto_rawDescGZIP(), []int{4}\n}", "func (*CMsgProfileRequest) Descriptor() ([]byte, []int) {\n\treturn file_dota_gcmessages_client_proto_rawDescGZIP(), []int{275}\n}", "func (*RefreshRequest) Descriptor() ([]byte, []int) {\n\treturn file_cloudprovider_externalgrpc_protos_externalgrpc_proto_rawDescGZIP(), []int{16}\n}", "func (*ProofRequest) Descriptor() ([]byte, []int) {\n\treturn file_messages_proto_rawDescGZIP(), []int{35}\n}", "func (x *fastReflection_AddressStringToBytesRequest) Descriptor() protoreflect.MessageDescriptor {\n\treturn md_AddressStringToBytesRequest\n}", "func (*DescribeMicroRequest) Descriptor() ([]byte, []int) {\n\treturn file_pkg_micro_pb_request_proto_rawDescGZIP(), []int{3}\n}", "func (*Request) Descriptor() ([]byte, []int) {\n\treturn file_kv_proto_rawDescGZIP(), []int{0}\n}", "func (*CreateAlterRequest) Descriptor() ([]byte, []int) {\n\treturn file_grpc_exercicio_proto_rawDescGZIP(), []int{1}\n}", "func (*GetRequest) Descriptor() ([]byte, []int) {\n\treturn file_index_faults_rpc_rpc_proto_rawDescGZIP(), []int{2}\n}", "func (*LanguageDetectorRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_language_proto_rawDescGZIP(), []int{1}\n}", "func (*Request) Descriptor() ([]byte, []int) {\n\treturn file_example_proto_rawDescGZIP(), []int{0}\n}", "func (x *fastReflection_AddressBytesToStringRequest) Descriptor() protoreflect.MessageDescriptor {\n\treturn md_AddressBytesToStringRequest\n}", "func (*Request) Descriptor() ([]byte, []int) {\n\treturn file_ocis_messages_policies_v0_policies_proto_rawDescGZIP(), []int{2}\n}", "func (*BatchUpdateReferencesRequest_Request) Descriptor() ([]byte, []int) {\n\treturn file_pkg_proto_icas_icas_proto_rawDescGZIP(), []int{1, 0}\n}", "func (*QueryMicroRequest) Descriptor() ([]byte, []int) {\n\treturn file_pkg_micro_pb_request_proto_rawDescGZIP(), []int{2}\n}", "func (*DeleteRequest) Descriptor() ([]byte, []int) {\n\treturn file_grpc_exercicio_proto_rawDescGZIP(), []int{7}\n}", "func (*DelRequest) Descriptor() ([]byte, []int) {\n\treturn file_patrol_proto_rawDescGZIP(), []int{8}\n}", "func (*GetVersionRequest) Descriptor() ([]byte, []int) {\n\treturn file_github_com_yahuizhan_dappley_metrics_go_api_rpc_pb_rpc_proto_rawDescGZIP(), []int{9}\n}", "func (*ChangeRequest) Descriptor() ([]byte, []int) {\n\treturn 
file_management_proto_rawDescGZIP(), []int{2}\n}", "func (*ChangeRequest) Descriptor() ([]byte, []int) {\n\treturn file_management_proto_rawDescGZIP(), []int{2}\n}", "func (*Request) Descriptor() ([]byte, []int) {\n\treturn file_activity_proto_rawDescGZIP(), []int{10}\n}", "func (*CalculatorRequest) Descriptor() ([]byte, []int) {\n\treturn file_basicpb_unary_api_proto_rawDescGZIP(), []int{4}\n}", "func (*RecentMessagesRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_threads_proto_rawDescGZIP(), []int{16}\n}", "func (*GetRequest) Descriptor() ([]byte, []int) {\n\treturn file_api_collector_collector_proto_rawDescGZIP(), []int{3}\n}", "func (*Request) Descriptor() ([]byte, []int) {\n\treturn file_proto_fandncloud_service_user_user_proto_rawDescGZIP(), []int{10}\n}", "func (*GenerateMessageRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_ai_generativelanguage_v1beta2_discuss_service_proto_rawDescGZIP(), []int{0}\n}", "func (*HelloRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_hello_proto_rawDescGZIP(), []int{0}\n}", "func (*Request) Descriptor() ([]byte, []int) {\n\treturn file_service_face_detector_proto_rawDescGZIP(), []int{0}\n}", "func (*FindWebhookCallRequest) Descriptor() ([]byte, []int) {\n\treturn file_events_Event_proto_rawDescGZIP(), []int{9}\n}", "func (*SendRequest) Descriptor() ([]byte, []int) {\n\treturn file_github_com_yahuizhan_dappley_metrics_go_api_rpc_pb_rpc_proto_rawDescGZIP(), []int{5}\n}", "func (*CheckPermissionRequest) Descriptor() ([]byte, []int) {\n\treturn file_pkg_permission_pb_request_proto_rawDescGZIP(), []int{2}\n}", "func (*FriendRequest) Descriptor() ([]byte, []int) {\n\treturn file_grpc_user_proto_rawDescGZIP(), []int{5}\n}", "func (*ListRequest) Descriptor() ([]byte, []int) {\n\treturn file_dictybase_api_jsonapi_request_proto_rawDescGZIP(), []int{5}\n}", "func (*DecodeRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_videoservice_proto_rawDescGZIP(), []int{0}\n}", "func (*UpdateRequest) Descriptor() ([]byte, []int) {\n\treturn file_todo_proto_rawDescGZIP(), []int{5}\n}", "func (*RequestPresentation) Descriptor() ([]byte, []int) {\n\treturn file_messages_proto_rawDescGZIP(), []int{1}\n}", "func (*TodoRequest) Descriptor() ([]byte, []int) {\n\treturn file_protobuf_todolist_proto_rawDescGZIP(), []int{0}\n}", "func (*Request) Descriptor() ([]byte, []int) {\n\treturn file_api_api_proto_rawDescGZIP(), []int{3}\n}", "func (*CBroadcast_WebRTCStopped_Request) Descriptor() ([]byte, []int) {\n\treturn file_steammessages_broadcast_steamclient_proto_rawDescGZIP(), []int{47}\n}", "func (*UpdateRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_user_proto_rawDescGZIP(), []int{4}\n}", "func (*FindWebhookCallRequest) Descriptor() ([]byte, []int) {\n\treturn file_uac_Event_proto_rawDescGZIP(), []int{7}\n}", "func (*GetRequest) Descriptor() ([]byte, []int) {\n\treturn file_vote_proto_rawDescGZIP(), []int{9}\n}", "func (*ApplyRequest) Descriptor() ([]byte, []int) {\n\treturn file_github_com_containerd_containerd_api_services_diff_v1_diff_proto_rawDescGZIP(), []int{0}\n}" ]
[ "0.7147806", "0.7124804", "0.70896095", "0.7056412", "0.7031377", "0.7022057", "0.6997947", "0.69858086", "0.69592947", "0.6954594", "0.69518965", "0.6942627", "0.69331664", "0.69186646", "0.6911725", "0.69091153", "0.69048876", "0.6903502", "0.6894547", "0.6889418", "0.68856514", "0.6885372", "0.6885275", "0.6881128", "0.6877387", "0.68734556", "0.6868812", "0.68648905", "0.68630534", "0.6863034", "0.68627614", "0.68622017", "0.6861354", "0.6860519", "0.6859189", "0.68537825", "0.6844858", "0.68395555", "0.6839408", "0.6838421", "0.683774", "0.683675", "0.6834337", "0.6832608", "0.683078", "0.6829059", "0.68290013", "0.68288076", "0.6825711", "0.6823474", "0.6821358", "0.68198615", "0.6819196", "0.6818204", "0.6817342", "0.6814559", "0.6814415", "0.6813354", "0.6813189", "0.68081474", "0.6806998", "0.6806831", "0.6804555", "0.6803379", "0.6801659", "0.6796603", "0.67953116", "0.67936856", "0.67921805", "0.6792007", "0.67900026", "0.6788716", "0.6783424", "0.67822903", "0.67822164", "0.67811394", "0.67798626", "0.67798626", "0.6779186", "0.6772742", "0.6771466", "0.6769149", "0.67651516", "0.67649907", "0.67600346", "0.67580795", "0.6755207", "0.675468", "0.67545474", "0.67539096", "0.6750728", "0.67465156", "0.67461246", "0.67407644", "0.67387486", "0.67358804", "0.6735208", "0.6733434", "0.6731891", "0.6730574", "0.67299855" ]
0.0
-1
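The record above pairs the deprecation notice emitted by protoc-gen-go ("Deprecated: Use Request.ProtoReflect.Descriptor instead.") with the legacy generated Descriptor() accessor it refers to. As a hedged aside, not part of the dataset rows, a minimal sketch of the replacement API follows; emptypb.Empty stands in for any generated message type, since the dataset's Request type is assumed and not available here.

package main

import (
	"fmt"

	"google.golang.org/protobuf/types/known/emptypb"
)

func main() {
	// The supported replacement for the deprecated generated
	// Descriptor() accessor: reach the message descriptor through
	// the protoreflect API on any generated message.
	m := &emptypb.Empty{}
	md := m.ProtoReflect().Descriptor()

	fmt.Println(md.FullName())          // "google.protobuf.Empty"
	fmt.Println(md.ParentFile().Path()) // "google/protobuf/empty.proto"
}

The protoreflect.MessageDescriptor obtained this way gives structured access to the same descriptor information that the deprecated accessor exposed as raw ([]byte, []int), which is why the generated stubs in these records only retain Descriptor() for backward compatibility.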
Deprecated: Use Response.ProtoReflect.Descriptor instead.
func (*Response) Descriptor() ([]byte, []int) { return file_imgCode_proto_rawDescGZIP(), []int{1} }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (*AddPeerResponse) Descriptor() ([]byte, []int) {\n\treturn file_github_com_yahuizhan_dappley_metrics_go_api_rpc_pb_rpc_proto_rawDescGZIP(), []int{30}\n}", "func (x *fastReflection_MsgUpdateParamsResponse) Descriptor() protoreflect.MessageDescriptor {\n\treturn md_MsgUpdateParamsResponse\n}", "func (*GetPeerInfoResponse) Descriptor() ([]byte, []int) {\n\treturn file_github_com_yahuizhan_dappley_metrics_go_api_rpc_pb_rpc_proto_rawDescGZIP(), []int{28}\n}", "func (*ModifyResponse) Descriptor() ([]byte, []int) {\n\treturn file_proto_engine_proto_rawDescGZIP(), []int{11}\n}", "func (*GetMetricsInfoResponse) Descriptor() ([]byte, []int) {\n\treturn file_github_com_yahuizhan_dappley_metrics_go_api_rpc_pb_rpc_proto_rawDescGZIP(), []int{44}\n}", "func (*ListResponse) Descriptor() ([]byte, []int) {\n\treturn file_teams_v1_teams_proto_rawDescGZIP(), []int{1}\n}", "func (*ListenResponse) Descriptor() ([]byte, []int) {\n\treturn file_faultinjector_proto_rawDescGZIP(), []int{9}\n}", "func (*Deprecation) Descriptor() ([]byte, []int) {\n\treturn file_external_cfgmgmt_response_nodes_proto_rawDescGZIP(), []int{8}\n}", "func (*DeleteTeam_Response) Descriptor() ([]byte, []int) {\n\treturn file_uac_Team_proto_rawDescGZIP(), []int{6, 0}\n}", "func (*DeleteResponse) Descriptor() ([]byte, []int) {\n\treturn file_teams_v1_teams_proto_rawDescGZIP(), []int{11}\n}", "func (*DiagnoseResponse) Descriptor() ([]byte, []int) {\n\treturn file_proto_api_proto_rawDescGZIP(), []int{17}\n}", "func (*ListResponse) Descriptor() ([]byte, []int) {\n\treturn file_proto_contact_proto_rawDescGZIP(), []int{15}\n}", "func (*ProtoResponse) Descriptor() ([]byte, []int) {\n\treturn file_user_proto_rawDescGZIP(), []int{2}\n}", "func (*DescribeResponse) Descriptor() ([]byte, []int) {\n\treturn file_proto_engine_proto_rawDescGZIP(), []int{5}\n}", "func (*GetTeamById_Response) Descriptor() ([]byte, []int) {\n\treturn file_uac_Team_proto_rawDescGZIP(), []int{1, 0}\n}", "func (*UpdateResponse) Descriptor() ([]byte, []int) {\n\treturn file_teams_v1_teams_proto_rawDescGZIP(), []int{7}\n}", "func (*GetVersionResponse) Descriptor() ([]byte, []int) {\n\treturn file_github_com_yahuizhan_dappley_metrics_go_api_rpc_pb_rpc_proto_rawDescGZIP(), []int{31}\n}", "func (*MetricsResponse) Descriptor() ([]byte, []int) {\n\treturn file_protobuf_index_proto_rawDescGZIP(), []int{25}\n}", "func (*GetStatsResponse) Descriptor() ([]byte, []int) {\n\treturn file_github_com_yahuizhan_dappley_metrics_go_api_rpc_pb_rpc_proto_rawDescGZIP(), []int{45}\n}", "func (*CreateAlterResponse) Descriptor() ([]byte, []int) {\n\treturn file_grpc_exercicio_proto_rawDescGZIP(), []int{2}\n}", "func (*Response) Descriptor() ([]byte, []int) {\n\treturn file_api_protobuf_spec_example_example_proto_rawDescGZIP(), []int{2}\n}", "func (*RefreshResponse) Descriptor() ([]byte, []int) {\n\treturn file_cloudprovider_externalgrpc_protos_externalgrpc_proto_rawDescGZIP(), []int{17}\n}", "func (*Response) Descriptor() ([]byte, []int) {\n\treturn file_protobuf_newfindmaxpb_newfindmaxpb_proto_rawDescGZIP(), []int{1}\n}", "func (*ApplyResponse) Descriptor() ([]byte, []int) {\n\treturn file_github_com_containerd_containerd_api_services_diff_v1_diff_proto_rawDescGZIP(), []int{1}\n}", "func (*DelResponse) Descriptor() ([]byte, []int) {\n\treturn file_patrol_proto_rawDescGZIP(), []int{9}\n}", "func (*ListResponse) Descriptor() ([]byte, []int) {\n\treturn file_proto_url_proto_rawDescGZIP(), []int{4}\n}", "func (x *fastReflection_AddressStringToBytesResponse) Descriptor() 
protoreflect.MessageDescriptor {\n\treturn md_AddressStringToBytesResponse\n}", "func (*FindWebhookCallRequest_Response) Descriptor() ([]byte, []int) {\n\treturn file_uac_Event_proto_rawDescGZIP(), []int{7, 0}\n}", "func (*Response) Descriptor() ([]byte, []int) {\n\treturn file_helloworld_helloworld_proto_rawDescGZIP(), []int{3}\n}", "func (*GetTeamByName_Response) Descriptor() ([]byte, []int) {\n\treturn file_uac_Team_proto_rawDescGZIP(), []int{2, 0}\n}", "func (*ApiVersionResponse) Descriptor() ([]byte, []int) {\n\treturn file_api_ocp_check_api_ocp_check_api_proto_rawDescGZIP(), []int{14}\n}", "func (*FindWebhookCallRequest_Response) Descriptor() ([]byte, []int) {\n\treturn file_events_Event_proto_rawDescGZIP(), []int{9, 0}\n}", "func (*AddPeerRequest) Descriptor() ([]byte, []int) {\n\treturn file_github_com_yahuizhan_dappley_metrics_go_api_rpc_pb_rpc_proto_rawDescGZIP(), []int{8}\n}", "func (*DeleteResponse) Descriptor() ([]byte, []int) {\n\treturn file_grpc_exercicio_proto_rawDescGZIP(), []int{8}\n}", "func (x *fastReflection_QueryParamsResponse) Descriptor() protoreflect.MessageDescriptor {\n\treturn md_QueryParamsResponse\n}", "func (*TelemetryResponse) Descriptor() ([]byte, []int) {\n\treturn file_automate_gateway_api_telemetry_telemetry_proto_rawDescGZIP(), []int{1}\n}", "func (*PerformanceResponse) Descriptor() ([]byte, []int) {\n\treturn file_commissionService_proto_rawDescGZIP(), []int{5}\n}", "func (*ListMyTeams_Response) Descriptor() ([]byte, []int) {\n\treturn file_uac_Team_proto_rawDescGZIP(), []int{4, 0}\n}", "func (*ProxyResponse) Descriptor() ([]byte, []int) {\n\treturn file_proto_url_proto_rawDescGZIP(), []int{6}\n}", "func (*UpdateTelemetryReportedResponse) Descriptor() ([]byte, []int) {\n\treturn file_external_applications_applications_proto_rawDescGZIP(), []int{30}\n}", "func (*Response) Descriptor() ([]byte, []int) {\n\treturn file_interservice_notifications_service_events_proto_rawDescGZIP(), []int{10}\n}", "func (*UpdateResponse) Descriptor() ([]byte, []int) {\n\treturn file_recordwants_proto_rawDescGZIP(), []int{7}\n}", "func (*AddResponse) Descriptor() ([]byte, []int) {\n\treturn file_grpc_calculator_proto_calc_proto_rawDescGZIP(), []int{1}\n}", "func (*AddProducerResponse) Descriptor() ([]byte, []int) {\n\treturn file_github_com_yahuizhan_dappley_metrics_go_api_rpc_pb_rpc_proto_rawDescGZIP(), []int{23}\n}", "func (*WebhookResponse) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_dialogflow_v2beta1_webhook_proto_rawDescGZIP(), []int{1}\n}", "func (*DeleteResponse) Descriptor() ([]byte, []int) {\n\treturn file_github_com_containerd_containerd_runtime_v1_shim_v1_shim_proto_rawDescGZIP(), []int{2}\n}", "func (*SendResponse) Descriptor() ([]byte, []int) {\n\treturn file_github_com_yahuizhan_dappley_metrics_go_api_rpc_pb_rpc_proto_rawDescGZIP(), []int{27}\n}", "func (*PatchCollectorsRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{161}\n}", "func (*Response) Descriptor() ([]byte, []int) {\n\treturn file_Notify_proto_rawDescGZIP(), []int{7}\n}", "func (*DecodeReply) Descriptor() ([]byte, []int) {\n\treturn file_proto_videoservice_proto_rawDescGZIP(), []int{1}\n}", "func (*LivenessCheckResponse) Descriptor() ([]byte, []int) {\n\treturn file_protobuf_index_proto_rawDescGZIP(), []int{0}\n}", "func (*GetPeerInfoRequest) Descriptor() ([]byte, []int) {\n\treturn file_github_com_yahuizhan_dappley_metrics_go_api_rpc_pb_rpc_proto_rawDescGZIP(), []int{6}\n}", "func (*ProxyResponse) Descriptor() ([]byte, 
[]int) {\n\treturn file_proto_sample_proto_rawDescGZIP(), []int{5}\n}", "func (*ListResponse) Descriptor() ([]byte, []int) {\n\treturn file_weather_proto_rawDescGZIP(), []int{17}\n}", "func (*DeregisterResponse) Descriptor() ([]byte, []int) {\n\treturn file_proto_engine_proto_rawDescGZIP(), []int{9}\n}", "func (*GetResponse) Descriptor() ([]byte, []int) {\n\treturn file_proto_comments_proto_rawDescGZIP(), []int{4}\n}", "func (*ResponseAssertions) Descriptor() ([]byte, []int) {\n\treturn file_github_com_solo_io_gloo_projects_gloo_api_external_envoy_extensions_advanced_http_advanced_http_proto_rawDescGZIP(), []int{1}\n}", "func (*Response) Descriptor() ([]byte, []int) {\n\treturn file_proto_service_proto_rawDescGZIP(), []int{2}\n}", "func (*Response) Descriptor() ([]byte, []int) {\n\treturn file_example_proto_rawDescGZIP(), []int{1}\n}", "func (*SetTeam_Response) Descriptor() ([]byte, []int) {\n\treturn file_uac_Team_proto_rawDescGZIP(), []int{5, 0}\n}", "func (*ModelControlResponse) Descriptor() ([]byte, []int) {\n\treturn file_grpc_service_proto_rawDescGZIP(), []int{5}\n}", "func (*LanguageDetectorResponse) Descriptor() ([]byte, []int) {\n\treturn file_proto_language_proto_rawDescGZIP(), []int{2}\n}", "func (*HelloResponse) Descriptor() ([]byte, []int) {\n\treturn file_proto_hello_proto_rawDescGZIP(), []int{1}\n}", "func (*GenerateMessageResponse) Descriptor() ([]byte, []int) {\n\treturn file_google_ai_generativelanguage_v1beta2_discuss_service_proto_rawDescGZIP(), []int{1}\n}", "func (*InferResponse) Descriptor() ([]byte, []int) {\n\treturn file_grpc_service_proto_rawDescGZIP(), []int{9}\n}", "func (*MultiCollectorResponse) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{165}\n}", "func (*CodeLensResponse) Descriptor() ([]byte, []int) {\n\treturn file_protocol_rpc_rpc_proto_rawDescGZIP(), []int{32}\n}", "func (*Response) Descriptor() ([]byte, []int) {\n\treturn file_Trd_ModifyOrder_proto_rawDescGZIP(), []int{3}\n}", "func (*TypeDefinitionResponse) Descriptor() ([]byte, []int) {\n\treturn file_protocol_rpc_rpc_proto_rawDescGZIP(), []int{26}\n}", "func (*AddResponse) Descriptor() ([]byte, []int) {\n\treturn file_proto_calculate_proto_rawDescGZIP(), []int{4}\n}", "func (*MultiScopeResponse) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{111}\n}", "func (x *fastReflection_AddressBytesToStringResponse) Descriptor() protoreflect.MessageDescriptor {\n\treturn md_AddressBytesToStringResponse\n}", "func (*GetSomesResponse) Descriptor() ([]byte, []int) {\n\treturn file_grpc_exercicio_proto_rawDescGZIP(), []int{6}\n}", "func (*TelemetryResponse) Descriptor() ([]byte, []int) {\n\treturn file_interservice_license_control_license_control_proto_rawDescGZIP(), []int{12}\n}", "func (*CreateResponse) Descriptor() ([]byte, []int) {\n\treturn file_teams_v1_teams_proto_rawDescGZIP(), []int{4}\n}", "func (*IntrospectResp) Descriptor() ([]byte, []int) {\n\treturn file_external_iam_v2_response_introspect_proto_rawDescGZIP(), []int{1}\n}", "func (*FindWebhookRequest_Response) Descriptor() ([]byte, []int) {\n\treturn file_events_Event_proto_rawDescGZIP(), []int{3, 0}\n}", "func (*EndpointResponse) Descriptor() ([]byte, []int) {\n\treturn file_messages_proto_rawDescGZIP(), []int{14}\n}", "func (*ShowResponse) Descriptor() ([]byte, []int) {\n\treturn file_teams_v1_teams_proto_rawDescGZIP(), []int{9}\n}", "func (*CollectResponse) Descriptor() ([]byte, []int) {\n\treturn 
file_orc8r_cloud_go_services_analytics_protos_collector_proto_rawDescGZIP(), []int{1}\n}", "func (*DefinitionResponse) Descriptor() ([]byte, []int) {\n\treturn file_protocol_rpc_rpc_proto_rawDescGZIP(), []int{25}\n}", "func (*RequestPresentationResponse) Descriptor() ([]byte, []int) {\n\treturn file_messages_proto_rawDescGZIP(), []int{4}\n}", "func (*ListResponse) Descriptor() ([]byte, []int) {\n\treturn file_versions_v1_versions_proto_rawDescGZIP(), []int{1}\n}", "func (*FindWebhookRequest_Response) Descriptor() ([]byte, []int) {\n\treturn file_uac_Event_proto_rawDescGZIP(), []int{2, 0}\n}", "func (*GetVersionRequest) Descriptor() ([]byte, []int) {\n\treturn file_github_com_yahuizhan_dappley_metrics_go_api_rpc_pb_rpc_proto_rawDescGZIP(), []int{9}\n}", "func (*DeleteResponse) Descriptor() ([]byte, []int) {\n\treturn file_proto_contact_proto_rawDescGZIP(), []int{11}\n}", "func (*Response) Descriptor() ([]byte, []int) {\n\treturn file_kv_proto_rawDescGZIP(), []int{2}\n}", "func (*MultiScopeDepsResponse) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{110}\n}", "func (*UpdateResponse) Descriptor() ([]byte, []int) {\n\treturn file_proto_contact_proto_rawDescGZIP(), []int{13}\n}", "func (*MultiModelVersionResponse) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{91}\n}", "func (*ContractQueryResponse) Descriptor() ([]byte, []int) {\n\treturn file_github_com_yahuizhan_dappley_metrics_go_api_rpc_pb_rpc_proto_rawDescGZIP(), []int{50}\n}", "func (*WatchResponse) Descriptor() ([]byte, []int) {\n\treturn file_protobuf_index_proto_rawDescGZIP(), []int{24}\n}", "func (*DeleteResponse) Descriptor() ([]byte, []int) {\n\treturn file_proto_engine_proto_rawDescGZIP(), []int{13}\n}", "func (*ListResponse) Descriptor() ([]byte, []int) {\n\treturn file_proto_wallet_proto_rawDescGZIP(), []int{8}\n}", "func (*MoneyResponse) Descriptor() ([]byte, []int) {\n\treturn file_proto_swap_swap_proto_rawDescGZIP(), []int{1}\n}", "func (*UpdateConversationResponse) Descriptor() ([]byte, []int) {\n\treturn file_proto_threads_proto_rawDescGZIP(), []int{9}\n}", "func (*ListResponse) Descriptor() ([]byte, []int) {\n\treturn file_v1_proto_rawDescGZIP(), []int{7}\n}", "func (*InvokeResponse) Descriptor() ([]byte, []int) {\n\treturn file_runtime_proto_rawDescGZIP(), []int{19}\n}", "func (*RenameResponse) Descriptor() ([]byte, []int) {\n\treturn file_protocol_rpc_rpc_proto_rawDescGZIP(), []int{43}\n}", "func (*DeleteResponse) Descriptor() ([]byte, []int) {\n\treturn file_versions_v1_versions_proto_rawDescGZIP(), []int{11}\n}", "func (*DiffResponse) Descriptor() ([]byte, []int) {\n\treturn file_github_com_containerd_containerd_api_services_diff_v1_diff_proto_rawDescGZIP(), []int{3}\n}" ]
[ "0.70776343", "0.7037685", "0.69777745", "0.69575745", "0.69492024", "0.6912672", "0.6911184", "0.6866258", "0.68466765", "0.68400687", "0.6833484", "0.6826143", "0.6821017", "0.68039095", "0.6800226", "0.6782455", "0.6780943", "0.67771107", "0.6770286", "0.6766262", "0.67604357", "0.6756529", "0.67546993", "0.67421514", "0.6738973", "0.6736974", "0.6731556", "0.67294997", "0.6726442", "0.67242944", "0.67217934", "0.67212325", "0.67160594", "0.67139184", "0.670788", "0.6705832", "0.66989803", "0.66911787", "0.6688876", "0.66844636", "0.66767836", "0.6674839", "0.667177", "0.6667627", "0.6666151", "0.6662951", "0.6662548", "0.66603416", "0.66602755", "0.66577977", "0.66561866", "0.665503", "0.66531926", "0.66530865", "0.6652563", "0.66521174", "0.66511434", "0.66507447", "0.6649954", "0.66482997", "0.6647302", "0.66459197", "0.66439563", "0.6640049", "0.66388535", "0.66382515", "0.66380906", "0.6637719", "0.6637143", "0.66369885", "0.66367143", "0.66339654", "0.66300756", "0.6629922", "0.66277796", "0.6625953", "0.6625387", "0.6623242", "0.66210884", "0.6621039", "0.6620579", "0.6620527", "0.66200835", "0.6618623", "0.6616937", "0.66143703", "0.66129565", "0.6612325", "0.66101927", "0.66068566", "0.6605239", "0.6604039", "0.6603727", "0.6601358", "0.6600879", "0.6599703", "0.65958065", "0.65941876", "0.6591333", "0.65903", "0.65896887" ]
0.0
-1
Deprecated: Use CheckRequest.ProtoReflect.Descriptor instead.
func (*CheckRequest) Descriptor() ([]byte, []int) {
	return file_imgCode_proto_rawDescGZIP(), []int{2}
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (*CheckRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_api_servicecontrol_v1_service_controller_proto_rawDescGZIP(), []int{0}\n}", "func (*DescribeCheckRequest) Descriptor() ([]byte, []int) {\n\treturn file_api_ocp_check_api_ocp_check_api_proto_rawDescGZIP(), []int{2}\n}", "func (*CheckPermissionRequest) Descriptor() ([]byte, []int) {\n\treturn file_pkg_permission_pb_request_proto_rawDescGZIP(), []int{2}\n}", "func (*CheckRequest) Descriptor() ([]byte, []int) {\n\treturn file_authzed_api_v0_acl_service_proto_rawDescGZIP(), []int{5}\n}", "func (*RemoveCheckRequest) Descriptor() ([]byte, []int) {\n\treturn file_api_ocp_check_api_ocp_check_api_proto_rawDescGZIP(), []int{10}\n}", "func (*CreateCheckRequest) Descriptor() ([]byte, []int) {\n\treturn file_api_ocp_check_api_ocp_check_api_proto_rawDescGZIP(), []int{4}\n}", "func (*UpdateCheckRequest) Descriptor() ([]byte, []int) {\n\treturn file_api_ocp_check_api_ocp_check_api_proto_rawDescGZIP(), []int{8}\n}", "func (*ListChecksRequest) Descriptor() ([]byte, []int) {\n\treturn file_api_ocp_check_api_ocp_check_api_proto_rawDescGZIP(), []int{0}\n}", "func (*ValidateRequest) Descriptor() ([]byte, []int) {\n\treturn file_validation_v1_validation_api_proto_rawDescGZIP(), []int{0}\n}", "func (*ValidateRequest) Descriptor() ([]byte, []int) {\n\treturn file_protobuf_clusrun_proto_rawDescGZIP(), []int{17}\n}", "func (*UpdateCheckerV1Request) Descriptor() ([]byte, []int) {\n\treturn file_checker_v1_proto_rawDescGZIP(), []int{2}\n}", "func (*ValidateRequest) Descriptor() ([]byte, []int) {\n\treturn file_cso_v1_validator_api_proto_rawDescGZIP(), []int{0}\n}", "func (*CheckCodeRequest) Descriptor() ([]byte, []int) {\n\treturn file_pkg_verifycode_pb_request_proto_rawDescGZIP(), []int{2}\n}", "func (*HealthCheckRequest) Descriptor() ([]byte, []int) {\n\treturn file_internal_proto_files_domain_probes_proto_rawDescGZIP(), []int{0}\n}", "func (*VerifyRequest) Descriptor() ([]byte, []int) {\n\treturn file_threads_proto_rawDescGZIP(), []int{27}\n}", "func (*ValidateRequest) Descriptor() ([]byte, []int) {\n\treturn file_validate_proto_rawDescGZIP(), []int{0}\n}", "func (*CheckTokenRequest) Descriptor() ([]byte, []int) {\n\treturn file_check_token_proto_rawDescGZIP(), []int{0}\n}", "func (*CAccountHardware_VRCompatibilityCheck_Request) Descriptor() ([]byte, []int) {\n\treturn file_steammessages_accounthardware_steamclient_proto_rawDescGZIP(), []int{19}\n}", "func (*HealthCheckRequest) Descriptor() ([]byte, []int) {\n\treturn file_service_greeter_proto_health_health_proto_rawDescGZIP(), []int{0}\n}", "func (*CheckTokenRequest) Descriptor() ([]byte, []int) {\n\treturn file_service_proto_rawDescGZIP(), []int{3}\n}", "func (*HealthCheckRequest) Descriptor() ([]byte, []int) {\n\treturn file_api_health_service_proto_rawDescGZIP(), []int{0}\n}", "func (*ContentChangeCheckRequest) Descriptor() ([]byte, []int) {\n\treturn file_authzed_api_v0_acl_service_proto_rawDescGZIP(), []int{6}\n}", "func (*GetCheckerV1Request) Descriptor() ([]byte, []int) {\n\treturn file_checker_v1_proto_rawDescGZIP(), []int{6}\n}", "func (*DescribeCheckerV1Request) Descriptor() ([]byte, []int) {\n\treturn file_checker_v1_proto_rawDescGZIP(), []int{10}\n}", "func (*CheckLiveRequest) Descriptor() ([]byte, []int) {\n\treturn file_health_proto_rawDescGZIP(), []int{2}\n}", "func (*UpdatePermissionRequest) Descriptor() ([]byte, []int) {\n\treturn file_pkg_role_pb_request_proto_rawDescGZIP(), []int{9}\n}", "func (*MultiCreateCheckRequest) Descriptor() ([]byte, []int) {\n\treturn 
file_api_ocp_check_api_ocp_check_api_proto_rawDescGZIP(), []int{6}\n}", "func (*SelectorVerificationReq) Descriptor() ([]byte, []int) {\n\treturn file_proto_selector_verification_msgs_proto_rawDescGZIP(), []int{0}\n}", "func (*CreateCheckerV1Request) Descriptor() ([]byte, []int) {\n\treturn file_checker_v1_proto_rawDescGZIP(), []int{0}\n}", "func (*VerifyRequest) Descriptor() ([]byte, []int) {\n\treturn file_crypto_proto_rawDescGZIP(), []int{4}\n}", "func (*GetCheckerStatusV1Request) Descriptor() ([]byte, []int) {\n\treturn file_checker_v1_proto_rawDescGZIP(), []int{12}\n}", "func (*WebhookRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_dialogflow_v2beta1_webhook_proto_rawDescGZIP(), []int{0}\n}", "func (*DeleteCheckerV1Request) Descriptor() ([]byte, []int) {\n\treturn file_checker_v1_proto_rawDescGZIP(), []int{4}\n}", "func (*HealthCheckStatusRequest) Descriptor() ([]byte, []int) {\n\treturn file_basicpb_unary_api_proto_rawDescGZIP(), []int{2}\n}", "func (*QueryPermissionRequest) Descriptor() ([]byte, []int) {\n\treturn file_pkg_permission_pb_request_proto_rawDescGZIP(), []int{0}\n}", "func (*SelfCheckRequest) Descriptor() ([]byte, []int) {\n\treturn file_viz_proto_rawDescGZIP(), []int{2}\n}", "func (*DiagnoseRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_api_proto_rawDescGZIP(), []int{16}\n}", "func (*ProvideValidationFeedbackRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_maps_addressvalidation_v1_address_validation_service_proto_rawDescGZIP(), []int{2}\n}", "func (*ValidatorStatusRequest) Descriptor() ([]byte, []int) {\n\treturn file_eth_v1alpha1_validator_proto_rawDescGZIP(), []int{8}\n}", "func (*ValidateClientCredentialRequest) Descriptor() ([]byte, []int) {\n\treturn file_pkg_micro_pb_request_proto_rawDescGZIP(), []int{0}\n}", "func (*HealthcheckRequest) Descriptor() ([]byte, []int) {\n\treturn file_pkg_noderpc_proto_feeds_manager_proto_rawDescGZIP(), []int{10}\n}", "func (*ChangeRequest) Descriptor() ([]byte, []int) {\n\treturn file_authorization_proto_rawDescGZIP(), []int{0}\n}", "func (*HardCheckRequest) Descriptor() ([]byte, []int) {\n\treturn file_services_core_protobuf_servers_proto_rawDescGZIP(), []int{10}\n}", "func (*ValidateRequestSignatureRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_account_proto_rawDescGZIP(), []int{0}\n}", "func (*DescribePermissionRequest) Descriptor() ([]byte, []int) {\n\treturn file_pkg_role_pb_request_proto_rawDescGZIP(), []int{6}\n}", "func (*ProofRequest) Descriptor() ([]byte, []int) {\n\treturn file_messages_proto_rawDescGZIP(), []int{35}\n}", "func (*DetectionRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_chromeos_uidetection_v1_ui_detection_proto_rawDescGZIP(), []int{1}\n}", "func (*DomainRequest) Descriptor() ([]byte, []int) {\n\treturn file_eth_v1alpha1_validator_proto_rawDescGZIP(), []int{0}\n}", "func (*DoppelGangerRequest_ValidatorRequest) Descriptor() ([]byte, []int) {\n\treturn file_eth_v1alpha1_validator_proto_rawDescGZIP(), []int{27, 0}\n}", "func (*DeleteRequest) Descriptor() ([]byte, []int) {\n\treturn file_dictybase_api_jsonapi_request_proto_rawDescGZIP(), []int{7}\n}", "func (*SelectorVerificationsReq) Descriptor() ([]byte, []int) {\n\treturn file_proto_selector_verification_msgs_proto_rawDescGZIP(), []int{2}\n}", "func (*QueryPermissionRequest) Descriptor() ([]byte, []int) {\n\treturn file_pkg_role_pb_request_proto_rawDescGZIP(), []int{5}\n}", "func (*DescribeCheckersV1Request) Descriptor() ([]byte, []int) {\n\treturn 
file_checker_v1_proto_rawDescGZIP(), []int{8}\n}", "func (*ValidateRefRequest) Descriptor() ([]byte, []int) {\n\treturn file_pkg_flow_grpc_workflows_proto_rawDescGZIP(), []int{31}\n}", "func (*ModifyRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_engine_proto_rawDescGZIP(), []int{10}\n}", "func (*GetCheckerIssuesV1Request) Descriptor() ([]byte, []int) {\n\treturn file_checker_v1_proto_rawDescGZIP(), []int{15}\n}", "func (*WatchProvisioningApprovalRequestRequest) Descriptor() ([]byte, []int) {\n\treturn edgelq_devices_proto_v1alpha_provisioning_approval_request_service_proto_rawDescGZIP(), []int{5}\n}", "func (*GetLogConsistencyProofRequest) Descriptor() ([]byte, []int) {\n\treturn file_api_proto_rawDescGZIP(), []int{1}\n}", "func (*FindWebhookCallRequest) Descriptor() ([]byte, []int) {\n\treturn file_events_Event_proto_rawDescGZIP(), []int{9}\n}", "func (*CheckResponse_CheckInfo) Descriptor() ([]byte, []int) {\n\treturn file_google_api_servicecontrol_v1_service_controller_proto_rawDescGZIP(), []int{1, 0}\n}", "func (*CheckAliasValidityRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_alias_v1_alias_proto_rawDescGZIP(), []int{2}\n}", "func (*PatchCollectorsRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{161}\n}", "func (*MessageHubVerifyRequest) Descriptor() ([]byte, []int) {\n\treturn file_messagehub_proto_rawDescGZIP(), []int{3}\n}", "func (*BatchUpdateReferencesRequest_Request) Descriptor() ([]byte, []int) {\n\treturn file_pkg_proto_icas_icas_proto_rawDescGZIP(), []int{1, 0}\n}", "func (*Check) Descriptor() ([]byte, []int) {\n\treturn file_api_ocp_check_api_ocp_check_api_proto_rawDescGZIP(), []int{12}\n}", "func (*UpdateIngressRuleRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_appengine_v1_appengine_proto_rawDescGZIP(), []int{26}\n}", "func (*PingRequest) Descriptor() ([]byte, []int) {\n\treturn file_internal_crosstest_v1test_cross_proto_rawDescGZIP(), []int{0}\n}", "func (*FindWebhookCallRequest) Descriptor() ([]byte, []int) {\n\treturn file_uac_Event_proto_rawDescGZIP(), []int{7}\n}", "func (*GetRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_comments_proto_rawDescGZIP(), []int{3}\n}", "func (*UpdateRequest) Descriptor() ([]byte, []int) {\n\treturn file_teams_v1_teams_proto_rawDescGZIP(), []int{5}\n}", "func (*CheckProjectTokenRequest) Descriptor() ([]byte, []int) {\n\treturn file_user_proto_rawDescGZIP(), []int{32}\n}", "func (*GetPeerInfoRequest) Descriptor() ([]byte, []int) {\n\treturn file_github_com_yahuizhan_dappley_metrics_go_api_rpc_pb_rpc_proto_rawDescGZIP(), []int{6}\n}", "func (*UpdateRequest) Descriptor() ([]byte, []int) {\n\treturn file_recordwants_proto_rawDescGZIP(), []int{6}\n}", "func (*UpdateRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_contact_proto_rawDescGZIP(), []int{12}\n}", "func (*FailRequest) Descriptor() ([]byte, []int) {\n\treturn file_internal_crosstest_v1test_cross_proto_rawDescGZIP(), []int{2}\n}", "func (*ListProofRequestsRequest) Descriptor() ([]byte, []int) {\n\treturn file_messages_proto_rawDescGZIP(), []int{34}\n}", "func (*OriginalDetectIntentRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_dialogflow_v2beta1_webhook_proto_rawDescGZIP(), []int{2}\n}", "func (*UpdatePermissionRequest) Descriptor() ([]byte, []int) {\n\treturn file_protodef_user_user_proto_rawDescGZIP(), []int{16}\n}", "func (*MultipleValidatorStatusRequest) Descriptor() ([]byte, []int) {\n\treturn 
file_eth_v1alpha1_validator_proto_rawDescGZIP(), []int{10}\n}", "func (*CheckReadyRequest) Descriptor() ([]byte, []int) {\n\treturn file_health_proto_rawDescGZIP(), []int{0}\n}", "func (*CMsgGCPlayerInfoRequest) Descriptor() ([]byte, []int) {\n\treturn file_dota_gcmessages_client_proto_rawDescGZIP(), []int{117}\n}", "func (*DescribeCheckResponse) Descriptor() ([]byte, []int) {\n\treturn file_api_ocp_check_api_ocp_check_api_proto_rawDescGZIP(), []int{3}\n}", "func (*DeleteRequest) Descriptor() ([]byte, []int) {\n\treturn file_grpc_exercicio_proto_rawDescGZIP(), []int{7}\n}", "func (*AddPeerRequest) Descriptor() ([]byte, []int) {\n\treturn file_github_com_yahuizhan_dappley_metrics_go_api_rpc_pb_rpc_proto_rawDescGZIP(), []int{8}\n}", "func (*CalculatorRequest) Descriptor() ([]byte, []int) {\n\treturn file_basicpb_unary_api_proto_rawDescGZIP(), []int{4}\n}", "func (*GetIngressRuleRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_appengine_v1_appengine_proto_rawDescGZIP(), []int{25}\n}", "func (*OrgDomainValidationRequest) Descriptor() ([]byte, []int) {\n\treturn file_management_proto_rawDescGZIP(), []int{55}\n}", "func (*ValidateTokenRequest) Descriptor() ([]byte, []int) {\n\treturn file_user_proto_rawDescGZIP(), []int{4}\n}", "func (*Request) Descriptor() ([]byte, []int) {\n\treturn file_msgs_msgs_proto_rawDescGZIP(), []int{14}\n}", "func (*WatchProvisioningApprovalRequestsRequest) Descriptor() ([]byte, []int) {\n\treturn edgelq_devices_proto_v1alpha_provisioning_approval_request_service_proto_rawDescGZIP(), []int{7}\n}", "func (*CreateAlterRequest) Descriptor() ([]byte, []int) {\n\treturn file_grpc_exercicio_proto_rawDescGZIP(), []int{1}\n}", "func (*ListenRequest) Descriptor() ([]byte, []int) {\n\treturn file_faultinjector_proto_rawDescGZIP(), []int{8}\n}", "func (*Request) Descriptor() ([]byte, []int) {\n\treturn file_ocis_messages_policies_v0_policies_proto_rawDescGZIP(), []int{2}\n}", "func (*FilteredPolicyRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_casbin_proto_rawDescGZIP(), []int{11}\n}", "func (*FeedbackRequest) Descriptor() ([]byte, []int) {\n\treturn file_ssn_dataservice_v1_dataservice_proto_rawDescGZIP(), []int{10}\n}", "func (*AddPermissionToRoleRequest) Descriptor() ([]byte, []int) {\n\treturn file_pkg_role_pb_request_proto_rawDescGZIP(), []int{7}\n}", "func (*CMsgClientToGCPlayerStatsRequest) Descriptor() ([]byte, []int) {\n\treturn file_dota_gcmessages_client_proto_rawDescGZIP(), []int{143}\n}", "func (*FindWebhookRequest) Descriptor() ([]byte, []int) {\n\treturn file_events_Event_proto_rawDescGZIP(), []int{3}\n}", "func (*PatchAnnotationsRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{4}\n}", "func (*NewChromeRequest) Descriptor() ([]byte, []int) {\n\treturn file_check_power_menu_service_proto_rawDescGZIP(), []int{0}\n}" ]
[ "0.7296165", "0.72562546", "0.72532135", "0.7185493", "0.7169828", "0.7153088", "0.7151416", "0.6986814", "0.69666773", "0.69412285", "0.6925985", "0.68989396", "0.68957925", "0.68830997", "0.6878103", "0.68631446", "0.68546945", "0.68501765", "0.6838815", "0.682475", "0.68238956", "0.68112", "0.68077344", "0.680327", "0.6797477", "0.67597336", "0.67553854", "0.67541856", "0.67503387", "0.6742081", "0.67362595", "0.6727141", "0.67204726", "0.670859", "0.669144", "0.6657413", "0.66565734", "0.66495335", "0.66443866", "0.66422147", "0.6641144", "0.6609611", "0.66075796", "0.65996283", "0.65976834", "0.6586037", "0.6585053", "0.6575653", "0.65736824", "0.65724576", "0.6571794", "0.6559004", "0.65580946", "0.6549663", "0.65368515", "0.65364265", "0.65316033", "0.65304023", "0.65082055", "0.64961433", "0.649241", "0.6490241", "0.64901483", "0.6489686", "0.6489252", "0.6483847", "0.647273", "0.6472606", "0.64718866", "0.64654106", "0.6463478", "0.6462707", "0.64524657", "0.6451937", "0.6445621", "0.6444898", "0.6441595", "0.6441001", "0.644", "0.6431325", "0.64266384", "0.6422704", "0.64204603", "0.6414462", "0.64094317", "0.64063275", "0.64023423", "0.6398975", "0.6394983", "0.6391588", "0.6390963", "0.6388377", "0.6387235", "0.63864654", "0.63855416", "0.6383772", "0.63833386", "0.6383147", "0.6383035", "0.6382509" ]
0.65349936
56
Deprecated: Use CheckResponse.ProtoReflect.Descriptor instead.
func (*CheckResponse) Descriptor() ([]byte, []int) {
	return file_imgCode_proto_rawDescGZIP(), []int{3}
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (*DescribeCheckResponse) Descriptor() ([]byte, []int) {\n\treturn file_api_ocp_check_api_ocp_check_api_proto_rawDescGZIP(), []int{3}\n}", "func (*RemoveCheckResponse) Descriptor() ([]byte, []int) {\n\treturn file_api_ocp_check_api_ocp_check_api_proto_rawDescGZIP(), []int{11}\n}", "func (*CheckResponse) Descriptor() ([]byte, []int) {\n\treturn file_google_api_servicecontrol_v1_service_controller_proto_rawDescGZIP(), []int{1}\n}", "func (*CheckResponse_CheckInfo) Descriptor() ([]byte, []int) {\n\treturn file_google_api_servicecontrol_v1_service_controller_proto_rawDescGZIP(), []int{1, 0}\n}", "func (*CheckResponse) Descriptor() ([]byte, []int) {\n\treturn file_authzed_api_v0_acl_service_proto_rawDescGZIP(), []int{7}\n}", "func (*ListChecksResponse) Descriptor() ([]byte, []int) {\n\treturn file_api_ocp_check_api_ocp_check_api_proto_rawDescGZIP(), []int{1}\n}", "func (*CreateCheckResponse) Descriptor() ([]byte, []int) {\n\treturn file_api_ocp_check_api_ocp_check_api_proto_rawDescGZIP(), []int{5}\n}", "func (*UpdateCheckResponse) Descriptor() ([]byte, []int) {\n\treturn file_api_ocp_check_api_ocp_check_api_proto_rawDescGZIP(), []int{9}\n}", "func (*DescribeCheckRequest) Descriptor() ([]byte, []int) {\n\treturn file_api_ocp_check_api_ocp_check_api_proto_rawDescGZIP(), []int{2}\n}", "func (*LivenessCheckResponse) Descriptor() ([]byte, []int) {\n\treturn file_protobuf_index_proto_rawDescGZIP(), []int{0}\n}", "func (*CheckLiveResponse) Descriptor() ([]byte, []int) {\n\treturn file_health_proto_rawDescGZIP(), []int{3}\n}", "func (*HealthCheckResponse) Descriptor() ([]byte, []int) {\n\treturn file_internal_proto_files_domain_probes_proto_rawDescGZIP(), []int{1}\n}", "func (*UpdateCheckerV1Response) Descriptor() ([]byte, []int) {\n\treturn file_checker_v1_proto_rawDescGZIP(), []int{3}\n}", "func (*DescribeCheckerV1Response) Descriptor() ([]byte, []int) {\n\treturn file_checker_v1_proto_rawDescGZIP(), []int{11}\n}", "func (*CheckRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_api_servicecontrol_v1_service_controller_proto_rawDescGZIP(), []int{0}\n}", "func (*GetCheckerV1Response) Descriptor() ([]byte, []int) {\n\treturn file_checker_v1_proto_rawDescGZIP(), []int{7}\n}", "func (*RemoveCheckRequest) Descriptor() ([]byte, []int) {\n\treturn file_api_ocp_check_api_ocp_check_api_proto_rawDescGZIP(), []int{10}\n}", "func (*Check) Descriptor() ([]byte, []int) {\n\treturn file_api_ocp_check_api_ocp_check_api_proto_rawDescGZIP(), []int{12}\n}", "func (*ValidateResponse) Descriptor() ([]byte, []int) {\n\treturn file_validation_v1_validation_api_proto_rawDescGZIP(), []int{1}\n}", "func (*CheckRequest) Descriptor() ([]byte, []int) {\n\treturn file_authzed_api_v0_acl_service_proto_rawDescGZIP(), []int{5}\n}", "func (*ListChecksRequest) Descriptor() ([]byte, []int) {\n\treturn file_api_ocp_check_api_ocp_check_api_proto_rawDescGZIP(), []int{0}\n}", "func (*MultiCreateCheckResponse) Descriptor() ([]byte, []int) {\n\treturn file_api_ocp_check_api_ocp_check_api_proto_rawDescGZIP(), []int{7}\n}", "func (*GetCheckerStatusV1Response) Descriptor() ([]byte, []int) {\n\treturn file_checker_v1_proto_rawDescGZIP(), []int{13}\n}", "func (*ValidateResponse) Descriptor() ([]byte, []int) {\n\treturn file_cso_v1_validator_api_proto_rawDescGZIP(), []int{1}\n}", "func (*HealthCheckResponse) Descriptor() ([]byte, []int) {\n\treturn file_service_greeter_proto_health_health_proto_rawDescGZIP(), []int{1}\n}", "func (*CreateCheckerV1Response) Descriptor() ([]byte, []int) {\n\treturn 
file_checker_v1_proto_rawDescGZIP(), []int{1}\n}", "func (*SinglePasswordValidationResponse) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{127}\n}", "func (*CheckLiveRequest) Descriptor() ([]byte, []int) {\n\treturn file_health_proto_rawDescGZIP(), []int{2}\n}", "func (*ValidateResponse) Descriptor() ([]byte, []int) {\n\treturn file_validate_proto_rawDescGZIP(), []int{1}\n}", "func (*CreateCheckRequest) Descriptor() ([]byte, []int) {\n\treturn file_api_ocp_check_api_ocp_check_api_proto_rawDescGZIP(), []int{4}\n}", "func (*CheckCodeResponse) Descriptor() ([]byte, []int) {\n\treturn file_pkg_verifycode_pb_request_proto_rawDescGZIP(), []int{3}\n}", "func (*CheckDetailResponse) Descriptor() ([]byte, []int) {\n\treturn file_checkDetailService_proto_rawDescGZIP(), []int{1}\n}", "func (*HealthCheckResponse) Descriptor() ([]byte, []int) {\n\treturn file_api_health_service_proto_rawDescGZIP(), []int{1}\n}", "func (*ApiVersionResponse) Descriptor() ([]byte, []int) {\n\treturn file_api_ocp_check_api_ocp_check_api_proto_rawDescGZIP(), []int{14}\n}", "func (*CheckPermissionRequest) Descriptor() ([]byte, []int) {\n\treturn file_pkg_permission_pb_request_proto_rawDescGZIP(), []int{2}\n}", "func (*HealthCheckRequest) Descriptor() ([]byte, []int) {\n\treturn file_internal_proto_files_domain_probes_proto_rawDescGZIP(), []int{0}\n}", "func (*Deprecation) Descriptor() ([]byte, []int) {\n\treturn file_external_cfgmgmt_response_nodes_proto_rawDescGZIP(), []int{8}\n}", "func (*CheckTokenResponse) Descriptor() ([]byte, []int) {\n\treturn file_service_proto_rawDescGZIP(), []int{4}\n}", "func (x *fastReflection_MsgUpdateParamsResponse) Descriptor() protoreflect.MessageDescriptor {\n\treturn md_MsgUpdateParamsResponse\n}", "func (*DescribeCheckersV1Response) Descriptor() ([]byte, []int) {\n\treturn file_checker_v1_proto_rawDescGZIP(), []int{9}\n}", "func (*UpdateCheckRequest) Descriptor() ([]byte, []int) {\n\treturn file_api_ocp_check_api_ocp_check_api_proto_rawDescGZIP(), []int{8}\n}", "func (*DeleteCheckerV1Response) Descriptor() ([]byte, []int) {\n\treturn file_checker_v1_proto_rawDescGZIP(), []int{5}\n}", "func (*HealthcheckResponse) Descriptor() ([]byte, []int) {\n\treturn file_pkg_noderpc_proto_feeds_manager_proto_rawDescGZIP(), []int{11}\n}", "func (*ProvideValidationFeedbackResponse) Descriptor() ([]byte, []int) {\n\treturn file_google_maps_addressvalidation_v1_address_validation_service_proto_rawDescGZIP(), []int{3}\n}", "func (*HealthCheckRequest) Descriptor() ([]byte, []int) {\n\treturn file_service_greeter_proto_health_health_proto_rawDescGZIP(), []int{0}\n}", "func (*CheckHealthResult) Descriptor() ([]byte, []int) {\n\treturn file_proto_api_proto_rawDescGZIP(), []int{9}\n}", "func (*CAccountHardware_VRCompatibilityCheck_Response) Descriptor() ([]byte, []int) {\n\treturn file_steammessages_accounthardware_steamclient_proto_rawDescGZIP(), []int{20}\n}", "func (*VerifyResponse) Descriptor() ([]byte, []int) {\n\treturn file_crypto_proto_rawDescGZIP(), []int{5}\n}", "func (*SelfCheckResponse) Descriptor() ([]byte, []int) {\n\treturn file_viz_proto_rawDescGZIP(), []int{3}\n}", "func (*SelectorVerificationReq) Descriptor() ([]byte, []int) {\n\treturn file_proto_selector_verification_msgs_proto_rawDescGZIP(), []int{0}\n}", "func (*HealthCheckRequest) Descriptor() ([]byte, []int) {\n\treturn file_api_health_service_proto_rawDescGZIP(), []int{0}\n}", "func (*CheckAliasValidityResponse) Descriptor() ([]byte, []int) {\n\treturn 
file_proto_alias_v1_alias_proto_rawDescGZIP(), []int{3}\n}", "func (*DiagnoseResponse) Descriptor() ([]byte, []int) {\n\treturn file_proto_api_proto_rawDescGZIP(), []int{17}\n}", "func (*HealthCheckStatusRequest) Descriptor() ([]byte, []int) {\n\treturn file_basicpb_unary_api_proto_rawDescGZIP(), []int{2}\n}", "func (*ValidateRequest) Descriptor() ([]byte, []int) {\n\treturn file_validation_v1_validation_api_proto_rawDescGZIP(), []int{0}\n}", "func (*DescribeCheckerV1Request) Descriptor() ([]byte, []int) {\n\treturn file_checker_v1_proto_rawDescGZIP(), []int{10}\n}", "func (*ResponseAssertions) Descriptor() ([]byte, []int) {\n\treturn file_github_com_solo_io_gloo_projects_gloo_api_external_envoy_extensions_advanced_http_advanced_http_proto_rawDescGZIP(), []int{1}\n}", "func (*AddPeerResponse) Descriptor() ([]byte, []int) {\n\treturn file_github_com_yahuizhan_dappley_metrics_go_api_rpc_pb_rpc_proto_rawDescGZIP(), []int{30}\n}", "func (*GetCheckerIssuesV1Response) Descriptor() ([]byte, []int) {\n\treturn file_checker_v1_proto_rawDescGZIP(), []int{16}\n}", "func (*SelectorVerificationRes) Descriptor() ([]byte, []int) {\n\treturn file_proto_selector_verification_msgs_proto_rawDescGZIP(), []int{1}\n}", "func (*ValidateReply) Descriptor() ([]byte, []int) {\n\treturn file_protobuf_clusrun_proto_rawDescGZIP(), []int{18}\n}", "func (*GetPeerInfoResponse) Descriptor() ([]byte, []int) {\n\treturn file_github_com_yahuizhan_dappley_metrics_go_api_rpc_pb_rpc_proto_rawDescGZIP(), []int{28}\n}", "func (*CheckStatelessResponse) Descriptor() ([]byte, []int) {\n\treturn file_orc8r_protos_magmad_proto_rawDescGZIP(), []int{14}\n}", "func (*HealthcheckRequest) Descriptor() ([]byte, []int) {\n\treturn file_pkg_noderpc_proto_feeds_manager_proto_rawDescGZIP(), []int{10}\n}", "func (*GetMetricsInfoResponse) Descriptor() ([]byte, []int) {\n\treturn file_github_com_yahuizhan_dappley_metrics_go_api_rpc_pb_rpc_proto_rawDescGZIP(), []int{44}\n}", "func (*GetCheckerV1Request) Descriptor() ([]byte, []int) {\n\treturn file_checker_v1_proto_rawDescGZIP(), []int{6}\n}", "func (*ValidatorUpdate) Descriptor() ([]byte, []int) {\n\treturn file_tm_replay_proto_rawDescGZIP(), []int{9}\n}", "func (*GetCheckerStatusV1Request) Descriptor() ([]byte, []int) {\n\treturn file_checker_v1_proto_rawDescGZIP(), []int{12}\n}", "func (*PingResponse) Descriptor() ([]byte, []int) {\n\treturn file_internal_crosstest_v1test_cross_proto_rawDescGZIP(), []int{1}\n}", "func (*ValidateRequest) Descriptor() ([]byte, []int) {\n\treturn file_protobuf_clusrun_proto_rawDescGZIP(), []int{17}\n}", "func (*ValidateRequest) Descriptor() ([]byte, []int) {\n\treturn file_cso_v1_validator_api_proto_rawDescGZIP(), []int{0}\n}", "func (*HardCheckRequest) Descriptor() ([]byte, []int) {\n\treturn file_services_core_protobuf_servers_proto_rawDescGZIP(), []int{10}\n}", "func (*FailRequest) Descriptor() ([]byte, []int) {\n\treturn file_internal_crosstest_v1test_cross_proto_rawDescGZIP(), []int{2}\n}", "func (*FailResponse) Descriptor() ([]byte, []int) {\n\treturn file_internal_crosstest_v1test_cross_proto_rawDescGZIP(), []int{3}\n}", "func (*HealthCheckStatusResponse) Descriptor() ([]byte, []int) {\n\treturn file_basicpb_unary_api_proto_rawDescGZIP(), []int{3}\n}", "func (*Check) Descriptor() ([]byte, []int) {\n\treturn file_services_core_protobuf_servers_proto_rawDescGZIP(), []int{11}\n}", "func (*ListResponse) Descriptor() ([]byte, []int) {\n\treturn file_teams_v1_teams_proto_rawDescGZIP(), []int{1}\n}", "func (*CheckProjectTokenResponse) Descriptor() ([]byte, []int) 
{\n\treturn file_user_proto_rawDescGZIP(), []int{33}\n}", "func (*ListResponse) Descriptor() ([]byte, []int) {\n\treturn file_proto_contact_proto_rawDescGZIP(), []int{15}\n}", "func (*FindWebhookCallRequest_Response) Descriptor() ([]byte, []int) {\n\treturn file_events_Event_proto_rawDescGZIP(), []int{9, 0}\n}", "func (*FindWebhookCallRequest_Response) Descriptor() ([]byte, []int) {\n\treturn file_uac_Event_proto_rawDescGZIP(), []int{7, 0}\n}", "func (*HealthCheckResponse) Descriptor() ([]byte, []int) {\n\treturn file_src_search_proto_rawDescGZIP(), []int{6}\n}", "func (*CheckTokenRequest) Descriptor() ([]byte, []int) {\n\treturn file_check_token_proto_rawDescGZIP(), []int{0}\n}", "func (*DomainResponse) Descriptor() ([]byte, []int) {\n\treturn file_eth_v1alpha1_validator_proto_rawDescGZIP(), []int{1}\n}", "func (*CheckReadyResponse) Descriptor() ([]byte, []int) {\n\treturn file_health_proto_rawDescGZIP(), []int{1}\n}", "func (*ValidatorStatusResponse) Descriptor() ([]byte, []int) {\n\treturn file_eth_v1alpha1_validator_proto_rawDescGZIP(), []int{9}\n}", "func (*VerifyReply) Descriptor() ([]byte, []int) {\n\treturn file_threads_proto_rawDescGZIP(), []int{28}\n}", "func (*MultiCreateCheckRequest) Descriptor() ([]byte, []int) {\n\treturn file_api_ocp_check_api_ocp_check_api_proto_rawDescGZIP(), []int{6}\n}", "func (*GetTeamById_Response) Descriptor() ([]byte, []int) {\n\treturn file_uac_Team_proto_rawDescGZIP(), []int{1, 0}\n}", "func (*QueryPlanStatusResponseProto) Descriptor() ([]byte, []int) {\n\treturn file_ClientDatanodeProtocol_proto_rawDescGZIP(), []int{25}\n}", "func (*UpdateCheckerV1Request) Descriptor() ([]byte, []int) {\n\treturn file_checker_v1_proto_rawDescGZIP(), []int{2}\n}", "func (*CheckTokenRequest) Descriptor() ([]byte, []int) {\n\treturn file_service_proto_rawDescGZIP(), []int{3}\n}", "func (*ValidateRequest) Descriptor() ([]byte, []int) {\n\treturn file_validate_proto_rawDescGZIP(), []int{0}\n}", "func (*VerifyRequest) Descriptor() ([]byte, []int) {\n\treturn file_threads_proto_rawDescGZIP(), []int{27}\n}", "func (*ModifyResponse) Descriptor() ([]byte, []int) {\n\treturn file_proto_engine_proto_rawDescGZIP(), []int{11}\n}", "func (*WebhookResponse) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_dialogflow_v2beta1_webhook_proto_rawDescGZIP(), []int{1}\n}", "func (*CheckCodeRequest) Descriptor() ([]byte, []int) {\n\treturn file_pkg_verifycode_pb_request_proto_rawDescGZIP(), []int{2}\n}", "func (*UpdateTelemetryReportedResponse) Descriptor() ([]byte, []int) {\n\treturn file_external_applications_applications_proto_rawDescGZIP(), []int{30}\n}", "func (*DoppelGangerResponse_ValidatorResponse) Descriptor() ([]byte, []int) {\n\treturn file_eth_v1alpha1_validator_proto_rawDescGZIP(), []int{28, 0}\n}", "func (*MessageHubVerifyResponse) Descriptor() ([]byte, []int) {\n\treturn file_messagehub_proto_rawDescGZIP(), []int{4}\n}", "func (*SelfCheckRequest) Descriptor() ([]byte, []int) {\n\treturn file_viz_proto_rawDescGZIP(), []int{2}\n}" ]
[ "0.72049487", "0.71510273", "0.70745736", "0.7069298", "0.70513886", "0.7049406", "0.7048401", "0.70210135", "0.69132155", "0.6911588", "0.6868659", "0.6855346", "0.6838182", "0.68354154", "0.68328184", "0.6821605", "0.68125427", "0.6808781", "0.6804347", "0.67804843", "0.6778955", "0.67714196", "0.6749602", "0.6737448", "0.67299145", "0.6727761", "0.671686", "0.6713849", "0.67111367", "0.6710731", "0.6709081", "0.6701422", "0.6695538", "0.6694857", "0.6692836", "0.66926163", "0.6673309", "0.6660064", "0.6653226", "0.6650135", "0.6638545", "0.66296023", "0.6612534", "0.6605838", "0.6591452", "0.65879333", "0.65805334", "0.6572585", "0.6570264", "0.6561692", "0.6561125", "0.65495616", "0.6547632", "0.6541999", "0.6532249", "0.65283316", "0.6527507", "0.6527121", "0.65255755", "0.65249026", "0.65221614", "0.6506388", "0.6500678", "0.649543", "0.6485812", "0.64826363", "0.6479402", "0.647699", "0.64762753", "0.6474264", "0.6472749", "0.6471677", "0.64683485", "0.646727", "0.6463924", "0.6461303", "0.64605397", "0.64555496", "0.6454414", "0.6447433", "0.6446202", "0.6441206", "0.6435643", "0.6435214", "0.6434569", "0.6418329", "0.6415153", "0.6412508", "0.6411979", "0.6407618", "0.6404798", "0.6404318", "0.6403485", "0.63990915", "0.6393629", "0.6392916", "0.6392285", "0.6389158", "0.63847435", "0.6381872", "0.63801765" ]
0.0
-1
Open mysql connection using dsn.
func Open(dsn string) (*Adapter, error) {
	var err error
	adapter := &Adapter{sql.New("?", false, errorFunc, incrementFunc)}
	adapter.DB, err = db.Open("sqlite3", dsn)
	return adapter, err
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func Open(user, addr, dbName string) (*sql.DB, error) {\n\tc := &mysql.Config{\n\t\tUser: user,\n\t\tNet: \"tcp\",\n\t\tAddr: addr,\n\t\tDBName: dbName,\n\t\tCollation: \"utf8mb4_bin\",\n\t\tParseTime: true,\n\t}\n\n\treturn sql.Open(\"mysql\", c.FormatDSN())\n}", "func openDB(dsn string) (*sql.DB, error) {\n\tDB, err := sql.Open(\"mysql\", dsn)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif err = DB.Ping(); err != nil {\n\t\treturn nil, err\n\t}\n\treturn DB, nil\n}", "func openDB(dsn string) (*sql.DB, error) {\n\tdb, err := sql.Open(\"mysql\", dsn)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif err = db.Ping(); err != nil {\n\t\treturn nil, err\n\t}\n\treturn db, nil\n}", "func Open(dsn string) (*sql.DB, error) {\n\tcfg, err := mysql.ParseDSN(dsn)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// MySQL flags that affect storage logic.\n\tcfg.ClientFoundRows = true // Return number of matching rows instead of rows changed.\n\tcfg.ParseTime = true // Parse time values to time.Time\n\tcfg.Loc = time.UTC\n\n\tdb, err := sql.Open(\"mysql\", cfg.FormatDSN())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn db, db.Ping()\n}", "func openDB(dsn string) (*sql.DB, error) {\r\n\tdb, err := sql.Open(\"mysql\", dsn)\r\n\r\n\tif err != nil {\r\n\t\treturn nil, err\r\n\t}\r\n\r\n\tif err = db.Ping(); err != nil {\r\n\t\treturn nil, err\r\n\t}\r\n\r\n\treturn db, nil\r\n}", "func openDB(dsn string) (*sql.DB, error) {\n\tdb, err := sql.Open(\"mysql\", dsn)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif err = db.Ping(); err != nil {\n\t\treturn nil, err\n\t}\n\treturn db, nil\n}", "func openDB(dsn string) (*sql.DB, error) {\n\tdb, err := sql.Open(\"mysql\", dsn)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif err = db.Ping(); err != nil {\n\t\treturn nil, err\n\t}\n\treturn db, nil\n}", "func openDB(dsn string) (*sql.DB, error) {\n\tdb, err := sql.Open(\"mysql\", dsn)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif err = db.Ping(); err != nil {\n\t\treturn nil, err\n\t}\n\treturn db, nil\n}", "func OpenDatabaseDSN(dsn string) *sql.DB {\n\tdb, err := sql.Open(\"mysql\", dsn)\n\tif err != nil {\n\t\tlog.Error().Err(err)\n\t\tpanic(err)\n\t}\n\n\treturn db\n}", "func connect(dsn string) *sql.DB {\n\tdb, err := sql.Open(\"mysql\", dsn)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t// Set the maximum number of idle connections in the pool. Setting this\n\t// to less than or equal to 0 will mean that no idle connections are retained.\n\tdb.SetMaxIdleConns(5)\n\n\tif err := db.Ping(); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\treturn db\n}", "func (m *Mysql) OpenDB(dsn string) (*sql.DB, error) {\n\n\tif dsn == \"\" {\n\t\terr := errors.New(\"invalid db connection url\")\n\t\treturn nil, err\n\t}\n\n\tdb, err := sql.Open(\"mysql\", dsn)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = db.Ping()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn db, err\n}", "func OpenMysql(driverName, dataSourceName string) (db *MysqlDB, err error) {\n var db2 *sql.DB\n db2, err = sql.Open(driverName, dataSourceName)\n if err != nil {\n return\n }\n db = &MysqlDB{}\n db.DB.DB = *db2\n return\n}", "func initializeMysqlConn() {\n\tdbConn, err := sql.Open(\"mysql\", \"admin:admin@tcp(y2search_mysql:3306)/y2search_db?collation=utf8mb4_unicode_ci\")\n\tdb = *dbConn\n\tif err != nil {\n\t\tlog.Panic(err.Error()) // Just for example purpose. You should use proper error handling instead of panic\n\t}\n\n\t// Open doesn't open a connection. 
Validate DSN data:\n\terr = db.Ping()\n\tif err != nil {\n\t\tlog.Panic(err.Error()) // proper error handling instead of panic in your app\n\t}\n}", "func connectDB(dsn string) (dbh *sql.DB, err error) {\n\tdbh, err = sql.Open(\"mysql\", dsn)\n\tif err != nil {\n\t\treturn\n\t}\n\n\t// Set really low limits, this application is only meant to do quick serialized SQL queries\n\tdbh.SetMaxOpenConns(1)\n\tdbh.SetConnMaxLifetime(time.Second)\n\n\treturn\n}", "func OpenConnection(cnf config.Config) (*sql.DB, error) {\n\tusername := cnf.DBUsername\n\tpassword := cnf.DBPassword\n\thost := cnf.DBHost\n\tport := cnf.DBPort\n\tdatabase := cnf.Database\n\n\tdsn := fmt.Sprintf(\"%v:%v@tcp(%v:%v)/%v\", username, password, host, port, database)\n\n\tlog.Debugf(\"Connect to : %v\", dsn)\n\tdb, err := sql.Open(\"mysql\", dsn)\n\tif err != nil {\n\t\treturn nil, ErrCanNotConnectWithDatabase\n\t}\n\n\t// Open doesn't open a connection. Validate DSN data:\n\terr = db.Ping()\n\tif err != nil {\n\t\treturn nil, ErrCanNotConnectWithDatabase\n\t}\n\treturn db, nil\n}", "func OpenConnection(cnf config.Config) (*sql.DB, error) {\n\tusername := cnf.DBUsername\n\tpassword := cnf.DBPassword\n\thost := cnf.DBHost\n\tport := cnf.DBPort\n\tdatabase := cnf.Database\n\n\tdsn := fmt.Sprintf(\"%v:%v@tcp(%v:%v)/%v?parseTime=true\", username, password, host, port, database)\n\n\tlog.Debugf(\"Connect to : %v\", dsn)\n\tdb, err := sql.Open(\"mysql\", dsn)\n\tif err != nil {\n\t\treturn nil, ErrCanNotConnectWithDatabase\n\t}\n\n\t// Open doesn't open a connection. Validate DSN data:\n\terr = db.Ping()\n\tif err != nil {\n\t\treturn nil, ErrCanNotConnectWithDatabase\n\t}\n\treturn db, nil\n}", "func (d *Driver) Open(uri string) (driver.Conn, error) {\n proto, addr, dbname, user, passwd, params, err := parseDSN(uri)\n\tif err != nil {\n\t return nil, err\n }\n\td.proto = proto\n d.raddr = addr\n d.user = user\n\td.passwd = passwd\n d.db = dbname\n\n\t// Establish the connection\n\tc := conn{mysql.New(d.proto, d.laddr, d.raddr, d.user, d.passwd, d.db)}\n\n if v, ok := params[\"charset\"]; ok {\n Register(\"SET NAMES \" + v)\n }\n if v, ok := params[\"keepalive\"]; ok {\n t, err := strconv.Atoi(v)\n if err != nil {\n return nil, ErrMaxIdle\n }\n RegisterFunc(func(my mysql.Conn){\n go func() {\n for my.IsConnected() {\n time.Sleep(time.Duration(t) * time.Second)\n if err := my.Ping(); err != nil {\n break\n }\n }\n }()\n })\n }\n\tfor _, q := range d.initCmds {\n\t\tc.my.Register(q) // Register initialisation commands\n\t}\n for _, f := range d.initFuncs {\n c.my.RegisterFunc(f)\n }\n\tif err := c.my.Connect(); err != nil {\n\t\treturn nil, errFilter(err)\n\t}\n\treturn &c, nil\n}", "func (d MySQLDriver) OpenDB() (*sql.DB, error) {\n\ta := d.username + \":\" + d.pass + \"@\" + d.server + \"/\" + d.database\n\tfmt.Println(a)\n\tdb, err := sql.Open(\"mysql\", d.username+\":\"+d.pass+\"@(\"+d.server+\")/\"+d.database)\n\tif err != nil {\n\t\tcheckErr(err) // Just for example purpose. 
You should use proper error handling instead of panic\n\t}\n\treturn db, err\n}", "func open() (*sql.DB, error) {\n\tdb, err := sql.Open(\"mysql\", dsn(dbName))\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unable to open database %q: %v\", dbName, err)\n\t}\n\treturn db, nil\n}", "func Open(driverName, dsn string) (*DB, error) {\n\tswitch driverName {\n\tcase \"mysql\":\n\tdefault:\n\t\tdsn = fmt.Sprintf(\"%s://%s\", driverName, dsn)\n\t}\n\n\tctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)\n\tdefer cancel()\n\n\tdbC := make(chan *sql.DB, 1)\n\terrC := make(chan error, 1)\n\n\tgo func(driverName, dsn string) {\n\t\tdb, err := sql.Open(driverName, dsn)\n\t\tif err != nil {\n\t\t\terrC <- err\n\t\t\treturn\n\t\t}\n\t\tdbC <- db\n\t}(driverName, dsn)\n\n\tselect {\n\tcase db := <-dbC:\n\t\t// see: https://github.com/go-sql-driver/mysql/issues/674\n\t\tdb.SetMaxIdleConns(0)\n\t\treturn &DB{db}, nil\n\tcase err := <-errC:\n\t\treturn nil, err\n\tcase <-ctx.Done():\n\t\treturn nil, errTimeout\n\t}\n}", "func (c *SQLClient) OpenConn(user string, pass string) (*gorm.DB, error) {\n\tdsnConfig := mysql.NewConfig()\n\tdsnConfig.Net = \"tcp\"\n\tdsnConfig.Addr = fmt.Sprintf(\"%s:%d\", c.config.Host, c.config.Port)\n\tdsnConfig.User = user\n\tdsnConfig.Passwd = pass\n\tdsnConfig.Timeout = time.Second * 5\n\tdsnConfig.ParseTime = true\n\tdsnConfig.Loc = time.Local\n\tdsnConfig.MultiStatements = true // TODO: Disable this, as it increase security risk.\n\tdsnConfig.TLSConfig = c.config.TLSKey\n\tdsn := dsnConfig.FormatDSN()\n\n\tdb, err := gorm.Open(mysqlDriver.Open(dsn))\n\tif err != nil {\n\t\tlog.Warn(\"Failed to open SQL connection\",\n\t\t\tzap.String(\"targetComponent\", distro.R().TiDB),\n\t\t\tzap.Error(err))\n\t\tif mysqlErr, ok := err.(*mysql.MySQLError); ok {\n\t\t\tif mysqlErr.Number == mysqlerr.ER_ACCESS_DENIED_ERROR {\n\t\t\t\treturn nil, ErrAuthFailed.New(\"Bad SQL username or password\")\n\t\t\t}\n\t\t}\n\t\treturn nil, ErrConnFailed.Wrap(err, \"Failed to connect to %s\", distro.R().TiDB)\n\t}\n\n\t// Ensure that when the App stops resources are released\n\tif c.config.BaseContext != nil {\n\t\tdb = db.WithContext(c.config.BaseContext)\n\t}\n\n\treturn db, nil\n}", "func SQLConnect(name, passwd, address, dbname string) (*sql.DB, error) {\r\n\turl := name + \":\" + passwd + \"@tcp(\" + address + \":3306)/\" + dbname\r\n\tdb, err := sql.Open(\"mysql\", url)\r\n\tif err != nil {\r\n\t\treturn nil, err\r\n\t}\r\n\treturn db, nil\r\n}", "func Open(dataSourceName string) (*DB, error) {\n\tdb, err := sql.Open(\"mysql\", dataSourceName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &DB{DB: db}, nil\n}", "func (db *dbMysql) Open(config *ConfigNode) (*sql.DB, error) {\n\tvar source string\n\tif config.LinkInfo != \"\" {\n\t\tsource = config.LinkInfo\n\t} else {\n\t\tsource = fmt.Sprintf(\n\t\t\t\"%s:%s@tcp(%s:%s)/%s?charset=%s&multiStatements=true&parseTime=true&loc=Local\",\n\t\t\tconfig.User, config.Pass, config.Host, config.Port, config.Name, config.Charset,\n\t\t)\n\t}\n\tif db, err := sql.Open(\"gf-mysql\", source); err == nil {\n\t\treturn db, nil\n\t} else {\n\t\treturn nil, err\n\t}\n}", "func Open(host, port, username, password, name string) *gorm.DB {\n\tdbConfig := fmt.Sprintf(\"%s:%s@tcp(%s:%s)/%s?charset=utf8mb4&parseTime=true&loc=Local\",\n\t\tusername,\n\t\tpassword,\n\t\thost,\n\t\tport,\n\t\tname,\n\t)\n\n\tdb, err := gorm.Open(\"mysql\", dbConfig)\n\tcommon.PanicError(err)\n\treturn db\n}", "func Open(driverName, masterDSN string, 
replicasDSNs []string) (*DB, error) {\n\tconns := make([]string, 0, len(replicasDSNs)+1)\n\tconns = append(conns, masterDSN)\n\tconns = append(conns, replicasDSNs...)\n\n\tdb := &DB{\n\t\tcpdbs: make([]*connection, len(conns)),\n\t}\n\n\terr := scatter(len(db.cpdbs), func(i int) (err error) {\n\t\tconn, err := sql.Open(driverName, conns[i])\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\terr = conn.Ping()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tdb.cpdbs[i] = new(connection)\n\t\tdb.cpdbs[i].db = conn\n\n\t\treturn nil\n\t})\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn db, nil\n}", "func ConnectDSN(username, password, databaseName string) (*sql.DB, error) {\n\t//Connect to the database\n\tdatabaseDSN := username + \":\" + password + \"@/\" + databaseName + \"?parseTime=true&charset=utf8mb4\"\n\tdb, err := sql.Open(\"mysql\", databaseDSN)\n\tif err != nil {\n\t\tlog.Fatal(\"Error connecting to the database\")\n\t\treturn nil, err\n\t}\n\tDatabase = db\n\treturn db, nil\n}", "func NewMysqlConn(dsn string) (*MysqlConn, error) {\n\tdbConn, err := sql.Open(\"mysql\", dsn)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tconn := MysqlConn{dbConn}\n\treturn &conn, nil\n}", "func Open() (*sql.DB, error) {\n\tdatabase := os.Getenv(\"DB_DATABASE\")\n\tdbUser := os.Getenv(\"DB_USERNAME\")\n\tdbPassword := os.Getenv(\"DB_PASSWORD\")\n\tdbHost := os.Getenv(\"DB_HOST\")\n\tconn := dbUser + \":\" + dbPassword + \"@tcp(\" + dbHost + \":3306)/\" + database\n\n\treturn sql.Open(os.Getenv(\"DB_CONNECTION\"), conn)\n}", "func (adapter *MySQLAdapter) OpenConnection(config *config.DatabaseConfig, queryString string) (*sql.DB, error) {\n\tif len(config.Masters) > 1 {\n\t\treturn nil, errors.New(\"Sorry, currently supports single master database only\")\n\t}\n\tdbname := config.NameOrPath\n\tfor _, master := range config.Masters {\n\t\tdsn := fmt.Sprintf(\"%s:%s@tcp(%s)/%s?%s\", config.Username, config.Password, master, dbname, queryString)\n\t\tdebug.Printf(\"dsn = %s\", strings.Replace(dsn, \"%\", \"%%\", -1))\n\t\tconn, err := sql.Open(config.Adapter, dsn)\n\t\tif err != nil {\n\t\t\treturn nil, errors.WithStack(err)\n\t\t}\n\t\treturn conn, nil\n\t}\n\tfor _, slave := range config.Slaves {\n\t\tdsn := fmt.Sprintf(\"%s:%s@tcp(%s)/%s?%s\", config.Username, config.Password, slave, dbname, queryString)\n\t\tdebug.Printf(\"TODO: not support slave. dsn = %s\", dsn)\n\t\tbreak\n\t}\n\n\tfor _, backup := range config.Backups {\n\t\tdsn := fmt.Sprintf(\"%s:%s@tcp(%s)/%s?%s\", config.Username, config.Password, backup, dbname, queryString)\n\t\tdebug.Printf(\"TODO: not support backup. 
dsn = %s\", dsn)\n\t}\n\treturn nil, errors.New(\"must define 'master' server\")\n}", "func (d *hdbDriver) Open(dsn string) (driver.Conn, error) {\n\tconnector, err := NewDSNConnector(dsn)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn connector.Connect(context.Background())\n}", "func (m *MysqlRepository) connect() error {\n\tif !m.connected {\n\t\tdb, err := sql.Open(\"mysql\", m.Credentials)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tm.db = db\n\t\tm.connected = true\n\t}\n\n\treturn nil\n}", "func (dbi *dbInfo) connect() (*sql.DB, error) {\n\t// Set MySQL driver parameters\n\tdbParameters := \"charset=\" + dbi.charset\n\n\t// Append cleartext and tls parameters if TLS is specified\n\tif dbi.tls == true {\n\t\tdbParameters = dbParameters + \"&allowCleartextPasswords=1&tls=skip-verify\"\n\t}\n\n\tdb, err := sql.Open(\"mysql\", dbi.user+\":\"+dbi.pass+\"@tcp(\"+dbi.host+\":\"+dbi.port+\")/?\"+dbParameters)\n\tcheckErr(err)\n\n\t// Ping database to verify credentials\n\terr = db.Ping()\n\n\treturn db, err\n}", "func (c *MysqlClient) OpenCon(config config.Config, logger *log.Logger) error {\n\tconStr := fmt.Sprintf(\n\t\t\"%s:%s@(%s:%s)/%s?charset=utf8&parseTime=True&loc=Local\",\n\t\tconfig.DBUser,\n\t\tconfig.DBPassword,\n\t\tconfig.DBHost,\n\t\tconfig.DBPort,\n\t\tconfig.DBName,\n\t)\n\tdb, err := gorm.Open(config.DBDriver, conStr)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to open db connection\")\n\t}\n\tdb.LogMode(true)\n\tdb.SetLogger(NewGormLogger(logger))\n\n\tc.conn = db\n\tc.config = config\n\tc.logger = logger\n\treturn nil\n}", "func MysqlConnect() *sql.DB {\n\tdb, err := sql.Open(\"mysql\", \"root:mysql@/go-cms\")\n\n\terr = db.Ping()\n\tif err != nil {\n\t\tfmt.Println(err.Error())\n\t\tpanic(err.Error()) // proper error handling instead of panic in your app\n\t}\n\n\treturn db\n}", "func InitDB(dsn string) (*MysqlDB, error) {\n\tdb, err := sql.Open(\"mysql\", dsn)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdb.SetMaxOpenConns(10)\n\treturn &MysqlDB{Conn: db}, nil\n}", "func OpenDB(driver, dsn string, opts ...Option) (*sql.DB, error) {\n\tconfig := &dbConfig{\n\t\tlogger: log.NewNopLogger(),\n\t\tcensusTraceOptions: []ocsql.TraceOption{ocsql.WithAllTraceOptions()},\n\t\tmaxAttempts: 15,\n\t}\n\n\tfor _, opt := range opts {\n\t\topt(config)\n\t}\n\n\tif config.wrapWithCensus && !config.alreadyRegisteredDriver {\n\t\tdriverName, err := ocsql.Register(driver, config.censusTraceOptions...)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrapf(err, \"wrapping driver %s with opencensus sql %s\", driver, driverName)\n\t\t}\n\t\tdriver = driverName\n\t}\n\n\tdb, err := sql.Open(driver, dsn)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"opening %s connection, dsn=%s\", driver, dsn)\n\t}\n\n\tvar dbError error\n\tfor attempt := 0; attempt < config.maxAttempts; attempt++ {\n\t\tdbError = db.Ping()\n\t\tif dbError == nil {\n\t\t\t// we're connected!\n\t\t\tbreak\n\t\t}\n\t\tinterval := time.Duration(attempt) * time.Second\n\t\tlevel.Info(config.logger).Log(driver, fmt.Sprintf(\n\t\t\t\"could not connect to db: %v, sleeping %v\", dbError, interval))\n\t\ttime.Sleep(interval)\n\t}\n\tif dbError != nil {\n\t\treturn nil, dbError\n\t}\n\n\treturn db, nil\n}", "func connect_db() {\n\tdb, err = sql.Open(\"mysql\", \"root:jadir123@tcp(127.0.0.1:3306)/go_db\")\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\n\terr = db.Ping()\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n}", "func Connect(ctx context.Context, dbName string) (*sql.DB, error) 
{\n\tdbusername := os.Getenv(\"MARIA_USERNAME\")\n\tdbpassword := os.Getenv(\"MARIA_PASSWORD\")\n\n\tdb, err := sql.Open(\"mysql\", dbusername+\":\"+dbpassword+\"@tcp(127.0.0.1:3306)/\"+dbName)\n\tif err != nil {\n\t\tlogger.Error.Println(logger.GetCallInfo(), err.Error())\n\t\treturn nil, err\n\t}\n\n\terr = db.Ping()\n\tif err != nil {\n\t\tlogger.Error.Println(\"Error:\", err.Error())\n\t\treturn nil, err\n\t}\n\n\treturn db, nil\n}", "func (p MysqlProvider) Connect(config *gormx.DatabaseConfig) (*gorm.DB, error) {\n\tif config.Dialect == gormx.DriverMysql {\n\t\tif db, err := gorm.Open(mysql.New(mysql.Config{DSN: config.DSN}), &gorm.Config{\n\t\t\tLogger: gormx.DefaultLogger(&config.Logger),\n\t\t}); err == nil {\n\t\t\tif sqlDB, err := db.DB(); err == nil {\n\t\t\t\tif config.MaxIdle > 0 {\n\t\t\t\t\tsqlDB.SetMaxIdleConns(config.MaxIdle)\n\t\t\t\t}\n\t\t\t\tif config.MaxOpen > 0 && config.MaxOpen > config.MaxIdle {\n\t\t\t\t\tsqlDB.SetMaxOpenConns(100)\n\t\t\t\t}\n\t\t\t\tif config.MaxLifetime > 0 {\n\t\t\t\t\tsqlDB.SetConnMaxLifetime(time.Duration(config.MaxLifetime) * time.Second)\n\t\t\t\t}\n\t\t\t\treturn db, nil\n\t\t\t} else {\n\t\t\t\treturn nil, errors.New(\"open DB failed\")\n\t\t\t}\n\t\t} else {\n\t\t\tlog.Errorf(\"connect mysql db failed: error=%s\", err.Error())\n\t\t}\n\t\treturn nil, errors.New(\"connect db failed\")\n\t}\n\treturn nil, errors.New(\"driver is not postgres\")\n}", "func Connect() {\n\tvar err error\n\n\tdsn := GetMySQLDataSourceName()\n\n\tDB, err = sql.Open(\"mysql\", dsn)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tif err := DB.Ping(); err != nil {\n\t\tpanic(err)\n\t}\n}", "func connect() (connection mysql.Conn) {\n\tuser := \"root\"\n\tpass := \"toor\"\n\tdbname := \"trackerdb\"\n\tproto := \"tcp\"\n\taddr := \"127.0.0.1:3306\"\n\n\tdb := mysql.New(proto, \"\", addr, user, pass, dbname)\n\n\terr := db.Connect()\n\tif err != nil {\n\t\tfmt.Println(\"Database Connection Error:\", err)\n\t}\n\n\treturn db\n}", "func (w *DBInstance) connect() (req DBRequest, err error) {\n\treq.db, err = gorm.Open(\"mysql\", fmt.Sprintf(\"%v:%v@/%v?charset=utf8&parseTime=True&loc=Local\", w.sqlUser, w.sqlPass, w.sqlDBName))\n\treturn\n}", "func Open(dataSourceName string) (*DB, error) {\n\tdb, err := mgo.Dial(dataSourceName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &DB{db}, nil\n}", "func MysqlConnect() (*gorm.DB, error) {\n\tconfig := aws.Config{\n\t\tRegion: aws.String(os.Getenv(\"DB_BUCKET_REGION\")),\n\t}\n\tsess := session.Must(session.NewSession(&config))\n\n\tsvc := s3.New(sess)\n\tfmt.Println(\"accessing bucket: \" + os.Getenv(\"DB_BUCKET\") + \"/\" + os.Getenv(\"DB_BUCKET_KEY\"))\n\ts3Output, err := svc.GetObject(&s3.GetObjectInput{\n\t\tBucket: aws.String(os.Getenv(\"DB_BUCKET\")),\n\t\tKey: aws.String(os.Getenv(\"DB_BUCKET_KEY\")),\n\t})\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbuf := new(bytes.Buffer)\n\tbuf.ReadFrom(s3Output.Body)\n\tconBytes := buf.Bytes()\n\n\tvar connection DB\n\tjson.Unmarshal(conBytes, &connection)\n\n\tfmt.Println(\"accessing database\")\n\n\tdb, err := gorm.Open(\"mysql\", connection.User+\":\"+connection.Password+\"@(\"+connection.Host+\":\"+connection.Port+\")\"+\"/\"+connection.Db+\"?charset=utf8&parseTime=True\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn db, nil\n}", "func Connect(user string, password string, host string, port int, schema string, dsn string) (*sql.DB, error) {\n\tvar err error\n\tvar connString bytes.Buffer\n\n\tpara := map[string]interface{}{}\n\tpara[\"User\"] = 
user\n\tpara[\"Pass\"] = password\n\tpara[\"Host\"] = host\n\tpara[\"Port\"] = port\n\tpara[\"Schema\"] = schema\n\n\ttmpl, err := template.New(\"dbconn\").Option(\"missingkey=zero\").Parse(dsn)\n\tif err != nil {\n\t\tlog.Error().Err(err).Msg(\"tmpl parse\")\n\t\treturn nil, err\n\t}\n\n\terr = tmpl.Execute(&connString, para)\n\tif err != nil {\n\t\tlog.Error().Err(err).Msg(\"tmpl execute\")\n\t\treturn nil, err\n\t}\n\n\tlog.Debug().Str(\"dsn\", connString.String()).Msg(\"connect to db\")\n\tdb, err := sql.Open(\"mysql\", connString.String())\n\tif err != nil {\n\t\tlog.Error().Err(err).Msg(\"mysql connect\")\n\t\treturn nil, err\n\t}\n\n\treturn db, nil\n}", "func Open(dsn string) *sql.DB {\n\treturn sql.OpenDB(connector{dsn})\n}", "func New(dsn string) (*gorm.DB, error) {\n\tconn, err := gorm.Open(\"mysql\", dsn)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"open mysql db failed, %v\", err)\n\t}\n\n\tconn.DB().Ping()\n\tconn.DB().SetConnMaxLifetime(time.Minute * 5)\n\tconn.DB().SetMaxIdleConns(10)\n\tconn.DB().SetMaxOpenConns(10)\n\n\tif defaultdb == nil {\n\t\tdefaultdb = conn\n\t}\n\n\treturn conn, nil\n}", "func (d *hdbDriver) OpenConnector(dsn string) (driver.Connector, error) { return NewDSNConnector(dsn) }", "func Connect(ctx context.Context, host string, port int, dbName, user, password string) (*DB, error) {\n\tconfig := mysql.Config{\n\t\tAddr: fmt.Sprintf(\"%s:%d\", host, port),\n\t\tNet: \"tcp\",\n\t\tUser: user,\n\t\tPasswd: password,\n\t\tDBName: dbName,\n\t\tMultiStatements: true,\n\t}\n\tctxLogger := logger.FromContext(ctx)\n\tctx = logger.NewContext(ctx, ctxLogger.(logger.WithLogger).With(\"host\", host, \"dbName\", dbName, \"user\", user, \"port\", port))\n\n\tdb := &DB{\n\t\tCtx: ctx,\n\t\tLogger: logger.FromContext(ctx),\n\t}\n\tdb.Logger.Info(\"dsn\", config.FormatDSN(), \"msg\", \"Connecting\")\n\tif myLogger, ok := db.Logger.(logger.PrintLogger); ok {\n\t\tif myWithLogger, okWith := db.Logger.(logger.WithLogger); okWith {\n\t\t\tmyLogger = myWithLogger.With(\"package\", \"mysql\").(logger.PrintLogger)\n\t\t}\n\t\tmysql.SetLogger(myLogger)\n\t}\n\tcon, err := sql.Open(\"mysql\", config.FormatDSN())\n\tif err != nil {\n\t\treturn db, err\n\t}\n\terr = con.PingContext(ctx)\n\tif err != nil {\n\t\treturn db, err\n\t}\n\tdb.Database = goqu.New(\"mysql\", con)\n\treturn db, nil\n}", "func (client *DatabaseClient) Connect() error {\n var err error\n database, err = gorm.Open(\"mysql\", client.buildDatabaseDSN())\n if err != nil {\n return errors.DatabaseConnectionError.ToError(err)\n }\n client.autoMigrate()\n return nil\n}", "func (al *AccessLayer) Open() error {\n\tc := mysql.Config{\n\t\tUser: al.user,\n\t\tPasswd: al.pass,\n\t\tDBName: al.db,\n\t\tAddr: al.host,\n\t}\n\tif al.host != \"\" {\n\t\tc.Net = \"tcp\"\n\t}\n\tc.Params = map[string]string{\"allowNativePasswords\": \"true\"}\n\tfmt.Println(c.FormatDSN())\n\tdb, err := sql.Open(\"mysql\", c.FormatDSN())\n\tif err != nil {\n\t\treturn err\n\t}\n\tdb.SetConnMaxLifetime(5 * time.Minute)\n\tif err := db.Ping(); err != nil {\n\t\treturn err\n\t}\n\n\tal.AL = db\n\n\terr = al.CreateTables(false)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = al.CreateIndices(false)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n\n}", "func connect(DSN string, timeout time.Duration) *sql.DB {\n\tticker := time.NewTicker(1 * time.Second)\n\ttimer := time.After(timeout)\n\tconn, _ := sql.Open(\"mysql\", DSN)\n\n\tfor {\n\t\tselect {\n\t\tcase <-ticker.C:\n\t\t\terr := conn.Ping()\n\t\t\tif err == nil {\n\t\t\t\treturn 
conn\n\t\t\t}\n\t\tcase <-timer:\n\t\t\tlog.Fatalf(\"Timeout trying To connect To %s after %d seconds. Forgot To run `make db_up`?\", DSN, timeout/1e9)\n\t\t\tos.Exit(1)\n\t\t}\n\t}\n}", "func DbConnect() (db *sql.DB) {\n\tdb, err := sql.Open(\"mysql\", \"root:root@tcp(127.0.0.1:3308)/ecommerce\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn db\n}", "func OpenDB() *sql.DB {\n\tdb, err := sql.Open(\"mysql\", \"root:password@tcp(192.168.1.200:32769)/BikeTransport_db\")\n\tif err != nil {\n\t\tfmt.Println(\"Failed to open SQL db:\", err)\n\t\tlog.Fatalln(\"Failed to open SQL db:\", err)\n\t}\n\tfmt.Println(\"Database successfully opened.\")\n\n\treturn db\n}", "func OpenConnection(ctx context.Context, logContext, dsn string, maxConns, maxIdleConns int, maxConnLifetime time.Duration) (*sql.DB, error) {\n\tvar (\n\t\turl *dburl.URL\n\t\tconn *sql.DB\n\t\terr error\n\t\tch = make(chan error)\n\t)\n\n\turl, err = safeParse(dsn)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdriver := url.Driver\n\tif url.GoDriver != \"\" {\n\t\tdriver = url.GoDriver\n\t}\n\n\t// Open the DB handle in a separate goroutine so we can terminate early if the context closes.\n\tgo func() {\n\t\tconn, err = sql.Open(driver, url.DSN)\n\t\tclose(ch)\n\t}()\n\n\tselect {\n\tcase <-ctx.Done():\n\t\treturn nil, ctx.Err()\n\tcase <-ch:\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tconn.SetMaxIdleConns(maxIdleConns)\n\tconn.SetMaxOpenConns(maxConns)\n\tconn.SetConnMaxLifetime(maxConnLifetime)\n\n\tif klog.V(1).Enabled() {\n\t\tif len(logContext) > 0 {\n\t\t\tlogContext = fmt.Sprintf(\"[%s] \", logContext)\n\t\t}\n\t\tklog.Infof(\"%sDatabase handle successfully opened with '%s' driver\", logContext, driver)\n\t}\n\treturn conn, nil\n}", "func NewDB(driver, dsn string) (*sql.DB, error) {\n\tdb, err := sql.Open(driver, dsn)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"[NewDB] prepare dsn\")\n\t}\n\n\t// db.SetMaxOpenConns()\n\n\treturn db, nil\n}", "func OpenDB() error {\n\tvar err error\n\tdb, err = sql.Open(\"mysql\", config.GetConfig().DbURL)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif err = db.Ping(); err != nil {\n\t\tlog.Println(\"Startup ping failed: \", err)\n\t}\n\treturn nil\n}", "func Connect() (*gorm.DB, error) {\n\turl := fmt.Sprintf(\"%s:%s@tcp(%s:%d)/%s?charset=utf8mb4&parseTime=True&loc=Local\", *dbUser, *dbPassword, *dbHost, *dbPort, *dbSchema)\n\treturn gorm.Open(\"mysql\", url)\n}", "func Open(driverName, dataSourceName string) (*DB, error) {\n db, err := sql.Open(driverName, dataSourceName)\n if err != nil {\n return nil, err\n }\n return &DB{DB: db, driverName: driverName, Mapper: mapper()}, err\n}", "func StartConn(svPath string, usr string, pass string) *gorm.DB {\n\tif pass != \"\" {\n\t\tpass = \":\" + pass\n\t}\n\tdbPath := usr + pass + \"@tcp(\" + svPath + \")/mysql?charset=utf8&parseTime=True&loc=Local\"\n\t// fmt.Println(dbPath)\n\tdb, err := gorm.Open(\"mysql\", dbPath)\n\tif err != nil {\n\t\tpanic(\"failed to connect database\")\n\t}\n\n\tdb.AutoMigrate(&(types.Boolean{}))\n\treturn db\n}", "func newSqlDB(dsn string) *mysql.DB {\n\n\tdb, err := mysql.Open(\"dbatman\", dsn)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"%s is unavailable\", dsn)\n\t\tos.Exit(2)\n\t}\n\n\tif err := db.Ping(); err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"%s is unreacheable\", dsn)\n\t\tos.Exit(2)\n\t}\n\n\treturn db\n}", "func init() {\n\n\tvar err error\n\tdatabase, err = sql.Open(\"mysql\", config.MySQLToFormatDNS())\n\tif err != nil {\n\t\tlog.Fatal(\"==> Error in library/mysql: 
\" + err.Error())\n\t}\n\n\tdatabase.SetMaxOpenConns(20)\n\tdatabase.SetMaxIdleConns(20)\n\n}", "func OpenDBConnection(connStr string, config gorm.Config) (*gorm.DB, error) {\n\tvar err error\n\tonce.Do(func() {\n\t\tdb, err := gorm.Open(mysql.Open(connStr), &config)\n\t\tif err == nil {\n\t\t\tsingleton = db\n\t\t}\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn singleton, nil\n}", "func (a *Driver) Open(dsn string) (driver.Conn, error) {\n\treturn NewConnector(dsn).Connect(context.TODO())\n}", "func Connect(ctx context.Context, driver, dsn string, connOpts *ConnectOptions) (*sqlx.DB, error) {\n\topts := connOpts\n\tif opts == nil {\n\t\topts = &ConnectOptions{}\n\t}\n\n\tdb, err := connectWithRetry(ctx, driver, dsn, opts.Retry)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdb.SetMaxOpenConns(opts.MaxOpenConnections)\n\tdb.SetMaxIdleConns(opts.MaxIdleConnections)\n\tdb.SetConnMaxLifetime(opts.ConnectionMaxLifetime)\n\treturn db, nil\n}", "func GormConnectDB() *gorm.DB {\n\n\thost := viper.GetString(\"db.host\")\n\tport := viper.GetString(\"db.port\")\n\tuser := viper.GetString(\"db.user\")\n\tpass := viper.GetString(\"db.pass\")\n\tdsn := fmt.Sprintf(\"%s:%s@tcp(%s:%s)/%s?charset=utf8&parseTime=True\", user, pass, host, port, user)\n\tdb, err := gorm.Open(\"mysql\", dsn)\n\tif err != nil {\n\t\tlog.Fatalf(\"Error connecting gorm to db: %s\\n\", err.Error())\n\t}\n\treturn db\n\n}", "func Connect() (*gorm.DB, error) {\n\tdb, err := gorm.Open(\"mysql\", viper.GetString(\"mysql.username\")+\":\"+viper.GetString(\"mysql.password\")+\"@(\"+viper.GetString(\"mysql.hostname\")+\":\"+viper.GetString(\"mysql.port\")+\")/\"+viper.GetString(\"mysql.database\")+\"?charset=utf8&parseTime=True&loc=Local\")\n\treturn db, err\n}", "func (m *MySQL) Connect() error {\n\t// user:password@protocol(server:port)/database?params...\n\tdsn := fmt.Sprintf(\n\t\t\"%s:%s@tcp(%s:%s)/%s?tls=false&autocommit=true&allowNativePasswords=true&parseTime=true\",\n\t\t\"root\",\n\t\t\"userRootPass\",\n\t\t\"127.0.0.1\",\n\t\t\"3306\",\n\t\t\"store\",\n\t)\n\n\tdb, err := sql.Open(\"mysql\", dsn)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = db.Ping()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tm.db = db\n\treturn nil\n}", "func OpenDB(host, user, password, dbname string) error {\n\tvar err error\n\tdb, err = sql.Open(\"postgres\",\n\t\tfmt.Sprintf(\"host=%s user=%s password=%s dbname=%s \"+\n\t\t\t\"sslmode=%s\", host, user, password, dbname, db_ssl_mode))\n\treturn err\n}", "func OpenDatabase(user string, pass string, host string, schema string) *sql.DB {\n\tvar dsn string\n\n\tdsn = user\n\tif pass != \"\" {\n\t\tdsn += \":\" + pass\n\t}\n\tdsn += \"@\"\n\tif host != \"\" {\n\t\tdsn += \"tcp(\"\n\t\tdsn += host\n\t\tdsn += \")\"\n\t}\n\tdsn += \"/\"\n\tdsn += schema\n\tdsn += \"?parseTime=true\"\n\n\treturn OpenDatabaseDSN(dsn)\n}", "func connect(dsn string) (*sqlx.DB, error) {\n\tdb, err := sqlx.Connect(\"postgres\", dsn)\n\tif err != nil {\n\t\tlog.Log.Err(err).Msg(\"error initializing database connection\")\n\t\treturn nil, err\n\t}\n\treturn db, nil\n}", "func (dbProvider *dbProvider) connect() error {\n\tvar err error\n\n\tdbProvider.instance, err = gorm.Open(dbProvider.dialect, dbProvider.path)\n\n\tif err != nil {\n\t\tlogger.Error(nil, crerrors.CodeDatabaseError, err, nil)\n\t}\n\n\tif gin.IsDebugging() 
{\n\t\tdbProvider.instance.LogMode(true)\n\t}\n\n\tdbProvider.instance.DB().SetMaxIdleConns(dbProvider.maxIdleConnections)\n\tdbProvider.instance.DB().SetMaxOpenConns(dbProvider.maxOpenConnections)\n\tdbProvider.instance.DB().SetConnMaxLifetime(dbProvider.connMaxLifetime * time.Second)\n\treturn nil\n}", "func (c *PGConnector) Open(cfg *config.Config) (*sql.DB, error) {\n\tsslmode := \"disable\"\n\tif cfg.DBSSLModeOption == \"enable\" {\n\t\tsslmode = \"require\"\n\t}\n\n\tdbstring := fmt.Sprintf(\"user=%s dbname=%s sslmode=%s password=%s host=%s port=%s\",\n\t\tcfg.DBUserName,\n\t\tcfg.DBName,\n\t\tsslmode,\n\t\tcfg.DBPassword,\n\t\tcfg.DBHostname,\n\t\tcfg.DBPort,\n\t)\n\n\treturn sql.Open(\"postgres\", dbstring)\n}", "func (c *PGConnector) Open(cfg *config.Config) (*sql.DB, error) {\n\tsslmode := \"disable\"\n\tif cfg.DBSSLModeOption == \"enable\" {\n\t\tsslmode = \"require\"\n\t}\n\n\tdbstring := fmt.Sprintf(\"user=%s dbname=%s sslmode=%s password=%s host=%s port=%s\",\n\t\tcfg.DBUserName,\n\t\tcfg.DBName,\n\t\tsslmode,\n\t\tcfg.DBPassword,\n\t\tcfg.DBHostname,\n\t\tcfg.DBPort,\n\t)\n\n\treturn sql.Open(\"postgres\", dbstring)\n}", "func GetConnection(dsn string) (*sql.DB, error) {\n\tif dsn == \"\" {\n\t\tdsn = GetDSN()\n\t}\n\treturn sql.Open(\"goracle\", dsn)\n}", "func Open() (DB, error) {\n\tif config.Bridge.DB.Driver != \"sqlserver\" {\n\t\treturn nil, errors.New(\"only sqlserver is supported at the moment\")\n\t}\n\n\tquery := url.Values{}\n\tquery.Add(\"database\", config.Bridge.DB.Name)\n\n\tconnURL := &url.URL{\n\t\tScheme: \"sqlserver\",\n\t\tUser: url.UserPassword(config.Bridge.DB.User, config.Bridge.DB.Password),\n\t\tHost: fmt.Sprintf(\"%s:%d\", config.Bridge.DB.Host, config.Bridge.DB.Port),\n\t\tRawQuery: query.Encode(),\n\t}\n\n\tdbConn, err := sql.Open(config.Bridge.DB.Driver, connURL.String())\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"could not create db connection pool\")\n\t}\n\n\tif err := dbConn.Ping(); err != nil {\n\t\tdbConn.Close()\n\t\treturn nil, errors.Wrap(err, \"could not connect to database server\")\n\t}\n\treturn &db{dbConn}, nil\n}", "func ConnectMysqlDB() (interface{}, error) {\n\tconfig := NewConfig()\n\tdb, err := gorm.Open(\"mysql\", config.Mysql.User+\":\"+config.Mysql.Password+\"@\"+config.Mysql.TCP+\"/\"+config.Mysql.DBName)\n\treturn db, err\n}", "func (w *dbWrapper) open() error {\n\tdsn, err := w.provider.dsn(w.Settings)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tw.db, err = sql.Open(w.provider.driver(), dsn)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"can't open database\")\n\t}\n\n\treturn nil\n}", "func OpenConnectionDB() (*sql.DB, error) {\n\tdb, err := sql.Open(\"mysql\", connectionString)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn db, nil\n}", "func Connect() *gorm.DB {\n\tURL := fmt.Sprintf(\"%s:%s@tcp(%s:%d)/%s?charset=utf8&parseTime=True&loc=Local\", DB_USER, DB_PASS, DB_HOST, DB_PORT, DB_NAME)\n\tdb, err := gorm.Open(\"mysql\", URL)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t\treturn nil\n\t}\n\treturn db\n}", "func Open() *sql.DB {\n\tusername := \"root\"\n\tpassword := \"DB_PASSWORD\"\n\tprotocol := \"tcp\"\n\taddress := \"mysql\"\n\tport := \"3306\"\n\tdbname := \"leafme\"\n\tdb, _ := sql.Open(\"mysql\",\n\t\tusername+\":\"+password+\"@\"+protocol+\"(\"+address+\":\"+port+\")/\"+dbname+\"?parseTime=true\")\n\treturn db\n}", "func connectDB(cfg *config.DB) error{\n\turi := fmt.Sprintf(\"%s:%s@tcp(%s)/%s?charset=utf8&parseTime=True\", cfg.User, cfg.Password, cfg.Address, cfg.Name)\n\tconn, err := 
gorm.Open(dialect, uri)\n\tif err != nil{\n\t\treturn err\n\t}\n\tdefaultDB = &DB{conn}\n\tdefaultDB.DB.DB().SetMaxIdleConns(cfg.MaxIdleConn)\n\tdefaultDB.DB.DB().SetMaxOpenConns(cfg.MaxOpenConn)\n\tdefaultDB.DB.DB().SetConnMaxLifetime(cfg.MaxConnLifetime)\n\tdefaultDB.DB.LogMode(cfg.Debug)\n\n\treturn nil\n}", "func initMysql(driverName string, initConnectionTimeout time.Duration) string {\n\tmysqlConfig := client.CreateMySQLConfig(\n\t\t\"root\",\n\t\tgetStringConfig(mysqlServiceHost),\n\t\tgetStringConfig(mysqlServicePort),\n\t\t\"\")\n\n\tvar db *sql.DB\n\tvar err error\n\tvar operation = func() error {\n\t\tdb, err = sql.Open(driverName, mysqlConfig.FormatDSN())\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t}\n\tb := backoff.NewExponentialBackOff()\n\tb.MaxElapsedTime = initConnectionTimeout\n\terr = backoff.Retry(operation, b)\n\n\tdefer db.Close()\n\tutil.TerminateIfError(err)\n\n\t// Create database if not exist\n\toperation = func() error {\n\t\t_, err = db.Exec(fmt.Sprintf(\"CREATE DATABASE IF NOT EXISTS %s\", dbName))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t}\n\tb = backoff.NewExponentialBackOff()\n\tb.MaxElapsedTime = initConnectionTimeout\n\terr = backoff.Retry(operation, b)\n\n\tutil.TerminateIfError(err)\n\tmysqlConfig.DBName = dbName\n\treturn mysqlConfig.FormatDSN()\n}", "func InitDB(dataSource string) {\n\tvar err error\n\tDB, err = sqlx.Connect(\"mysql\", dataSource)\n\tif err != nil {\n\t\tlog.Panic(err)\n\t}\n\n\tif err = DB.Ping(); err != nil {\n\t\tlog.Panic(err)\n\t}\n\tDB.SetMaxOpenConns(1000)\n}", "func ConnectDB() {\n\ttcpString := \"@tcp(\" + Config.mysql + \")\"\n\tdb, err := sql.Open(\"mysql\", \"root:\"+tcpString+\"/\"+Config.dbName)\n\n\tif err != nil {\n\t\tlog.Panic(err)\n\t}\n\n\tConnection.Db = db\n\n\terr = Connection.Db.Ping()\n\n\tif err != nil {\n\t\tlog.Panic(err)\n\t}\n}", "func (d Database) DSN() string {\n\treturn fmt.Sprintf(\"server=%s;user id=%s;password=%s;port=%d;database=master\", d.Server, d.User, d.Password, d.Port)\n}", "func dbConn() (db *sql.DB) {\r\n\tdbDriver := \"mysql\"\r\n\tdbUser := \"root\"\r\n\tdbPass := \"\"\r\n\tdbName := \"golang\"\r\n\tdb, err := sql.Open(dbDriver, dbUser+\":\"+dbPass+\"@/\"+dbName)\r\n\tif err != nil {\r\n\t\tpanic(err.Error())\r\n\t}\r\n\treturn db\r\n}", "func Connect() *sql.DB {\n\n\tvar connStr string\n\n\tif os.Getenv(\"mode\") == \"dev\" {\n\t\tconnStr = \"root\" + \"@tcp(\" + \"127.0.0.1:3306\" + \")/\" + \"analasia\"\n\t} else {\n\t\tconnStr = os.Getenv(\"DATABASE_USER\") + \":\" + os.Getenv(\"DATABASE_PASSWORD\") + \"@tcp(\" + os.Getenv(\"DATABASE_HOST\") + \")/\" + os.Getenv(\"DATABASE_NAME\")\n\t}\n\tconn, err := sql.Open(\"mysql\", connStr)\n\tif err != nil {\n\t\tpanic(err.Error())\n\t}\n\treturn conn\n\n}", "func Dbcon() (db *sql.DB, err error) {\n\tdb, err = sql.Open(\"mysql\", \"mremmalex:password@tcp(localhost:3306)/backendtest\")\n\treturn db, err\n}", "func (c Connector) Connect() (db *sql.DB) {\n\tif c.db != nil {\n\t\tlevel.Info(c.log).Log(\"msg\", \"returning mysql\")\n\t\treturn db\n\t}\n\tlevel.Info(c.log).Log(\"msg\", \"connecting to mysql\")\n\tlevel.Info(c.log).Log(\"msg\", \"returning connection\")\n\t_ = c.config.dbdriver\n\tdbUser := c.config.dbUser\n\tdbHost := c.config.dbHost\n\tdbPass := c.config.dbPassword\n\tdbName := c.config.dbDatabase\n\tdbPort := c.config.dbPort\n\n\tdb, err := sql.Open(\"mysql\", dbUser+\":\"+dbPass+\"@(\"+dbHost+\":\"+dbPort+\")/\"+dbName)\n\tif err != nil {\n\t\tpanic(err.Error())\n\t}\n\tc.db = 
db\n\tc.db.SetMaxIdleConns(10)\n\tc.db.SetMaxOpenConns(10)\n\tc.db.SetConnMaxLifetime(time.Duration(360))\n\treturn c.db\n\n}", "func dbConn() (db *sql.DB) {\n\tdbDriver := \"mysql\"\n\tdbUser := \"root\"\n\tdbpass := \"Green2013[]\"\n\tdbName := \"goblog\"\n\n\tdb, err := sql.Open(dbDriver, dbUser+\":\"+dbpass+\"@/\"+dbName)\n\tif err != nil {\n\t\tpanic(err.Error())\n\t}\n\n\treturn db\n\n}", "func connectForTesting(_ *testing.T) (*gorm.DB, error) {\n\turl := fmt.Sprintf(\"%s:%s@tcp(%s:%d)/test?charset=utf8mb4&parseTime=True&loc=Local\", *dbUser, *dbPassword, *dbHost, *dbPort)\n\treturn gorm.Open(\"mysql\", url)\n}", "func Open() {\n\tDB, err = sqlx.Connect(\"mysql\", \"root:root@tcp(db)/Scout_DB\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}", "func Connect() (*gorm.DB, error) {\n\n\tdb, err := gorm.Open(mysql.Open(config.DBURL), &gorm.Config{})\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn db, nil\n}", "func Open(driverName, dataSourceNames string) (db *DB, err error) {\n\tdsns := strings.Split(dataSourceNames, \";\")\n\tif len(dsns) < 2 {\n\t\treturn nil, fmt.Errorf(\"At least one master and worker DB are required\")\n\t}\n\n\tdb = &DB{\n\t\tdriverName: driverName,\n\t\tconnInfo: make(map[uint32]string),\n\t\tsick: make(map[uint32]struct{}),\n\t\tresurrect: make(map[uint32]*sql.DB),\n\t}\n\n\tmaster, err := sql.Open(driverName, dsns[0])\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar workers []*sql.DB\n\tfor i, dsn := range dsns[1:] {\n\t\tworker, err := sql.Open(driverName, dsn)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tdb.connInfo[uint32(i)] = dsn\n\t\tworkers = append(workers, worker)\n\t}\n\n\treturn db.newDB(master, workers...)\n}", "func Connect(driver, dsn string) (*sqlx.DB, error) {\n\turl := fmt.Sprintf(\"%s://%s\", driver, dsn)\n\n\t// use synchronous versions of migration functions ...\n\tallErrs, ok := migrate.UpSync(url, \"../migrate\")\n\tif !ok {\n\t\tlog.Println(allErrs)\n\t\treturn nil, errors.New(\"Migration Error\")\n\t}\n\n\treturn sqlx.Open(driver, url)\n}", "func (bdm *MySQLDBManager) getConnection() (*sql.DB, error) {\n\n\tif !bdm.openedConn {\n\t\treturn nil, errors.New(\"Connection was not inited\")\n\t}\n\n\tif bdm.conn != nil {\n\t\treturn bdm.conn, nil\n\t}\n\n\tdb, err := sql.Open(\"mysql\", bdm.Config.GetMySQLConnString())\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t//db.SetMaxOpenConns(2)\n\tdb.SetMaxIdleConns(2)\n\n\tbdm.conn = db\n\n\treturn db, nil\n}", "func New(dsn string) (*Client, error) {\n\tdb, err := gorm.Open(mysql.Open(dsn), &gorm.Config{})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdb = db.Debug()\n\n\treturn &Client{db}, nil\n}", "func (p *DSNProvider) openDB() (*sql.DB, error) {\n\tdb, err := sql.Open(p.DriverName, p.DSN)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tidle := p.MaxIdleConns\n\tif idle == 0 {\n\t\tidle = DefaultMaxIdleConns\n\t}\n\tdb.SetMaxIdleConns(idle)\n\n\topen := p.MaxOpenConns\n\tif open == 0 {\n\t\topen = DefaultMaxOpenConns\n\t}\n\tdb.SetMaxOpenConns(open)\n\n\tttl := p.MaxConnLifetime\n\tif ttl == 0 {\n\t\tttl = DefaultMaxConnLifetime\n\t}\n\tdb.SetConnMaxLifetime(ttl)\n\n\treturn db, nil\n}", "func DbConn() (db *sql.DB) {\n\t//var host = \"tcp(192.168.0.14)\"\n\tvar host = \"tcp(192.168.0.12)\"\n\t// var host = \"tcp(127.0.0.1)\"\n\tdbname := \"dbaexperience\"\n\tdb, err := sql.Open(config.DbDriver, fmt.Sprintf(\"%s:%s@%s/%s\", config.DbUser, config.DbPass, host, dbname))\n\tif err != nil {\n\t\tpanic(err.Error())\n\t}\n\treturn db\n}" ]
[ "0.7246999", "0.71910393", "0.7179003", "0.717448", "0.71717465", "0.71652126", "0.71652126", "0.71652126", "0.71397305", "0.7136143", "0.71265143", "0.7082557", "0.6925274", "0.69121253", "0.68631303", "0.6847047", "0.6728924", "0.67062306", "0.667542", "0.6663505", "0.65856075", "0.6516379", "0.6491855", "0.6478218", "0.6421544", "0.6406693", "0.640536", "0.638332", "0.63702196", "0.6363853", "0.6353147", "0.63315624", "0.6331151", "0.63281685", "0.6324971", "0.63156503", "0.6309845", "0.62549365", "0.6240661", "0.6236031", "0.62349737", "0.62088734", "0.62064725", "0.6202035", "0.6196566", "0.61865866", "0.61418396", "0.61319345", "0.61270285", "0.61109877", "0.6052711", "0.604955", "0.60448647", "0.60085523", "0.6008205", "0.59962934", "0.5984292", "0.59805447", "0.59701633", "0.5962182", "0.5953714", "0.5953082", "0.5945932", "0.5945515", "0.5944769", "0.59384674", "0.5931152", "0.59294575", "0.592931", "0.591874", "0.591678", "0.5915397", "0.5914635", "0.5910011", "0.5910011", "0.5907372", "0.59030885", "0.58979076", "0.5886836", "0.5884415", "0.58843994", "0.588428", "0.5877073", "0.5867192", "0.5864963", "0.5855724", "0.58541024", "0.5848585", "0.58403856", "0.5833178", "0.58300304", "0.5818768", "0.58187085", "0.5815941", "0.5810737", "0.5809092", "0.5800028", "0.5798278", "0.57913953", "0.57881916", "0.5782971" ]
0.0
-1
Updates settings for a FlexMatch matchmaking configuration. These changes affect all matches and game sessions that are created after the update. To update settings, specify the configuration name to be updated and provide the new settings. Learn more: Design a FlexMatch matchmaker (
func (c *Client) UpdateMatchmakingConfiguration(ctx context.Context, params *UpdateMatchmakingConfigurationInput, optFns ...func(*Options)) (*UpdateMatchmakingConfigurationOutput, error) {
	if params == nil {
		params = &UpdateMatchmakingConfigurationInput{}
	}

	result, metadata, err := c.invokeOperation(ctx, "UpdateMatchmakingConfiguration", params, optFns, c.addOperationUpdateMatchmakingConfigurationMiddlewares)
	if err != nil {
		return nil, err
	}

	out := result.(*UpdateMatchmakingConfigurationOutput)
	out.ResultMetadata = metadata
	return out, nil
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (runner *McRunner) applySettings() {\n\tpropPath := filepath.Join(McServerPath(), \"server.properties\")\n\tprops, err := ioutil.ReadFile(propPath)\n\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn\n\t}\n\n\tnameExp, _ := regexp.Compile(\"displayname=.*\\\\n\")\n\tmotdExp, _ := regexp.Compile(\"motd=.*\\\\n\")\n\tmaxPlayersExp, _ := regexp.Compile(\"max-players=.*\\\\n\")\n\tportExp, _ := regexp.Compile(\"server-port=.*\\\\n\")\n\n\tname := fmt.Sprintf(\"displayname=%s\\n\", runner.Settings.Name)\n\tmotd := fmt.Sprintf(\"motd=%s\\n\", runner.Settings.MOTD)\n\tmaxPlayers := fmt.Sprintf(\"max-players=%d\\n\", runner.Settings.MaxPlayers)\n\tport := fmt.Sprintf(\"server-port=%d\\n\", runner.Settings.Port)\n\n\tnewProps := strings.Replace(string(props), nameExp.FindString(string(props)), name, 1)\n\tnewProps = strings.Replace(newProps, motdExp.FindString(newProps), motd, 1)\n\tnewProps = strings.Replace(newProps, maxPlayersExp.FindString(newProps), maxPlayers, 1)\n\tnewProps = strings.Replace(newProps, portExp.FindString(newProps), port, 1)\n\n\terr = ioutil.WriteFile(propPath, []byte(newProps), 0644)\n\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn\n\t}\n}", "func (fm *FakeManager) UpdateConfiguration(throttlerName string, configuration *throttlerdatapb.Configuration, copyZeroValues bool) ([]string, error) {\n\tpanic(panicMsg)\n}", "func (x *Rest) ConfigurationUpdate(w http.ResponseWriter, r *http.Request,\n\tparams httprouter.Params) {\n\tdefer panicCatcher(w)\n\n\trequest := msg.New(r, params)\n\trequest.Section = msg.SectionConfiguration\n\trequest.Action = msg.ActionUpdate\n\n\tswitch request.Version {\n\tcase msg.ProtocolOne:\n\t\tcReq := &v1.ConfigurationItem{}\n\t\tif err := decodeJSONBody(r, cReq); err != nil {\n\t\t\tx.replyUnprocessableEntity(&w, &request, err)\n\t\t\treturn\n\t\t}\n\t\trequest.Configuration = v2.ConfigurationFromV1(cReq)\n\n\tcase msg.ProtocolTwo:\n\t\tcReq := v2.NewConfigurationRequest()\n\t\tif err := decodeJSONBody(r, &cReq); err != nil {\n\t\t\tx.replyUnprocessableEntity(&w, &request, err)\n\t\t\treturn\n\t\t}\n\t\trequest.Configuration = *cReq.Configuration\n\n\t\t// only the v2 API has request flags\n\t\tif err := resolveFlags(&cReq, &request); err != nil {\n\t\t\tx.replyBadRequest(&w, &request, err)\n\t\t\treturn\n\t\t}\n\n\tdefault:\n\t\tx.replyInternalError(&w, &request, nil)\n\t\treturn\n\t}\n\n\trequest.Configuration.InputSanatize()\n\trequest.LookupHash = calculateLookupID(\n\t\trequest.Configuration.HostID,\n\t\trequest.Configuration.Metric,\n\t)\n\trequest.Configuration.LookupID = request.LookupHash\n\n\tif request.Configuration.ID != strings.ToLower(params.ByName(`ID`)) {\n\t\tx.replyBadRequest(&w, &request, fmt.Errorf(\n\t\t\t\"Mismatched IDs in update: [%s] vs [%s]\",\n\t\t\trequest.Configuration.ID,\n\t\t\tstrings.ToLower(params.ByName(`ID`)),\n\t\t))\n\t}\n\n\tif _, err := uuid.FromString(request.Configuration.ID); err != nil {\n\t\tx.replyBadRequest(&w, &request, err)\n\t\treturn\n\t}\n\n\tx.somaSetFeedbackURL(&request)\n\n\tif !x.isAuthorized(&request) {\n\t\tx.replyForbidden(&w, &request, nil)\n\t\treturn\n\t}\n\n\thandler := x.handlerMap.Get(`configuration_w`)\n\thandler.Intake() <- request\n\tresult := <-request.Reply\n\tx.respond(&w, &result)\n}", "func updateConfig() {\n\t// going to v1 apply these changes\n\tif instance.CfgVersion < 1 {\n\t\t// new known mod extension\n\t\tif !strings.Contains(instance.ModExtensions, \".pke\") {\n\t\t\tinstance.ModExtensions = instance.ModExtensions + \".pke\"\n\t\t}\n\t\t// additional 
known iwads\n\t\tinstance.IWADs = append(instance.IWADs, \"boa.ipk3\", \"plutonia.wad\", \"tnt.wad\", \"heretic.wad\")\n\t}\n\n\t// v2\n\tif instance.CfgVersion < 2 {\n\t\tif !strings.Contains(instance.ModExtensions, \".zip\") {\n\t\t\tinstance.ModExtensions = instance.ModExtensions + \".zip\"\n\t\t}\n\t}\n\n\tinstance.CfgVersion = CFG_VERSION\n\tgo Persist()\n}", "func UpdateConfig(config map[string]interface{}, runtimeName string, runtimePath string, setAsDefault bool) error {\n\t// Read the existing runtimes\n\truntimes := make(map[string]interface{})\n\tif _, exists := config[\"runtimes\"]; exists {\n\t\truntimes = config[\"runtimes\"].(map[string]interface{})\n\t}\n\n\t// Add / update the runtime definitions\n\truntimes[runtimeName] = map[string]interface{}{\n\t\t\"path\": runtimePath,\n\t\t\"args\": []string{},\n\t}\n\n\t// Update the runtimes definition\n\tif len(runtimes) > 0 {\n\t\tconfig[\"runtimes\"] = runtimes\n\t}\n\n\tif setAsDefault {\n\t\tconfig[\"default-runtime\"] = runtimeName\n\t}\n\n\treturn nil\n}", "func (*XMLDocument) UpdateSettings() {\n\tmacro.Rewrite(\"$_.updateSettings()\")\n}", "func updateConfig(w http.ResponseWriter, r *http.Request, updateUrl string) {\n\tnewGenTimeout, _ := strconv.Atoi(r.FormValue(\"generate_timeout\"))\n\tif newGenTimeout > 0 {\n\t\tchain.GetServerChain().SetGenerationTimeout(newGenTimeout)\n\t\tviper.Set(\"server_chain.block.generation.timeout\", newGenTimeout)\n\t}\n\tnewTxnWaitTime, _ := strconv.Atoi(r.FormValue(\"txn_wait_time\"))\n\tif newTxnWaitTime > 0 {\n\t\tchain.GetServerChain().SetRetryWaitTime(newTxnWaitTime)\n\t\tviper.Set(\"server_chain.block.generation.retry_wait_time\", newTxnWaitTime)\n\t}\n\tw.Header().Set(\"Content-Type\", \"text/html;charset=UTF-8\")\n\tfmt.Fprintf(w, \"<form action='%s' method='post'>\", updateUrl)\n\tfmt.Fprintf(w, \"Generation Timeout (time till a miner makes a block with less than max blocksize): <input type='text' name='generate_timeout' value='%v'><br>\", viper.Get(\"server_chain.block.generation.timeout\"))\n\tfmt.Fprintf(w, \"Retry Wait Time (time miner waits if there aren't enough transactions to reach max blocksize): <input type='text' name='txn_wait_time' value='%v'><br>\", viper.Get(\"server_chain.block.generation.retry_wait_time\"))\n\tfmt.Fprintf(w, \"<input type='submit' value='Submit'>\")\n\tfmt.Fprintf(w, \"</form>\")\n}", "func NewMatchConfig() *MatchConfig {\n\treturn &MatchConfig{\n\t\tInputQueueSize: 128,\n\t\tCallQueueSize: 128,\n\t\tJoinAttemptQueueSize: 128,\n\t\tDeferredQueueSize: 128,\n\t\tJoinMarkerDeadlineMs: 15000,\n\t\tMaxEmptySec: 0,\n\t}\n}", "func (h *WLSHandler) UpdateConfiguration(config *config.Configuration) {\n\th.lock.Lock()\n\tdefer h.lock.Unlock()\n\th.targetConfig = config\n}", "func (b *Backend) ApplyConfiguration(config gw.GatewayConfiguration) error {\n\tfor i := range config.Channels {\n\t\tloRaModConfig := config.Channels[i].GetLoraModulationConfig()\n\t\tif loRaModConfig != nil {\n\t\t\tloRaModConfig.Bandwidth = loRaModConfig.Bandwidth * 1000\n\t\t}\n\n\t\tfskModConfig := config.Channels[i].GetFskModulationConfig()\n\t\tif fskModConfig != nil {\n\t\t\tfskModConfig.Bandwidth = fskModConfig.Bandwidth * 1000\n\t\t}\n\t}\n\n\tlog.WithFields(log.Fields{\n\t\t\"version\": config.Version,\n\t}).Info(\"backend/concentratord: forwarding configuration command\")\n\n\t_, err := b.commandRequest(\"config\", &config)\n\tif err != nil {\n\t\tlog.WithError(err).Fatal(\"backend/concentratord: send configuration command 
error\")\n\t}\n\n\tcommandCounter(\"config\").Inc()\n\n\treturn nil\n}", "func (api *API) UpdateConfig(request *restful.Request, response *restful.Response) {\n\n\t// ToDo: check url name matches body name\n\n\tparams := request.PathParameters()\n\tk, err := setup(params)\n\tif err != nil {\n\t\tapi.writeError(http.StatusBadRequest, err.Error(), response)\n\t\treturn\n\t}\n\n\tconfig := &Config{}\n\tif err = request.ReadEntity(config); err != nil {\n\t\tapi.writeError(http.StatusBadRequest, err.Error(), response)\n\t\treturn\n\t}\n\n\tif err = config.ParseSpec(); err != nil {\n\t\tapi.writeError(http.StatusBadRequest, err.Error(), response)\n\t\treturn\n\t}\n\n\tglog.V(2).Infof(\"Updating config in Istio registry: key %+v, config %+v\", k, config)\n\n\t// TODO: incorrect use with new registry\n\tif _, err = api.registry.Put(config.ParsedSpec, \"\"); err != nil {\n\t\tswitch err.(type) {\n\t\tcase *model.ItemNotFoundError:\n\t\t\tapi.writeError(http.StatusNotFound, err.Error(), response)\n\t\tdefault:\n\t\t\tapi.writeError(http.StatusInternalServerError, err.Error(), response)\n\t\t}\n\t\treturn\n\t}\n\tglog.V(2).Infof(\"Updated config to %+v\", config)\n\tif err = response.WriteHeaderAndEntity(http.StatusOK, config); err != nil {\n\t\tapi.writeError(http.StatusInternalServerError, err.Error(), response)\n\t}\n}", "func (settings *Settings) Update(newSettings *Settings) {\n\tif settings == nil {\n\t\treturn\n\t}\n\tif newSettings.ViewsPath == \"\" {\n\t\tsettings.ViewsPath = \".\"\n\t} else {\n\t\tsettings.ViewsPath = newSettings.ViewsPath\n\t}\n\tif newSettings.StaticPath == \"\" {\n\t\tsettings.StaticPath = \".\"\n\t} else {\n\t\tsettings.StaticPath = newSettings.StaticPath\n\t}\n\tif newSettings.ViewExtension == \"\" {\n\t\tsettings.ViewExtension = \"html\"\n\t} else {\n\t\tsettings.ViewExtension = newSettings.ViewExtension\n\t}\n}", "func Update(section, option, value string) {\n\tcfg.Update(section, option, value)\n}", "func changeSettings(w http.ResponseWriter, r *http.Request) {\n\tfmt.Println(\"trying to change settings\")\n\tif err := r.ParseForm(); err != nil {\n\t\tfmt.Println(err)\n\t}\n}", "func (m *Microservice) UpdateApplicationConfiguration(configAsString string) {\n\tzap.L().Info(\"Updating application configuration\")\n\titems := strings.Split(configAsString, \"\\n\")\n\n\tfor _, item := range items {\n\t\tzap.S().Infof(\"Parsing configuration item: %s\", item)\n\t\tparts := strings.Split(item, \"=\")\n\n\t\tif len(parts) == 2 {\n\t\t\tkey := strings.TrimSpace(parts[0])\n\t\t\tvalue := strings.TrimSpace(parts[1])\n\n\t\t\tif m.Config.isPrivateSetting(key) {\n\t\t\t\tzap.S().Infof(\"Ignoring private property [%s]\", key)\n\t\t\t} else if strings.HasPrefix(key, \"#\") {\n\t\t\t\tzap.S().Infof(\"Ignore comment [%s]\", key)\n\t\t\t} else {\n\t\t\t\tzap.S().Infof(\"Setting property [%s] to [%s]\", key, value)\n\t\t\t\tm.Config.viper.Set(key, value)\n\t\t\t}\n\t\t} else {\n\t\t\tzap.L().Info(\"Checking item\")\n\t\t}\n\t}\n}", "func (s *Server) SyncWithConfiguration(cfg remote.ServerConfigurationResponse) error {\n\tc := Configuration{\n\t\tCrashDetectionEnabled: config.Get().System.CrashDetection.CrashDetectionEnabled,\n\t}\n\tif err := json.Unmarshal(cfg.Settings, &c); err != nil {\n\t\treturn errors.WithStackIf(err)\n\t}\n\n\ts.cfg.mu.Lock()\n\tdefer s.cfg.mu.Unlock()\n\n\t// Lock the new configuration. 
Since we have the deferred Unlock above we need\n\t// to make sure that the NEW configuration object is already locked since that\n\t// defer is running on the memory address for \"s.cfg.mu\" which we're explicitly\n\t// changing on the next line.\n\tc.mu.Lock()\n\n\t//goland:noinspection GoVetCopyLock\n\ts.cfg = c\n\n\ts.Lock()\n\ts.procConfig = cfg.ProcessConfiguration\n\ts.Unlock()\n\n\treturn nil\n}", "func (c *Configs) setConfig(confName configName, value *ConfigSettings) {\n\tassert(len(confName) == len(c.ConfigVariables))\n\tassert(value != nil)\n\tkey := confName.key()\n\tpair, ok := c.byConfig[key]\n\tassert(!ok, \"setConfig must not override existing keys (%s => %v)\", key, pair.value)\n\tc.byConfig[key] = configPair{confName, value}\n}", "func UpdateConfig(conf caddy.Config, host string) error {\n\tu := fmt.Sprintf(\"http://%s:2019/load\", host)\n\n\td, err := json.Marshal(conf)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to json.Marshal: %w\", err)\n\t}\n\n\treq, err := http.NewRequest(http.MethodPost, u, bytes.NewBuffer(d))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to http.NewRequest: %w\", err)\n\t}\n\n\treq.Header.Add(\"Cache-Control\", \"must-revalidate\")\n\treq.Header.Add(\"Content-Type\", \"application/json\")\n\n\tclient := new(http.Client)\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to HTTP POST: %w\", err)\n\t}\n\tdefer resp.Body.Close()\n\n\tb, err := io.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to read response body: %w\", err)\n\t}\n\n\tif resp.StatusCode >= 400 {\n\t\treturn fmt.Errorf(\"invalid status code (code: %d, body: %s)\", resp.StatusCode, b)\n\t}\n\n\tklog.Info(\"Update successfully!\")\n\n\treturn nil\n}", "func (d *GossipSystem) ConfigUpdate(key string, val string) error {\n\tswitch key {\n\tcase \"report_period_ms\":\n\t\tperiod, err := strconv.ParseInt(val, 10, 64)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\td.server.collector.UpdateReportPeriod(time.Duration(period) * time.Millisecond)\n\t\treturn nil\n\tdefault:\n\t\tglog.Warningf(\"unsupport key: %v\", key)\n\t\treturn errors.New(\"unsupport key\")\n\t}\n}", "func (c *Config) Update(ncfg Config) {\n\tconfigLock.Lock()\n\tdefer configLock.Unlock()\n\n\tc.Enable = ncfg.Enable\n\tc.Frequency = ncfg.Frequency\n}", "func UpdateConfiguration(haproxyConfiguration *HaproxyConfiguration, loadBalancer *ActivityLoadBalancer) error {\n\tfrontendAttributes := map[string]common.ParserData{}\n\tfrontendAttributes[\"mode\"] = configStringC(\"http\")\n\tfrontendAttributes[\"bind\"] = &types.Bind{Path: \"0.0.0.0:8080\"}\n\tfrontendAttributes[\"log-format\"] = configStringC(\"httplog %Ts %ci %cp %si %sp %Tq %Tw %Tc %Tr %Tt %ST %U %B %f %b %s %ts %r %hrl\")\n\tfrontendAttributes[\"log\"] = &types.Log{Address: \"/var/lib/load-balancer-servo/haproxy.sock\", Facility: \"local2\", Level: \"info\"}\n\tfrontendAttributes[\"option forwardfor\"] = &types.OptionForwardFor{Except: \"127.0.0.1\"}\n\tfrontendAttributes[\"timeout client\"] = &types.SimpleTimeout{Value: \"60s\"}\n\tfrontendAttributes[\"default_backend\"] = configStringC(\"backend-http-8080\")\n\tfrontendAttributes[\"http-request\"] = []types.HTTPAction{\n\t\t&actions.SetHeader{Name: \"X-Forwarded-Proto\", Fmt: \"http\"},\n\t\t&actions.SetHeader{Name: \"X-Forwarded-Port\", Fmt: \"8080\"},\n\t\t//TODO syntax not supported by haproxy 1.5\n\t\t// &actions.Capture{Sample: \"hdr(User-Agent)\", Len: configInt64(8192)},\n\t}\n\terr := UpdateConfigurationSection(haproxyConfiguration, 
parser.Frontends, \"http-8080\", frontendAttributes)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tbackendAttributes := map[string]common.ParserData{}\n\tbackendAttributes[\"mode\"] = configStringC(\"http\")\n\tbackendAttributes[\"balance\"] = &types.Balance{\"roundrobin\", nil, \"\"}\n\tbackendAttributes[\"http-response\"] = &actions.SetHeader{Name: \"Cache-control\", Fmt: `no-cache=\"set-cookie\"`}\n\tbackendAttributes[\"cookie\"] = &types.Cookie{Name: \"AWSELB\", Type: \"insert\", Indirect: true, Maxidle: 300000, Maxlife: 300000}\n\tbackendAttributes[\"server\"] = []types.Server{{Name: \"http-8080\", Address: \"10.111.10.215:8080\", Params: []params.ServerOption{&params.ServerOptionValue{Name: \"cookie\", Value: \"MTAuMTExLjEwLjIxNQ==\"}}}}\n\tbackendAttributes[\"timeout server\"] = &types.SimpleTimeout{Value: \"60s\"}\n\terr = UpdateConfigurationSection(haproxyConfiguration, parser.Backends, \"backend-http-8080\", backendAttributes)\n\treturn err\n}", "func (sc *Config) UpdateConfig() {\n\tviper.Set(sc.confServerNameKey(), sc.Name)\n\tviper.Set(sc.confServerTypeKey(), sc.ServerType)\n\tviper.Set(sc.confLocationNameKey(), sc.LocationName)\n\tviper.Set(sc.confImageNameKey(), sc.ImageName)\n\tviper.Set(sc.confSSKPublicKeyID(), sc.SSHPublicKeyID)\n\tviper.Set(sc.confRoles(), sc.Roles)\n\n\tif sc.ID != 0 {\n\t\tviper.Set(sc.confIDKey(), sc.ID)\n\t}\n\n\tif sc.PublicIP != \"\" {\n\t\tviper.Set(sc.confPublicIPKey(), sc.PublicIP)\n\t}\n\n\tif sc.PrivateIP != \"\" {\n\t\tviper.Set(sc.confPrivateIPKey(), sc.PrivateIP)\n\t}\n\n\tif sc.RootPassword != \"\" {\n\t\tviper.Set(sc.confRootPasswordKey(), sc.RootPassword)\n\t}\n}", "func SetConfig(settings *config.Settings) {\n cfg = settings\n}", "func (d *StaticSystem) ConfigUpdate(key string, val string) error {\n\tswitch key {\n\tcase \"report_period_ms\":\n\t\t// update myself\n\t\tperiod, err := strconv.ParseInt(val, 10, 64)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\td.server.collector.UpdateReportPeriod(time.Duration(period) * time.Millisecond)\n\t\treturn nil\n\tdefault:\n\t\tglog.Warningf(\"unsupport key: %v\", key)\n\t\treturn errors.New(\"unsupport key\")\n\t}\n}", "func (c *CheckpointAdvancer) UpdateConfig(newConf config.Config) {\n\tneedRefreshCache := newConf.AdvancingByCache != c.cfg.AdvancingByCache\n\tc.cfg = newConf\n\tif needRefreshCache {\n\t\tif c.cfg.AdvancingByCache {\n\t\t\tc.enableCache()\n\t\t} else {\n\t\t\tc.disableCache()\n\t\t}\n\t}\n}", "func setconfig(config *driver.Driver, run *paramstudy.ConfigMod, dir string) error {\n\n\t// For debugging\n\tconfig.Options.ExtIter = 10\n\n\t// Most of the options are straightforward\n\tconfig.Options.Aoa = run.Aoa\n\tconfig.Options.CflNumber = run.CFL\n\tconfig.Options.Mglevel = uint16(run.Mglevel) // cast Mglevel as a uint16\n\tconfig.Options.LinearSolverIter = uint64(run.LinSolveIter)\n\tconfig.Options.LimiterCoeff = run.Limiter\n\n\t// The mesh file lives in a specific location, so we have to set the value\n\t// relative to the new directory.\n\tmeshDir, err := filepath.Rel(dir, filepath.Join(paramstudy.MeshDir, run.Mesh))\n\tif err != nil {\n\t\treturn err\n\t}\n\tconfig.Options.MeshFilename = meshDir\n\treturn nil\n}", "func (self *PhysicsP2) SetConfigA(member interface{}) {\n self.Object.Set(\"config\", member)\n}", "func UpdateConfig(w http.ResponseWriter, r *http.Request) {\n\tbytes, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\tlog.Errorf(\"UpdateConfig failed with error: %v\", err)\n\t\tReturnHTTPError(w, r, http.StatusBadRequest, \"Bad Request, Please check the 
request content\")\n\t}\n\tvar authConfig model.AuthConfig\n\n\terr = json.Unmarshal(bytes, &authConfig)\n\tif err != nil {\n\t\tlog.Errorf(\"UpdateConfig unmarshal failed with error: %v\", err)\n\t\tReturnHTTPError(w, r, http.StatusBadRequest, \"Bad Request, Please check the request content\")\n\t}\n\n\tif authConfig.Provider == \"\" {\n\t\tlog.Errorf(\"UpdateConfig: Provider is a required field\")\n\t\tReturnHTTPError(w, r, http.StatusBadRequest, \"Bad Request, Please check the request content, Provider is a required field\")\n\t}\n\terr = server.UpdateConfig(authConfig)\n\tif err != nil {\n\t\tlog.Errorf(\"UpdateConfig failed with error: %v\", err)\n\t\tReturnHTTPError(w, r, http.StatusBadRequest, \"Bad Request, Please check the request content\")\n\t} else {\n\t\tlog.Debugf(\"Updated config, listing the config back\")\n\t\t//list the config and return in response\n\t\tconfig, err := server.GetConfig(\"\")\n\t\tif err == nil {\n\t\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\t\tjson.NewEncoder(w).Encode(config)\n\t\t} else {\n\t\t\t//failed to get the config\n\t\t\tlog.Debugf(\"GetConfig failed with error %v\", err)\n\t\t\tReturnHTTPError(w, r, http.StatusInternalServerError, \"Failed to list the config\")\n\t\t}\n\t}\n}", "func (s *Syncthing) UpdateConfig() error {\n\tbuf := new(bytes.Buffer)\n\tif err := configTemplate.Execute(buf, s); err != nil {\n\t\treturn fmt.Errorf(\"failed to write syncthing configuration template: %w\", err)\n\t}\n\n\tif err := os.WriteFile(filepath.Join(s.Home, configFile), buf.Bytes(), 0600); err != nil {\n\t\treturn fmt.Errorf(\"failed to write syncthing configuration file: %w\", err)\n\t}\n\n\treturn nil\n}", "func UpdateConfig(w http.ResponseWriter, r *http.Request) {\n\tapiContext := api.GetApiContext(r)\n\tbytes, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\tlog.Errorf(\"UpdateConfig failed with error: %v\", err)\n\t\tReturnHTTPError(w, r, http.StatusBadRequest, \"Bad Request, Please check the request content\")\n\t\treturn\n\t}\n\tvar authConfig model.AuthConfig\n\n\terr = json.Unmarshal(bytes, &authConfig)\n\tif err != nil {\n\t\tlog.Errorf(\"UpdateConfig unmarshal failed with error: %v\", err)\n\t\tReturnHTTPError(w, r, http.StatusBadRequest, \"Bad Request, Please check the request content\")\n\t\treturn\n\t}\n\n\tif authConfig.Provider == \"\" {\n\t\tlog.Errorf(\"UpdateConfig: Provider is a required field\")\n\t\tReturnHTTPError(w, r, http.StatusBadRequest, \"Bad Request, Please check the request content, Provider is a required field\")\n\t\treturn\n\t}\n\n\terr = server.UpdateConfig(authConfig)\n\tif err != nil {\n\t\tlog.Errorf(\"UpdateConfig failed with error: %v\", err)\n\t\tReturnHTTPError(w, r, http.StatusBadRequest, \"Bad Request, Please check the request content\")\n\t\treturn\n\t}\n\tlog.Debugf(\"Updated config, listing the config back\")\n\n\t//list the config and return in response\n\tconfig, err := server.GetConfig(\"\", true)\n\tif err == nil {\n\t\tapiContext.Write(&config)\n\t} else {\n\t\t//failed to get the config\n\t\tlog.Debugf(\"GetConfig failed with error %v\", err)\n\t\tReturnHTTPError(w, r, http.StatusInternalServerError, \"Failed to list the config\")\n\t\treturn\n\t}\n}", "func UpdateRules(newRules []C.Rule) {\n\tconfigMux.Lock()\n\trules = newRules\n\tconfigMux.Unlock()\n}", "func updateConfigurationParameter(fieldName string, fieldValue string) (retErr error) {\n\n\tsvc := dynamodb.New(common.Sess)\n\n\t//Espressione condizionale che impone l'esistenza del parametro (Senza questa condizione, 
DynamoDB può creare una entry se non trova la corrispettiva chiave nel database)\n\tcond := \"attribute_exists(FieldName)\"\n\n\tinput := &dynamodb.UpdateItemInput{\n\t\tExpressionAttributeValues: map[string]*dynamodb.AttributeValue{\n\t\t\t\":v\": {\n\t\t\t\tS: aws.String(fieldValue),\n\t\t\t},\n\t\t},\n\t\tTableName: aws.String(configTable),\n\t\tKey: map[string]*dynamodb.AttributeValue{\n\t\t\t\"FieldName\": {\n\t\t\t\tS: aws.String(fieldName),\n\t\t\t},\n\t\t},\n\t\tConditionExpression: &cond,\n\t\tReturnValues: aws.String(\"UPDATED_NEW\"),\n\t\tUpdateExpression: aws.String(\"set FieldValue = :v\"),\n\t}\n\n\t//Esecuzione della query\n\t_, err := svc.UpdateItem(input)\n\tif err != nil {\n\t\tcommon.Fatal(\"[BROKER] Errore nell'aggiornamento del parametro di configurazione. \" + err.Error())\n\t\treturn err\n\t}\n\n\tcommon.Info(\"[BROKER] Parametro aggiornato\")\n\n\treturn nil\n}", "func (c *Config) Update(c2 Config) {\n\tif c2.ClientID != \"\" {\n\t\tc.ClientID = c2.ClientID\n\t}\n\tif c2.Quality != \"\" {\n\t\tc.Quality = c2.Quality\n\t}\n\tif c2.StartTime != \"\" {\n\t\tc.StartTime = c2.StartTime\n\t}\n\tif c2.EndTime != \"\" {\n\t\tc.EndTime = c2.EndTime\n\t}\n\tif c2.Length != \"\" {\n\t\tc.EndTime = c2.Length\n\t}\n\tif c2.VodID != 0 {\n\t\tc.VodID = c2.VodID\n\t}\n\tif c2.FilePrefix != \"\" {\n\t\tc.FilePrefix = c2.FilePrefix\n\t}\n\tif c2.OutputFolder != \"\" {\n\t\tc.OutputFolder = c2.OutputFolder\n\t}\n\tif c2.Workers != 0 {\n\t\tc.Workers = c2.Workers\n\t}\n}", "func (w *Worker) UpdateRelayConfig(ctx context.Context, content string) error {\n\tw.Lock()\n\tdefer w.Unlock()\n\n\tif w.closed.Get() == closedTrue {\n\t\treturn terror.ErrWorkerAlreadyClosed.Generate()\n\t}\n\n\tstage := w.relayHolder.Stage()\n\tif stage == pb.Stage_Finished || stage == pb.Stage_Stopped {\n\t\treturn terror.ErrWorkerRelayUnitStage.Generate(stage.String())\n\t}\n\n\tsts := w.subTaskHolder.getAllSubTasks()\n\n\t// Check whether subtask is running syncer unit\n\tfor _, st := range sts {\n\t\tisRunning := st.CheckUnit()\n\t\tif !isRunning {\n\t\t\treturn terror.ErrWorkerNoSyncerRunning.Generate()\n\t\t}\n\t}\n\n\t// Save configure to local file.\n\tnewCfg := NewConfig()\n\terr := newCfg.UpdateConfigFile(content)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = newCfg.Reload()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif newCfg.SourceID != w.cfg.SourceID {\n\t\treturn terror.ErrWorkerCannotUpdateSourceID.Generate()\n\t}\n\n\tw.l.Info(\"update relay config\", zap.Stringer(\"new config\", newCfg))\n\tcloneCfg, _ := newCfg.DecryptPassword()\n\n\t// Update SubTask configure\n\t// NOTE: we only update `DB.Config` in SubTaskConfig now\n\tfor _, st := range sts {\n\t\tcfg := config.NewSubTaskConfig()\n\n\t\tcfg.From = cloneCfg.From\n\t\tcfg.From.Adjust()\n\n\t\tstage := st.Stage()\n\t\tif stage == pb.Stage_Paused {\n\t\t\terr = st.UpdateFromConfig(cfg)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t} else if stage == pb.Stage_Running {\n\t\t\terr = st.Pause()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\terr = st.UpdateFromConfig(cfg)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\terr = st.Resume()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tw.l.Info(\"update relay config of subtasks successfully.\")\n\n\t// Update relay unit configure\n\terr = w.relayHolder.Update(ctx, cloneCfg)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tw.cfg.From = newCfg.From\n\tw.cfg.AutoFixGTID = newCfg.AutoFixGTID\n\tw.cfg.Charset = newCfg.Charset\n\n\tif 
w.cfg.ConfigFile == \"\" {\n\t\tw.cfg.ConfigFile = \"dm-worker.toml\"\n\t}\n\tcontent, err = w.cfg.Toml()\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = w.cfg.UpdateConfigFile(content)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tw.l.Info(\"update relay config successfully, save config to local file\", zap.String(\"local file\", w.cfg.ConfigFile))\n\n\treturn nil\n}", "func UpdateConfig(host string, verifyTLS bool, apiKey string, project string, config string, name string) (models.ConfigInfo, Error) {\n\tpostBody := map[string]interface{}{\"name\": name}\n\tbody, err := json.Marshal(postBody)\n\tif err != nil {\n\t\treturn models.ConfigInfo{}, Error{Err: err, Message: \"Invalid config info\"}\n\t}\n\n\tvar params []queryParam\n\tparams = append(params, queryParam{Key: \"project\", Value: project})\n\tparams = append(params, queryParam{Key: \"config\", Value: config})\n\n\turl, err := generateURL(host, \"/v3/configs/config\", params)\n\tif err != nil {\n\t\treturn models.ConfigInfo{}, Error{Err: err, Message: \"Unable to generate url\"}\n\t}\n\n\tstatusCode, _, response, err := PostRequest(url, verifyTLS, apiKeyHeader(apiKey), body)\n\tif err != nil {\n\t\treturn models.ConfigInfo{}, Error{Err: err, Message: \"Unable to update config\", Code: statusCode}\n\t}\n\n\tvar result map[string]interface{}\n\terr = json.Unmarshal(response, &result)\n\tif err != nil {\n\t\treturn models.ConfigInfo{}, Error{Err: err, Message: \"Unable to parse API response\", Code: statusCode}\n\t}\n\n\tconfigInfo, ok := result[\"config\"].(map[string]interface{})\n\tif !ok {\n\t\treturn models.ConfigInfo{}, Error{Err: fmt.Errorf(\"Unexpected type parsing config info, expected map[string]interface{}, got %T\", result[\"config\"]), Message: \"Unable to parse API response\", Code: statusCode}\n\t}\n\tinfo := models.ParseConfigInfo(configInfo)\n\treturn info, Error{}\n}", "func UpdateConfig(authConfig model.AuthConfig) error {\n\tif authConfig.Provider == \"shibbolethconfig\" {\n\t\tauthConfig.ShibbolethConfig.IDPMetadataFilePath = IDPMetadataFile\n\t\tauthConfig.ShibbolethConfig.SPSelfSignedCertFilePath = selfSignedCertFile\n\t\tauthConfig.ShibbolethConfig.SPSelfSignedKeyFilePath = selfSignedKeyFile\n\t\tauthConfig.ShibbolethConfig.RancherAPIHost = GetRancherAPIHost()\n\t}\n\n\tnewProvider, err := initProviderWithConfig(&authConfig)\n\tif err != nil {\n\t\tlog.Errorf(\"UpdateConfig: Cannot update the config, error initializing the provider %v\", err)\n\t\treturn err\n\t}\n\t//store the config to db\n\tlog.Infof(\"newProvider %v\", newProvider.GetName())\n\n\tproviderSettings := newProvider.GetSettings()\n\n\tgenObjConfig := make(map[string]map[string]string)\n\tgenObjConfig[newProvider.GetName()] = providerSettings\n\terr = updateSettings(genObjConfig, newProvider.GetProviderSecretSettings(), newProvider.GetName(), authConfig.Enabled)\n\tif err != nil {\n\t\tlog.Errorf(\"UpdateConfig: Error Storing the provider settings %v\", err)\n\t\treturn err\n\t}\n\n\t//add the generic settings\n\tcommonSettings := make(map[string]string)\n\tcommonSettings[accessModeSetting] = authConfig.AccessMode\n\tcommonSettings[userTypeSetting] = newProvider.GetUserType()\n\tcommonSettings[identitySeparatorSetting] = newProvider.GetIdentitySeparator()\n\tcommonSettings[allowedIdentitiesSetting] = getAllowedIDString(authConfig.AllowedIdentities, newProvider.GetIdentitySeparator())\n\tcommonSettings[providerNameSetting] = authConfig.Provider\n\tcommonSettings[providerSetting] = authConfig.Provider\n\tcommonSettings[externalProviderSetting] = 
\"true\"\n\tcommonSettings[noIdentityLookupSupportedSetting] = strconv.FormatBool(!newProvider.IsIdentityLookupSupported())\n\terr = updateCommonSettings(commonSettings)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"UpdateConfig: Error Storing the common settings\")\n\t}\n\n\t//set the security setting last specifically\n\tcommonSettings = make(map[string]string)\n\tcommonSettings[securitySetting] = strconv.FormatBool(authConfig.Enabled)\n\tcommonSettings[authServiceConfigUpdateTimestamp] = time.Now().String()\n\terr = updateCommonSettings(commonSettings)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"UpdateConfig: Error Storing the provider securitySetting\")\n\t}\n\n\t//switch the in-memory provider\n\tif provider == nil {\n\t\tif authConfig.Provider == \"shibbolethconfig\" {\n\t\t\tSamlServiceProvider = authConfig.ShibbolethConfig.SamlServiceProvider\n\t\t}\n\t\tprovider = newProvider\n\t\tauthConfigInMemory = authConfig\n\t} else {\n\t\t//reload the in-memory provider\n\t\tlog.Infof(\"Calling reload\")\n\t\tskipped, err := Reload(true)\n\t\tfor skipped {\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorf(\"Failed to reload the auth provider from db on updateConfig: %v\", err)\n\t\t\t\treturn err\n\t\t\t}\n\t\t\ttime.Sleep(30 * time.Millisecond)\n\t\t\tskipped, err = Reload(true)\n\t\t}\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"Failed to reload the auth provider from db on updateConfig: %v\", err)\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func (client AppsClient) UpdateSettingsResponder(resp *http.Response) (result OperationStatus, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tclient.ByInspecting(),\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func (c *Config) UpdateConfig() (err error) {\n\tconfigJson, _ := json.MarshalIndent(c, \"\", \" \")\n\terr = ioutil.WriteFile(path, configJson, 0644)\n\treturn\n}", "func (c *Consumer) UpdateConfiguration(opts ...ConsumerOption) error {\n\tif !c.IsDurable() {\n\t\treturn fmt.Errorf(\"only durable consumers can be updated\")\n\t}\n\n\tncfg, err := NewConsumerConfiguration(*c.cfg, opts...)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = c.mgr.NewConsumerFromDefault(c.stream, *ncfg)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn c.Reset()\n}", "func (r CreateMatchmakingConfigurationRequest) Send(ctx context.Context) (*CreateMatchmakingConfigurationResponse, error) {\n\tr.Request.SetContext(ctx)\n\terr := r.Request.Send()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresp := &CreateMatchmakingConfigurationResponse{\n\t\tCreateMatchmakingConfigurationOutput: r.Request.Data.(*CreateMatchmakingConfigurationOutput),\n\t\tresponse: &aws.Response{Request: r.Request},\n\t}\n\n\treturn resp, nil\n}", "func (ag *AccountService) UpdateSettings(opts *Optionals) (newSettings *AccountSettings, err error) {\n\tif opts == nil {\n\t\topts = NewOptionals()\n\t}\n\tnewSettings = &AccountSettings{}\n\terr = ag.Call(\"POST\", \"account/settings\", opts, newSettings)\n\treturn\n}", "func UpdateMatches(matches []Match, upskill interfaces.Skill) []Match {\n\t// Structure outside the match loop for tracking our adjusted skills.\n\tskills := make(map[string]float64)\n\n\tvar adjMatches []Match\n\t//\tglog.V(2).Infoln(\"INITIAL SKILL\")\n\tfor _, match := range matches {\n\t\tadjMatch := Match{}\n\n\t\t// Copy names over to the adjusted match\n\t\tadjMatch.P1name = 
match.P1name\n\t\tadjMatch.P2name = match.P2name\n\n\t\t// Look up the latest skills - or if seeing the player for the first time add\n\t\t// their skill based on the skill in this first match.\n\t\tp1skill, ok := skills[match.P1name]\n\t\tif !ok {\n\t\t\tskills[match.P1name] = match.P1skill\n\t\t\t//\t\t\tglog.V(2).Infof(\"Initial Skill: %s: %f\\n\", match.P1name, match.P1skill)\n\t\t\tadjMatch.P1skill = match.P1skill\n\t\t\tp1skill = match.P1skill\n\t\t}\n\t\tadjMatch.P1skill = p1skill\n\t\tp2skill, ok := skills[match.P2name]\n\t\tif !ok {\n\t\t\tskills[match.P2name] = match.P2skill\n\t\t\t//\t\t\tglog.V(2).Infof(\"Initial Skill: %s: %f\\n\", match.P2name, match.P2skill)\n\t\t\tadjMatch.P2skill = match.P2skill\n\t\t\tp2skill = match.P2skill\n\t\t}\n\t\tadjMatch.P2skill = p2skill\n\t\t//\t\tglog.V(2).Infof(\"Skills set to NplRace for %s vs %s: orig skills:%f|%f new skills:%f|%f \", match.P1name, match.P2name, match.P1skill, match.P2skill, adjMatch.P1skill, adjMatch.P2skill)\n\n\t\t// Look up and adjust needs from the race chart.\n\t\t//adjMatch.P1needs, adjMatch.P2needs = npl.NplRace(adjMatch.P1skill, adjMatch.P2skill)\n\t\tadjMatch.P1needs, adjMatch.P2needs = npl.FitRace(adjMatch.P1skill, adjMatch.P2skill, match.P1got, match.P2got)\n\t\t// Debug log when we make a change in race calculation.\n\t\tif adjMatch.P1needs != match.P1needs || adjMatch.P2needs != match.P2needs {\n\t\t\t//\t\t\tglog.V(2).Infof(\"Adjusted match for %s vs %s: orig race:%f|%f new race:%f|%f orig skills:%f|%f new skills:%f|%f \", match.P1name, match.P2name, match.P1needs, match.P2needs, adjMatch.P1needs, adjMatch.P2needs, match.P1skill, match.P2skill, adjMatch.P1skill, adjMatch.P2skill)\n\t\t}\n\n\t\t// Model a new \"got\" games, if historic data can't determine the winner.\n\n\t\tadjMatch.P1got, adjMatch.P2got = UpdateGot(adjMatch.P1needs, adjMatch.P2needs, match.P1got, match.P2got)\n\t\t//\t\tadjMatch.P1got, adjMatch.P2got = StatUpdateGot(adjMatch.P1needs, adjMatch.P2needs, match.P1got, match.P2got, match.P1needs, match.P2needs)\n\n\t\tmaxGames := adjMatch.P1needs + adjMatch.P2needs - 1\n\t\tplayedGames := match.P1got + match.P2got\n\n\t\t// Conditionaly adjust skills, based on who won and how close it was.\n\t\tif adjMatch.P1got == adjMatch.P1needs {\n\t\t\tw := skills[match.P1name]\n\t\t\tl := skills[match.P2name]\n\t\t\t//\t\t\tglog.V(2).Infof(\"### Sent %f, %f into Update. \", w, l)\n\t\t\tskills[match.P1name], skills[match.P2name] = upskill.Update(w, l, maxGames, playedGames)\n\t\t\t//\t\t\tglog.V(2).Infof(\"### Got %f, %f from Update. \", skills[match.P1name], skills[match.P2name])\n\t\t} else {\n\t\t\tw := skills[match.P2name]\n\t\t\tl := skills[match.P1name]\n\t\t\t//\t\t\tglog.V(2).Infof(\"### Sent %f, %f into Update. \", w, l)\n\t\t\tskills[match.P2name], skills[match.P1name] = upskill.Update(w, l, maxGames, playedGames)\n\t\t\t//\t\t\tglog.V(2).Infof(\"### Got %f, %f from Update. 
\", skills[match.P2name], skills[match.P1name])\n\t\t}\n\t\tadjMatch.P1skill, adjMatch.P2skill = skills[match.P1name], skills[match.P2name]\n\t\t/*\n\t\t\tglog.V(2).Infof(\n\t\t\t\t\"### %3f, %3f, %2f, %2f\\n### %3f, %3f, %2f, %2f\\n\\n\",\n\t\t\t\tmatch.P1skill, match.P2skill, match.P1needs, match.P2needs,\n\t\t\t\tadjMatch.P1skill, adjMatch.P2skill, adjMatch.P1needs, adjMatch.P2needs,\n\t\t\t)\n\t\t*/\n\n\t\tadjMatches = append(adjMatches, adjMatch)\n\t}\n\treturn adjMatches\n}", "func (cmw *configMapWatcher) updateConfig() {\n\tconf, err := readConfigMap(cmw.logger, cmw.dir)\n\tif err != nil {\n\t\tcmw.logger.Error(\"Unable to read the configMap\", zap.Error(err))\n\t\treturn\n\t}\n\terr = cmw.configUpdated(conf)\n\tif err != nil {\n\t\tcmw.logger.Error(\"Unable to update config\", zap.Error(err))\n\t\treturn\n\t}\n}", "func SetMatchmakingQueue(settings *playfab.Settings, postData *SetMatchmakingQueueRequestModel, entityToken string) (*SetMatchmakingQueueResultModel, error) {\n if entityToken == \"\" {\n return nil, playfab.NewCustomError(\"entityToken should not be an empty string\", playfab.ErrorGeneric)\n }\n b, errMarshal := json.Marshal(postData)\n if errMarshal != nil {\n return nil, playfab.NewCustomError(errMarshal.Error(), playfab.ErrorMarshal)\n }\n\n sourceMap, err := playfab.Request(settings, b, \"/Match/SetMatchmakingQueue\", \"X-EntityToken\", entityToken)\n if err != nil {\n return nil, err\n }\n \n result := &SetMatchmakingQueueResultModel{}\n\n config := mapstructure.DecoderConfig{\n DecodeHook: playfab.StringToDateTimeHook,\n Result: result,\n }\n \n decoder, errDecoding := mapstructure.NewDecoder(&config)\n if errDecoding != nil {\n return nil, playfab.NewCustomError(errDecoding.Error(), playfab.ErrorDecoding)\n }\n \n errDecoding = decoder.Decode(sourceMap)\n if errDecoding != nil {\n return nil, playfab.NewCustomError(errDecoding.Error(), playfab.ErrorDecoding)\n }\n\n return result, nil\n}", "func (c *Config) adjust(meta *toml.MetaData) error {\n\tconfigMetaData := configutil.NewConfigMetadata(meta)\n\tif err := configMetaData.CheckUndecoded(); err != nil {\n\t\tc.WarningMsgs = append(c.WarningMsgs, err.Error())\n\t}\n\n\tif c.Name == \"\" {\n\t\thostname, err := os.Hostname()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tconfigutil.AdjustString(&c.Name, fmt.Sprintf(\"%s-%s\", defaultName, hostname))\n\t}\n\tconfigutil.AdjustString(&c.DataDir, fmt.Sprintf(\"default.%s\", c.Name))\n\tconfigutil.AdjustPath(&c.DataDir)\n\n\tif err := c.validate(); err != nil {\n\t\treturn err\n\t}\n\n\tconfigutil.AdjustString(&c.BackendEndpoints, defaultBackendEndpoints)\n\tconfigutil.AdjustString(&c.ListenAddr, defaultListenAddr)\n\tconfigutil.AdjustString(&c.AdvertiseListenAddr, c.ListenAddr)\n\n\tif !configMetaData.IsDefined(\"enable-grpc-gateway\") {\n\t\tc.EnableGRPCGateway = utils.DefaultEnableGRPCGateway\n\t}\n\n\tc.adjustLog(configMetaData.Child(\"log\"))\n\tc.Security.Encryption.Adjust()\n\n\tif len(c.Log.Format) == 0 {\n\t\tc.Log.Format = utils.DefaultLogFormat\n\t}\n\n\tconfigutil.AdjustInt64(&c.LeaderLease, utils.DefaultLeaderLease)\n\n\tif err := c.Schedule.Adjust(configMetaData.Child(\"schedule\"), false); err != nil {\n\t\treturn err\n\t}\n\treturn c.Replication.Adjust(configMetaData.Child(\"replication\"))\n}", "func (d *DynamoConn) UpdateSettings(settings *models.Settings) error {\n\tvar SettingsUpdate struct {\n\t\tSOSSMS bool `json:\":a\"`\n\t\tSOSCalls bool `json:\":b\"`\n\t\tSOSLockscreenInfo bool `json:\":c\"`\n\t\tUpdates bool 
`json:\":d\"`\n\t\tUpdateFrequency int `json:\":e\"`\n\t}\n\n\t// Marshal the update expression struct for DynamoDB\n\tSettingsUpdate.SOSSMS = settings.SOSSMS\n\tSettingsUpdate.SOSCalls = settings.SOSCalls\n\tSettingsUpdate.SOSLockscreenInfo = settings.SOSLockscreenInfo\n\tSettingsUpdate.Updates = settings.Updates\n\tSettingsUpdate.UpdateFrequency = settings.UpdateFrequency\n\n\texpr, err := dynamodbattribute.MarshalMap(SettingsUpdate)\n\tif err != nil {\n\t\treturn err\n\n\t}\n\n\t// Define table schema's key\n\tkey := map[string]*dynamodb.AttributeValue{\n\t\t\"user_id\": {\n\t\t\tS: aws.String(settings.UserID),\n\t\t},\n\t}\n\n\t// Use marshalled map for UpdateItemInput\n\titem := &dynamodb.UpdateItemInput{\n\t\tExpressionAttributeValues: expr,\n\t\tTableName: aws.String(common.SettingsTableName),\n\t\tKey: key,\n\t\tReturnValues: aws.String(\"UPDATED_NEW\"),\n\t\tUpdateExpression: aws.String(\"set sos_sms = :a, sos_calls = :b, sos_lockscreen = :c, updates = :d, update_frequency = :e\"),\n\t}\n\n\t// Invoke the update\n\t_, err = d.Client.UpdateItem(item)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (p *F5DNSLBProvider) UpdateConfig(cfg *factory.ServiceRequest, subscriptionId string) error {\n\turl := fmt.Sprintf(\"https://api-unstable.dev.f5aas.com/v1/svc-subscription/subscriptions/%s\", subscriptionId)\n\tclient := resty.New()\n\tclient.SetRetryCount(10).SetRetryWaitTime(2 * time.Second)\n\n\tenc, err := json.Marshal(cfg)\n\tlog.Printf(\"Updated Config: %v\", string(enc))\n\n\tresp, err := client.R().\n\t\tSetHeader(\"Accept\", \"application/json\").\n\t\tSetAuthToken(p.client.authToken.AccessToken).\n\t\tSetBody(cfg).\n\t\tPut(url)\n\tif err != nil {\n\t\tlog.Printf(\"Could not update config. Error %v\", err)\n\t}\n\tif resp.StatusCode() == 200 {\n\t\tvar subscription map[string]interface{}\n\t\t_ = json.Unmarshal(resp.Body(), &subscription)\n\t\tp.client.subscriptionId = subscription[\"subscription_id\"].(string)\n\t\tlog.Printf(\"Successfully updated DNS records for subscription %s\", p.client.subscriptionId)\n\t} else {\n\t\tlog.Printf(\"Failed to Update err %v\", resp.StatusCode())\n\t\tlog.Printf(\"Error response %v\", string(resp.Body()))\n\t\treturn err\n\t}\n\treturn nil\n}", "func (adm Admin) SetConfig(cluster string, scope string, properties map[string]string) error {\n\tconn := newConnection(adm.ZkSvr)\n\terr := conn.Connect()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer conn.Disconnect()\n\n\tswitch strings.ToUpper(scope) {\n\tcase \"CLUSTER\":\n\t\tif allow, ok := properties[\"allowParticipantAutoJoin\"]; ok {\n\t\t\tkeys := KeyBuilder{cluster}\n\t\t\tpath := keys.clusterConfig()\n\n\t\t\tif strings.ToLower(allow) == \"true\" {\n\t\t\t\tconn.UpdateSimpleField(path, \"allowParticipantAutoJoin\", \"true\")\n\t\t\t}\n\t\t}\n\tcase \"CONSTRAINT\":\n\tcase \"PARTICIPANT\":\n\tcase \"PARTITION\":\n\tcase \"RESOURCE\":\n\t}\n\n\treturn nil\n}", "func (agent *Agent) applyRoutingConfig(cfg *routeConfig) bool {\n\t// Check some basic things to ensure consistency!\n\tif cfg.vbMap != nil && cfg.vbMap.NumVbuckets() != agent.numVbuckets {\n\t\tlogErrorf(\"Received a configuration with a different number of vbuckets. 
Ignoring.\")\n\t\treturn false\n\t}\n\n\t// Only a single thing can modify the config at any time\n\tagent.configLock.Lock()\n\tdefer agent.configLock.Unlock()\n\n\tnewRouting := &routeData{\n\t\trevId: cfg.revId,\n\t\tuuid: cfg.uuid,\n\t\tcapiEpList: cfg.capiEpList,\n\t\tmgmtEpList: cfg.mgmtEpList,\n\t\tn1qlEpList: cfg.n1qlEpList,\n\t\tftsEpList: cfg.ftsEpList,\n\t\tcbasEpList: cfg.cbasEpList,\n\t\tvbMap: cfg.vbMap,\n\t\tketamaMap: cfg.ketamaMap,\n\t\tbktType: cfg.bktType,\n\t\tsource: cfg,\n\t}\n\n\tnewRouting.clientMux = agent.newMemdClientMux(cfg.kvServerList)\n\n\toldRouting := agent.routingInfo.Get()\n\tif oldRouting == nil {\n\t\treturn false\n\t}\n\n\t// Check that the new config data is newer than the current one, in the case where we've done a select bucket\n\t// against an existing connection then the revisions could be the same. In that case the configuration still\n\t// needs to be applied.\n\tif newRouting.revId == 0 {\n\t\tlogDebugf(\"Unversioned configuration data, \")\n\t} else if newRouting.revId == oldRouting.revId {\n\t\tlogDebugf(\"Ignoring configuration with identical revision number\")\n\t\treturn false\n\t} else if newRouting.revId < oldRouting.revId {\n\t\tlogDebugf(\"Ignoring new configuration as it has an older revision id\")\n\t\treturn false\n\t}\n\n\t// Attempt to atomically update the routing data\n\tif !agent.routingInfo.Update(oldRouting, newRouting) {\n\t\tlogErrorf(\"Someone preempted the config update, skipping update\")\n\t\treturn false\n\t}\n\n\tlogDebugf(\"Switching routing data (update)...\")\n\tlogDebugf(\"New Routing Data:\\n%s\", newRouting.DebugString())\n\n\tif oldRouting.clientMux == nil {\n\t\t// This is a new agent so there is no existing muxer. We can\n\t\t// simply start the new muxer.\n\t\tnewRouting.clientMux.Start()\n\t} else {\n\t\t// Get the new muxer to takeover the pipelines from the older one\n\t\tnewRouting.clientMux.Takeover(oldRouting.clientMux)\n\n\t\t// Gather all the requests from all the old pipelines and then\n\t\t// sort and redispatch them (which will use the new pipelines)\n\t\tvar requestList []*memdQRequest\n\t\toldRouting.clientMux.Drain(func(req *memdQRequest) {\n\t\t\trequestList = append(requestList, req)\n\t\t})\n\n\t\tsort.Sort(memdQRequestSorter(requestList))\n\n\t\tfor _, req := range requestList {\n\t\t\tagent.stopCmdTrace(req)\n\t\t\tagent.requeueDirect(req, false)\n\t\t}\n\t}\n\n\treturn true\n}", "func UpdateConfig(path string, c interface{}) (err error) {\n\tconfigsJson, _ := json.MarshalIndent(c, \"\", \" \")\n\terr = ioutil.WriteFile(path, configsJson, 0644)\n\treturn\n}", "func updateConfigValues() bool {\n leftModules := getConfValue(\"main;modules_left\", \"\")\n centerModules := getConfValue(\"main;modules_center\", \"\")\n rightModules := getConfValue(\"main;modules_right\", \"\")\n\n if leftModules == \"\" && centerModules == \"\" && rightModules == \"\" {\n leftModules = defaultEnabledModules[0]\n centerModules = defaultEnabledModules[1]\n rightModules = defaultEnabledModules[2]\n }\n\n paddingLeft := getConfInt(\"main;left_padding\", 0)\n paddingRight := getConfInt(\"main;right_padding\", 0)\n\n seperator := getConfValue(\"main;item_seperator\", \"|\")\n\n if leftModules != enabledModules[0] || centerModules != enabledModules[1] ||\n rightModules != enabledModules[2] {\n enabledModules[0] = leftModules\n enabledModules[1] = centerModules\n enabledModules[2] = rightModules\n return true\n }\n\n if paddingLeft != leftPadding || paddingRight != rightPadding {\n leftPadding = paddingLeft\n 
rightPadding = paddingRight\n return true\n }\n\n if seperator != elementSeperator {\n elementSeperator = seperator\n return true\n }\n\n return false\n}", "func (s *SimpleState) SetConfig(configMap map[string]float64) {\n\tif v, ok := configMap[\"fastSmoothConst\"]; ok {\n\t\tfastSmoothConst = v\n\t}\n\tif v, ok := configMap[\"slowSmoothConst\"]; ok {\n\t\tslowSmoothConst = v\n\t}\n\tif v, ok := configMap[\"verySlowSmoothConst\"]; ok {\n\t\tverySlowSmoothConst = v\n\t}\n\tif v, ok := configMap[\"gpsWeight\"]; ok {\n\t\tgpsWeight = v\n\t}\n\tif fastSmoothConst == 0 || slowSmoothConst == 0 || verySlowSmoothConst == 0 {\n\t\t// This doesn't make sense, means user hasn't set correctly.\n\t\t// Set sensible defaults.\n\t\tfastSmoothConst = fastSmoothConstDefault\n\t\tslowSmoothConst = slowSmoothConstDefault\n\t\tverySlowSmoothConst = verySlowSmoothConstDefault\n\t\tgpsWeight = gpsWeightDefault\n\t}\n}", "func (d *PrefsDialog) onSettingChange() {\n\t// Ignore if the dialog is not initialised yet\n\tif !d.initialised {\n\t\treturn\n\t}\n\tlog.Debug(\"onSettingChange()\")\n\n\t// Collect settings\n\tcfg := config.GetConfig()\n\t// General page\n\tcfg.MpdNetwork = d.MpdNetworkComboBox.GetActiveID()\n\tcfg.MpdSocketPath = util.EntryText(d.MpdPathEntry, \"\")\n\tcfg.MpdHost = util.EntryText(d.MpdHostEntry, \"\")\n\tcfg.MpdPort = int(d.MpdPortAdjustment.GetValue())\n\tif s, err := d.MpdPasswordEntry.GetText(); !errCheck(err, \"MpdPasswordEntry.GetText() failed\") {\n\t\tcfg.MpdPassword = s\n\t}\n\tcfg.MpdAutoConnect = d.MpdAutoConnectCheckButton.GetActive()\n\tcfg.MpdAutoReconnect = d.MpdAutoReconnectCheckButton.GetActive()\n\td.updateGeneralWidgets()\n\n\t// Interface page\n\tif b := d.QueueToolbarCheckButton.GetActive(); b != cfg.QueueToolbar {\n\t\tcfg.QueueToolbar = b\n\t\td.schedulePlayerSettingChange()\n\t}\n\tcfg.TrackDefaultReplace = d.LibraryDefaultReplaceRadioButton.GetActive()\n\tcfg.PlaylistDefaultReplace = d.PlaylistsDefaultReplaceRadioButton.GetActive()\n\tcfg.StreamDefaultReplace = d.StreamsDefaultReplaceRadioButton.GetActive()\n\n\t// Automation page\n\tcfg.SwitchToOnQueueReplace = d.AutomationQueueReplaceSwitchToCheckButton.GetActive()\n\tcfg.PlayOnQueueReplace = d.AutomationQueueReplacePlayCheckButton.GetActive()\n\n\t// Player page\n\tif b := d.PlayerShowAlbumArtTracksCheckButton.GetActive(); b != cfg.PlayerAlbumArtTracks {\n\t\tcfg.PlayerAlbumArtTracks = b\n\t\td.schedulePlayerSettingChange()\n\t}\n\tif b := d.PlayerShowAlbumArtStreamsCheckButton.GetActive(); b != cfg.PlayerAlbumArtStreams {\n\t\tcfg.PlayerAlbumArtStreams = b\n\t\td.schedulePlayerSettingChange()\n\t}\n\tif i := int(d.PlayerAlbumArtSizeAdjustment.GetValue()); i != cfg.PlayerAlbumArtSize {\n\t\tcfg.PlayerAlbumArtSize = i\n\t\td.schedulePlayerSettingChange()\n\t}\n\tif s, err := util.GetTextBufferText(d.PlayerTitleTemplateTextBuffer); !errCheck(err, \"util.GetTextBufferText() failed\") {\n\t\tif s != cfg.PlayerTitleTemplate {\n\t\t\tcfg.PlayerTitleTemplate = s\n\t\t\td.schedulePlayerSettingChange()\n\t\t}\n\t}\n}", "func Config(props map[string]string) error {\n\treturn lm().setNewProperties(props)\n}", "func (c *Config) Adjust(meta *toml.MetaData) {\n\tif len(c.Log.Format) == 0 {\n\t\tc.Log.Format = defaultLogFormat\n\t}\n\tif !meta.IsDefined(\"round\") {\n\t\tconfigutil.AdjustInt(&c.Round, defaultRound)\n\t}\n\n\tif !meta.IsDefined(\"store-count\") {\n\t\tconfigutil.AdjustInt(&c.StoreCount, defaultStoreCount)\n\t}\n\tif !meta.IsDefined(\"region-count\") {\n\t\tconfigutil.AdjustInt(&c.RegionCount, 
defaultRegionCount)\n\t}\n\n\tif !meta.IsDefined(\"key-length\") {\n\t\tconfigutil.AdjustInt(&c.KeyLength, defaultKeyLength)\n\t}\n\n\tif !meta.IsDefined(\"replica\") {\n\t\tconfigutil.AdjustInt(&c.Replica, defaultReplica)\n\t}\n\n\tif !meta.IsDefined(\"leader-update-ratio\") {\n\t\tconfigutil.AdjustFloat64(&c.LeaderUpdateRatio, defaultLeaderUpdateRatio)\n\t}\n\tif !meta.IsDefined(\"epoch-update-ratio\") {\n\t\tconfigutil.AdjustFloat64(&c.EpochUpdateRatio, defaultEpochUpdateRatio)\n\t}\n\tif !meta.IsDefined(\"space-update-ratio\") {\n\t\tconfigutil.AdjustFloat64(&c.SpaceUpdateRatio, defaultSpaceUpdateRatio)\n\t}\n\tif !meta.IsDefined(\"flow-update-ratio\") {\n\t\tconfigutil.AdjustFloat64(&c.FlowUpdateRatio, defaultFlowUpdateRatio)\n\t}\n\tif !meta.IsDefined(\"sample\") {\n\t\tc.Sample = defaultSample\n\t}\n}", "func (r *FloodlightConfigurationsService) Update(profileId int64, floodlightconfiguration *FloodlightConfiguration) *FloodlightConfigurationsUpdateCall {\n\tc := &FloodlightConfigurationsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)}\n\tc.profileId = profileId\n\tc.floodlightconfiguration = floodlightconfiguration\n\treturn c\n}", "func (r *WindowsPhone81VpnConfigurationRequest) Update(ctx context.Context, reqObj *WindowsPhone81VpnConfiguration) error {\n\treturn r.JSONRequest(ctx, \"PATCH\", \"\", reqObj, nil)\n}", "func (adm Admin) SetConfig(cluster string, scope string, properties map[string]string) error {\n\tswitch strings.ToUpper(scope) {\n\tcase \"CLUSTER\":\n\t\tif allow, ok := properties[_allowParticipantAutoJoinKey]; ok {\n\t\t\tbuilder := KeyBuilder{cluster}\n\t\t\tpath := builder.clusterConfig()\n\n\t\t\tif strings.ToLower(allow) == \"true\" {\n\t\t\t\tadm.zkClient.UpdateSimpleField(path, _allowParticipantAutoJoinKey, \"true\")\n\t\t\t}\n\t\t}\n\tcase \"CONSTRAINT\":\n\tcase \"PARTICIPANT\":\n\tcase \"PARTITION\":\n\tcase \"RESOURCE\":\n\t}\n\n\treturn nil\n}", "func ConfigUpdateHandler(w http.ResponseWriter, r *http.Request) {\n\tupdateConfig(w, r, updateConfigURL)\n}", "func (config *internalConfiguration) setConfiguration(newConf *CoreConfiguration) {\n\n\tnewConf.apiPlatformClientID = config.APIPlatformClientID\n\tnewConf.apiPlatformHost = config.APIPlatformHost\n\tnewConf.idcsHost = config.IDCSHost\n\tnewConf.apiPlatformClientSecret = config.APIPlatformClientSecret\n\tnewConf.apiPlatformUser = config.APIPlatformUser\n\tnewConf.apiPlatformUserPassword = config.APIPlatformUserPassword\n\tnewConf.apiPlatformScope = config.APIPlatformScope\n}", "func (c *shortNameAliasCache) updateWithConfigurationFrom(updates *shortNameAliasCache) {\n\tmaps.Copy(c.namedAliases, updates.namedAliases)\n}", "func UpdateConfig(s *Site) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tif s.Config.CanEdit == false {\n\t\t\tlog.Println(\"not allowed to edit this site\")\n\t\t\tw.WriteHeader(http.StatusForbidden)\n\t\t\treturn\n\t\t}\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\n\t\tb, err := ioutil.ReadAll(r.Body)\n\t\tif err != nil {\n\t\t\tlog.Println(\"error reading body\", err)\n\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t\tcfg, err := unmarshalConfig(b)\n\t\tif err != nil {\n\t\t\tlog.Println(\"error unmarshaling\", err)\n\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\n\t\terr = ioutil.WriteFile(cfg.ConfigFilePath, b, 0644)\n\t\tif err != nil {\n\t\t\tlog.Println(\"could not write config file\", err)\n\t\t\thttp.Error(w, err.Error(), 
http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\n\t\ts.Config.Merge(cfg)\n\t\tw.WriteHeader(http.StatusCreated)\n\t\tfmt.Fprint(w, string(b))\n\t}\n}", "func (client AppsClient) UpdateSettingsSender(req *http.Request) (*http.Response, error) {\n\treturn autorest.SendWithSender(client, req,\n\t\tautorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))\n}", "func (tx *Tx) UpdateConfiguration(configUpdates *Configuration) (*Configuration, bool, error) {\n\tupdates := make(map[string]interface{})\n\treload := false\n\tif configUpdates.PrivateKey != \"\" || configUpdates.PublicCertificate != \"\" {\n\t\torigConfig, err := tx.GetConfiguration()\n\t\tif err != nil {\n\t\t\treturn nil, false, err\n\t\t}\n\t\tprivateKey := origConfig.PrivateKey\n\t\tpublicCert := origConfig.PublicCertificate\n\t\tif configUpdates.PrivateKey != \"\" && configUpdates.PrivateKey != privateKey {\n\t\t\tprivateKey = configUpdates.PrivateKey\n\t\t\treload = true\n\t\t}\n\t\tif configUpdates.PublicCertificate != \"\" && configUpdates.PublicCertificate != publicCert {\n\t\t\tpublicCert = configUpdates.PublicCertificate\n\t\t\treload = true\n\t\t}\n\t\t// Verify the key pair is valid before storing to database\n\t\t_, err = tls.X509KeyPair([]byte(publicCert), []byte(privateKey))\n\t\tif err != nil {\n\t\t\treturn nil, false, errors.Errorf(errors.CodeBadRequest, \"Unable to update certificate: %s\", err)\n\t\t}\n\t\tupdates[\"private_key\"] = privateKey\n\t\tupdates[\"public_certificate\"] = publicCert\n\t}\n\tif configUpdates.EULAAccepted {\n\t\tupdates[\"eula_accepted\"] = true\n\t}\n\tif len(configUpdates.SessionAuthKey) > 0 {\n\t\tupdates[\"session_auth_key\"] = configUpdates.SessionAuthKey\n\t}\n\tif len(configUpdates.SessionCryptKey) > 0 {\n\t\tupdates[\"session_crypt_key\"] = configUpdates.SessionCryptKey\n\t}\n\tif len(updates) == 0 {\n\t\treturn nil, false, errors.New(errors.CodeBadRequest, \"No valid configuration updates supplied\")\n\t}\n\tassignments := make([]string, len(updates))\n\tvalues := make([]interface{}, len(updates))\n\ti := 0\n\tfor colName, value := range updates {\n\t\tassignments[i] = fmt.Sprintf(\"%s = $%d\", colName, i+1)\n\t\tvalues[i] = value\n\t\ti++\n\t}\n\tvar config Configuration\n\tquery := fmt.Sprintf(\"UPDATE configuration SET %s RETURNING *;\", strings.Join(assignments, \", \"))\n\terr := tx.Get(&config, query, values...)\n\tif err != nil {\n\t\treturn nil, false, errors.InternalError(err)\n\t}\n\treturn &config, reload, nil\n}", "func (client AppsClient) UpdateSettingsPreparer(ctx context.Context, appID uuid.UUID, applicationSettingUpdateObject ApplicationSettingUpdateObject) (*http.Request, error) {\n\turlParameters := map[string]interface{}{\n\t\t\"AzureRegion\": client.AzureRegion,\n\t}\n\n\tpathParameters := map[string]interface{}{\n\t\t\"appId\": autorest.Encode(\"path\", appID),\n\t}\n\n\tpreparer := autorest.CreatePreparer(\n\t\tautorest.AsContentType(\"application/json; charset=utf-8\"),\n\t\tautorest.AsPut(),\n\t\tautorest.WithCustomBaseURL(\"https://{AzureRegion}.api.cognitive.microsoft.com/luis/api/v2.0\", urlParameters),\n\t\tautorest.WithPathParameters(\"/apps/{appId}/settings\", pathParameters),\n\t\tautorest.WithJSON(applicationSettingUpdateObject))\n\treturn preparer.Prepare((&http.Request{}).WithContext(ctx))\n}", "func (s *Syncthing) UpdateConfig() error {\n\tbs, err := s.GetLocalConfigXML()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err := ioutil.WriteFile(filepath.Join(s.LocalHome, configFile), bs, 
0700); err != nil {\n\t\treturn fmt.Errorf(\"failed to write syncthing configuration file: %w\", err)\n\t}\n\n\treturn nil\n}", "func (i *ImageService) UpdateConfig(maxDownloads, maxUploads int) {\n\tpanic(\"not implemented\")\n}", "func Update(mutate func(cfg *Config) error) error {\n\treturn m.Update(func(ycfg yamlconf.Config) error {\n\t\treturn mutate(ycfg.(*Config))\n\t})\n}", "func (c *client) UpdateConfig(apiKey string, secretKey string, opts ...ClientOption) error {\n\tswitch {\n\tcase apiKey == \"\":\n\t\treturn errors.InvalidParameterError{Parameter: \"apiKey\", Reason: \"cannot be empty\"}\n\tcase secretKey == \"\":\n\t\treturn errors.InvalidParameterError{Parameter: \"secretKey\", Reason: \"cannot be empty\"}\n\t}\n\n\tc.apiKey = apiKey\n\tc.secretKey = secretKey\n\n\tfor _, opt := range opts {\n\t\tif err := opt(c); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func UpdateSettingsFromFile(filename string) error {\n\tfile, err := os.Open(filename)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer file.Close()\n\tdecoder := json.NewDecoder(file)\n\terr = decoder.Decode(&Settings)\n\n\treturn err\n}", "func (kv *ShardKV) TryUpdateConfig () {\n\n kv.configMtx.Lock()\n curConfig := kv.config\n kv.configMtx.Unlock()\n\n newConfig := kv.mck.Query(curConfig.Num+1)\n if newConfig.Num == curConfig.Num+1 {\n if _, isLeader := kv.rf.GetState(); isLeader {\n recvFinished := false\n kv.configMtx.Lock()\n recvFinished = kv.shardToRecv.Empty() == true\n kv.configMtx.Unlock()\n\n if recvFinished {\n request := CfgChangeArgs{newConfig}\n kv.rf.Start(Op{Type: ReqCfgChange, ArgsCfgChange: request})\n }\n }\n }\n}", "func (c *StreamerController) UpdateSyncCfg(syncCfg replication.BinlogSyncerConfig, fromDB *dbconn.UpStreamConn) {\n\tc.Lock()\n\tc.fromDB = fromDB\n\tc.syncCfg = syncCfg\n\tc.Unlock()\n}", "func (auth Authenticate) UpdateAccountSettings(updatedAccount *types.Account, session *types.Session) (string, error) {\n\taccount, err := auth.CheckAccountSession(session)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tres, err := manager.AccountManager{}.UpdateAccountSettings(updatedAccount, account, auth.DB, auth.Cache)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn res, nil\n}", "func (h *Handler) UpdateConfig(config *multichannelfanout.Config) error {\n\tif config == nil {\n\t\treturn errors.New(\"nil config\")\n\t}\n\n\th.updateLock.Lock()\n\tdefer h.updateLock.Unlock()\n\n\tih := h.getMultiChannelFanoutHandler()\n\tif diff := ih.ConfigDiff(*config); diff != \"\" {\n\t\th.logger.Info(\"Updating config (-old +new)\", zap.String(\"diff\", diff))\n\t\tnewIh, err := ih.CopyWithNewConfig(*config)\n\t\tif err != nil {\n\t\t\th.logger.Info(\"Unable to update config\", zap.Error(err), zap.Any(\"config\", config))\n\t\t\treturn err\n\t\t}\n\t\th.setMultiChannelFanoutHandler(newIh)\n\t}\n\treturn nil\n}", "func (ssc *StorageSmartContract) updateConfig(t *transaction.Transaction,\n\tinput []byte, balances chainState.StateContextI) (resp string, err error) {\n\n\tif t.ClientID != owner {\n\t\treturn \"\", common.NewError(\"update_config\",\n\t\t\t\"unauthorized access - only the owner can update the variables\")\n\t}\n\n\tvar conf *scConfig\n\tif conf, err = ssc.getConfig(balances, true); err != nil {\n\t\treturn \"\", common.NewError(\"update_config\",\n\t\t\t\"can't get config: \"+err.Error())\n\t}\n\n\tvar update scConfig\n\tif err = update.Decode(input); err != nil {\n\t\treturn \"\", common.NewError(\"update_config\", err.Error())\n\t}\n\n\tif err = 
update.validate(); err != nil {\n\t\treturn\n\t}\n\n\tupdate.Minted = conf.Minted\n\n\t_, err = balances.InsertTrieNode(scConfigKey(ssc.ID), &update)\n\tif err != nil {\n\t\treturn \"\", common.NewError(\"update_config\", err.Error())\n\t}\n\n\treturn string(update.Encode()), nil\n}", "func ModifyConfig() error {\n\treturn nil\n}", "func UpdateTrainerConfig(steps int, reg, learnRate float64) {\n\tcfg.Steps = steps\n\tcfg.Regularization = reg\n\tcfg.LearningRate = learnRate\n}", "func (app *App) Settings(settings *Settings) {\n\tapp.settings.Update(settings)\n}", "func (r PutPlaybackConfigurationRequest) Send(ctx context.Context) (*PutPlaybackConfigurationResponse, error) {\n\tr.Request.SetContext(ctx)\n\terr := r.Request.Send()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresp := &PutPlaybackConfigurationResponse{\n\t\tPutPlaybackConfigurationOutput: r.Request.Data.(*PutPlaybackConfigurationOutput),\n\t\tresponse: &aws.Response{Request: r.Request},\n\t}\n\n\treturn resp, nil\n}", "func settingsPut(c *gin.Context) {\n\tsetting := models.Setting{}\n\tif err := c.ShouldBindJSON(&setting); err != nil {\n\t\tabortWithError(c, http.StatusBadRequest, err)\n\t\treturn\n\t}\n\tsetting, err := models.SettingsDB.Update(setting)\n\tif err != nil {\n\t\tabortWithError(c, http.StatusInternalServerError, err)\n\t\treturn\n\t}\n\tc.JSON(http.StatusOK, setting)\n}", "func (l *LambdaClient) updateFunctionSettings(function *FunctionConfig) error {\n\tparams := &lambda.UpdateFunctionConfigurationInput{\n\t\tFunctionName: aws.String(function.Name),\n\t\tRole: aws.String(function.RoleARN),\n\t\tTimeout: aws.Int64(function.Timeout),\n\t\tMemorySize: aws.Int64(function.MemorySize),\n\t}\n\t_, err := l.Client.UpdateFunctionConfiguration(params)\n\treturn err\n}", "func (u *Projects) Settings(c echo.Context) error {\n\tpc, ok := c.(*middlewares.ProjectContext)\n\tif !ok {\n\t\terr := errors.New(\"Can not cast context\")\n\t\tlogging.SharedInstance().ControllerWithStacktrace(err, c).Error(err)\n\t\treturn err\n\t}\n\tp := pc.Project\n\n\tsettingsProjectForm := new(SettingsProjectForm)\n\terr := c.Bind(settingsProjectForm)\n\tif err != nil {\n\t\terr := errors.Wrap(err, \"wrong parameter\")\n\t\tlogging.SharedInstance().ControllerWithStacktrace(err, c).Error(err)\n\t\treturn err\n\t}\n\tlogging.SharedInstance().Controller(c).Debugf(\"post edit project parameter: %+v\", settingsProjectForm)\n\tif err := board.UpdateProject(\n\t\tp,\n\t\tp.Title,\n\t\tp.Description,\n\t\tsettingsProjectForm.ShowIssues,\n\t\tsettingsProjectForm.ShowPullRequests,\n\t); err != nil {\n\t\tlogging.SharedInstance().ControllerWithStacktrace(err, c).Error(err)\n\t\treturn err\n\t}\n\tlogging.SharedInstance().Controller(c).Info(\"success to update project\")\n\n\tjsonProject, err := views.ParseProjectJSON(p)\n\tif err != nil {\n\t\tlogging.SharedInstance().Controller(c).Error(err)\n\t\treturn err\n\t}\n\treturn c.JSON(http.StatusOK, jsonProject)\n}", "func (d DB) SetRulesConfig(ctx context.Context, userID string, oldConfig, newConfig userconfig.RulesConfig) (bool, error) {\n\tupdated := false\n\terr := d.Transaction(func(tx DB) error {\n\t\tcurrent, err := d.GetConfig(ctx, userID)\n\t\tif err != nil && err != sql.ErrNoRows {\n\t\t\treturn err\n\t\t}\n\t\t// The supplied oldConfig must match the current config. If no config\n\t\t// exists, then oldConfig must be nil. 
Otherwise, it must exactly\n\t\t// equal the existing config.\n\t\tif !((err == sql.ErrNoRows && oldConfig.Files == nil) || oldConfig.Equal(current.Config.RulesConfig)) {\n\t\t\treturn nil\n\t\t}\n\t\tnew := userconfig.Config{\n\t\t\tAlertmanagerConfig: current.Config.AlertmanagerConfig,\n\t\t\tRulesConfig: newConfig,\n\t\t}\n\t\tupdated = true\n\t\treturn d.SetConfig(ctx, userID, new)\n\t})\n\treturn updated, err\n}", "func (i *TiFlashInstance) setTLSConfig(ctx context.Context, enableTLS bool, configs map[string]any, paths meta.DirPaths) (map[string]any, error) {\n\tif enableTLS {\n\t\tif configs == nil {\n\t\t\tconfigs = make(map[string]any)\n\t\t}\n\t\tconfigs[\"security.ca_path\"] = fmt.Sprintf(\n\t\t\t\"%s/tls/%s\",\n\t\t\tpaths.Deploy,\n\t\t\tTLSCACert,\n\t\t)\n\t\tconfigs[\"security.cert_path\"] = fmt.Sprintf(\n\t\t\t\"%s/tls/%s.crt\",\n\t\t\tpaths.Deploy,\n\t\t\ti.Role())\n\t\tconfigs[\"security.key_path\"] = fmt.Sprintf(\n\t\t\t\"%s/tls/%s.pem\",\n\t\t\tpaths.Deploy,\n\t\t\ti.Role())\n\t} else {\n\t\t// drainer tls config list\n\t\ttlsConfigs := []string{\n\t\t\t\"security.ca_path\",\n\t\t\t\"security.cert_path\",\n\t\t\t\"security.key_path\",\n\t\t}\n\t\t// delete TLS configs\n\t\tif configs != nil {\n\t\t\tfor _, config := range tlsConfigs {\n\t\t\t\tdelete(configs, config)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn configs, nil\n}", "func (a *SyncApiService) UpdateSyncSettings(ctx context.Context, syncSettings SyncSettingsExtended) ( *http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Put\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/platform/3/sync/settings\"\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{ \"application/json\", }\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\n\t\t\"application/json\",\n\t\t}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\t// body params\n\tlocalVarPostBody = &syncSettings\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\tdefer localVarHttpResponse.Body.Close()\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tbodyBytes, _ := ioutil.ReadAll(localVarHttpResponse.Body)\n\t\treturn localVarHttpResponse, reportError(\"Status: %v, Body: %s\", localVarHttpResponse.Status, bodyBytes)\n\t}\n\n\treturn localVarHttpResponse, err\n}", "func handleUpdateSettings(msg []byte, id int) {\n\t// decode JSON request\n\tvar request UpdateSettingsRequest\n\terr := json.Unmarshal(msg, &request)\n\tif err != nil {\n\t\tresponse := UpdateSettingsResponse{OK: false, Cmd: \"setting\", Setting: 
request.Setting}\n\t\tsendJsonToOnlineID(id, &response)\n\t\treturn\n\t}\n\n\t// update database\n\tif request.Setting.Sign != \"\" {\n\t\terr = database.SetSignature(id, request.Setting.Sign)\n\t\tif err != nil {\n\t\t\tresponse := UpdateSettingsResponse{OK: false, Cmd: \"setting\", Setting: request.Setting}\n\t\t\tsendJsonToOnlineID(id, &response)\n\t\t\treturn\n\t\t}\n\t}\n\tif request.Setting.Avatar != \"\" {\n\t\terr = database.SetAvatar(id, request.Setting.Avatar)\n\t\tif err != nil {\n\t\t\tresponse := UpdateSettingsResponse{OK: false, Cmd: \"setting\", Setting: request.Setting}\n\t\t\tsendJsonToOnlineID(id, &response)\n\t\t\treturn\n\t\t}\n\t}\n\n\t// notify friends\n\tfriendships, err := database.GetFriendships(id)\n\tif err == nil {\n\t\tif request.Setting.Sign != \"\" {\n\t\t\tfor i := 0; i < len(friendships); i++ {\n\t\t\t\tfmt.Printf(\"%d notifies %d of a signature change\\n\", id, friendships[i].FriendID)\n\t\t\t\tsendJsonToUnknownStatusID(\n\t\t\t\t\tfriendships[i].FriendID,\n\t\t\t\t\tSignCmd{Cmd: \"change_sign\", Who: id, Sign: request.Setting.Sign},\n\t\t\t\t)\n\t\t\t}\n\t\t}\n\t\tif request.Setting.Avatar != \"\" {\n\t\t\tfor i := 0; i < len(friendships); i++ {\n\t\t\t\tfmt.Printf(\"%d notifies %d of an avatar change\\n\", id, friendships[i].FriendID)\n\t\t\t\tsendJsonToUnknownStatusID(\n\t\t\t\t\tfriendships[i].FriendID,\n\t\t\t\t\tAvatarCmd{Cmd: \"change_avatar\", Who: id, Avatar: request.Setting.Avatar},\n\t\t\t\t)\n\t\t\t}\n\t\t}\n\t} else {\n\t\tfmt.Printf(\"getFriendships: %s\", err.Error())\n\t}\n\t// send success response\n\tresponse := UpdateSettingsResponse{OK: true, Cmd: \"setting\", Setting: request.Setting}\n\tsendJsonToOnlineID(id, &response)\n}", "func (z *PasswordService) SetConfig(configData []byte, blackList []string) error {\n\tvar cfg *PasswordRules\n\n\tz.Add(\"CCP\", ComfirmPassword)\n\tz.Add(\"CL\", CheckLength)\n\tz.Add(\"CUN\", CheckUserID)\n\tz.Add(\"CUC\", CheckUppercase)\n\tz.Add(\"CLC\", CheckLowercase)\n\tz.Add(\"CNC\", CheckNumeric)\n\tz.Add(\"CSC\", CheckSpecialChar)\n\tz.Add(\"CWS\", CheckWhiteSpace)\n\tz.Add(\"CH\", CheckHistory)\n\tz.Add(\"CBL\", CheckBlackList)\n\n\terr := json.Unmarshal(configData, &cfg)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcfg.BlackList = blackList\n\n\tz.config = cfg\n\n\treturn nil\n}", "func (as *AdminServer) Settings(w http.ResponseWriter, r *http.Request) {\n\tswitch {\n\tcase r.Method == \"GET\":\n\t\tparams := newTemplateParams(r)\n\t\tparams.Title = \"Settings\"\n\t\tsession := ctx.Get(r, \"session\").(*sessions.Session)\n\t\tsession.Save(r, w)\n\t\tgetTemplate(w, \"settings\").ExecuteTemplate(w, \"base\", params)\n\tcase r.Method == \"POST\":\n\t\tu := ctx.Get(r, \"user\").(models.User)\n\t\tcurrentPw := r.FormValue(\"current_password\")\n\t\tnewPassword := r.FormValue(\"new_password\")\n\t\tconfirmPassword := r.FormValue(\"confirm_new_password\")\n\t\t// Check the current password\n\t\terr := auth.ValidatePassword(currentPw, u.Hash)\n\t\tmsg := models.Response{Success: true, Message: \"Settings Updated Successfully\"}\n\t\tif err != nil {\n\t\t\tmsg.Message = err.Error()\n\t\t\tmsg.Success = false\n\t\t\tapi.JSONResponse(w, msg, http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\t\tnewHash, err := auth.ValidatePasswordChange(u.Hash, newPassword, confirmPassword)\n\t\tif err != nil {\n\t\t\tmsg.Message = err.Error()\n\t\t\tmsg.Success = false\n\t\t\tapi.JSONResponse(w, msg, http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\t\tu.Hash = string(newHash)\n\t\tif err = models.PutUser(&u); err != nil {\n\t\t\tmsg.Message = err.Error()\n\t\t\tmsg.Success = 
false\n\t\t\tapi.JSONResponse(w, msg, http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t\tapi.JSONResponse(w, msg, http.StatusOK)\n\t}\n}", "func (a *HyperflexApiService) UpdateHyperflexUcsmConfigPolicy(ctx context.Context, moid string) ApiUpdateHyperflexUcsmConfigPolicyRequest {\n\treturn ApiUpdateHyperflexUcsmConfigPolicyRequest{\n\t\tApiService: a,\n\t\tctx: ctx,\n\t\tmoid: moid,\n\t}\n}", "func (sc *SimConfig) Adjust() {\n\tadjustDuration(&sc.SimTickInterval, defaultSimTickInterval)\n\tadjustDuration(&sc.NormTickInterval, defaultNormTickInterval)\n\tadjustUint64(&sc.StoreCapacityGB, defaultStoreCapacityGB)\n\tadjustUint64(&sc.StoreAvailableGB, defaultStoreAvailableGB)\n\tadjustInt64(&sc.StoreIOMBPerSecond, defaultStoreIOMBPerSecond)\n\tadjustString(&sc.StoreVersion, defaultStoreVersion)\n}", "func (config *Configuration) ApplyOverrides(overrides map[string]string) error {\n\tmatch := func(s1 string) func(string) bool {\n\t\treturn func(s2 string) bool {\n\t\t\treturn strings.ToLower(s2) == s1\n\t\t}\n\t}\n\telem := reflect.ValueOf(config).Elem()\n\tfor k, v := range overrides {\n\t\tsplit := strings.Split(strings.ToLower(k), \".\")\n\t\tif len(split) != 2 {\n\t\t\treturn fmt.Errorf(\"Bad option format: %s\", k)\n\t\t}\n\t\tfield := elem.FieldByNameFunc(match(split[0]))\n\t\tif !field.IsValid() {\n\t\t\treturn fmt.Errorf(\"Unknown config field: %s\", split[0])\n\t\t} else if field.Kind() != reflect.Struct {\n\t\t\treturn fmt.Errorf(\"Unsettable config field: %s\", split[0])\n\t\t}\n\t\tfield = field.FieldByNameFunc(match(split[1]))\n\t\tif !field.IsValid() {\n\t\t\treturn fmt.Errorf(\"Unknown config field: %s\", split[1])\n\t\t}\n\t\tswitch field.Kind() {\n\t\tcase reflect.String:\n\t\t\tfield.Set(reflect.ValueOf(v))\n\t\tcase reflect.Bool:\n\t\t\tv = strings.ToLower(v)\n\t\t\t// Mimics the set of truthy things gcfg accepts in our config file.\n\t\t\tfield.SetBool(v == \"true\" || v == \"yes\" || v == \"on\" || v == \"1\")\n\t\tcase reflect.Int:\n\t\t\ti, err := strconv.Atoi(v)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"Invalid value for an integer field: %s\", v)\n\t\t\t}\n\t\t\tfield.Set(reflect.ValueOf(i))\n\t\tcase reflect.Int64:\n\t\t\tvar d cli.Duration\n\t\t\tif err := d.UnmarshalText([]byte(v)); err != nil {\n\t\t\t\treturn fmt.Errorf(\"Invalid value for a duration field: %s\", v)\n\t\t\t}\n\t\t\tfield.Set(reflect.ValueOf(d))\n\t\tcase reflect.Slice:\n\t\t\t// We only have to worry about slices of strings. Comma-separated values are accepted.\n\t\t\tfield.Set(reflect.ValueOf(strings.Split(v, \",\")))\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"Can't override config field %s (is %s)\", k, field.Kind())\n\t\t}\n\t}\n\treturn nil\n}", "func (a *apiServer) updateConfig(config *auth.AuthConfig) error {\n\tif config != nil {\n\t\tnewConfig, err := validateConfig(config, internal)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\ta.configCache = newConfig\n\t} else {\n\t\ta.configCache = nil\n\t}\n\tif a.configCache != nil && a.configCache.IDPName != \"\" {\n\t\t// construct SAML handler\n\t\ta.samlSP = &saml.ServiceProvider{\n\t\t\tLogger: logrus.New(),\n\t\t\tIDPMetadata: a.configCache.IDPMetadata,\n\t\t\tAcsURL: *a.configCache.ACSURL,\n\t\t\tMetadataURL: *a.configCache.MetadataURL,\n\n\t\t\t// Not set:\n\t\t\t// Key: Private key for Pachyderm ACS. Unclear if needed\n\t\t\t// Certificate: Public key for Pachyderm ACS. 
Unclear if needed\n\t\t\t// ForceAuthn: (whether users need to re-authenticate with the IdP, even\n\t\t\t// if they already have a session--leaving this false)\n\t\t\t// AuthnNameIDFormat: (format the ACS expects the AuthnName to be in)\n\t\t\t// MetadataValidDuration: (how long the SP endpoints are valid? Returned\n\t\t\t// by the Metadata service)\n\t\t}\n\t\ta.redirectAddress = a.configCache.DashURL // Set redirect address from config as well\n\t} else {\n\t\ta.samlSP = nil\n\t\ta.redirectAddress = nil\n\t}\n\treturn nil\n}", "func (m *Manager) ApplyConfig(cfg map[string]sd_config.ServiceDiscoveryConfig) error {\n\tm.mtx.Lock()\n\tdefer m.mtx.Unlock()\n\n\tm.cancelDiscoverers()\n\tfor name, scfg := range cfg {\n\t\tfor provName, prov := range m.providersFromConfig(scfg) {\n\t\t\tm.startProvider(m.ctx, poolKey{setName: name, provider: provName}, prov)\n\t\t}\n\t}\n\n\treturn nil\n}", "func (c *Controller) applyConfiguration(admin submarine.AdminInterface, cluster *rapi.SubmarineCluster) (bool, error) {\n\tglog.Info(\"applyConfiguration START\")\n\tdefer glog.Info(\"applyConfiguration STOP\")\n\n\tasChanged := false\n\n\t// expected replication factor and number of master nodes\n\tcReplicaFactor := *cluster.Spec.ReplicationFactor\n\tcNbMaster := *cluster.Spec.NumberOfMaster\n\t// Adapt, convert CR to structure in submarine package\n\trCluster, nodes, err := newSubmarineCluster(admin, cluster)\n\tif err != nil {\n\t\tglog.Errorf(\"Unable to create the SubmarineCluster view, error:%v\", err)\n\t\treturn false, err\n\t}\n\t// PodTemplate changes require rolling updates\n\tif needRollingUpdate(cluster) {\n\t\tif setRollingUpdateCondition(&cluster.Status, true) {\n\t\t\tif cluster, err = c.updateHandler(cluster); err != nil {\n\t\t\t\treturn false, err\n\t\t\t}\n\t\t}\n\n\t\tglog.Info(\"applyConfiguration needRollingUpdate\")\n\t\treturn c.manageRollingUpdate(admin, cluster, rCluster, nodes)\n\t}\n\tif setRollingUpdateCondition(&cluster.Status, false) {\n\t\tif cluster, err = c.updateHandler(cluster); err != nil {\n\t\t\treturn false, err\n\t\t}\n\t}\n\n\t// if the number of Pods is greater than expected\n\tif needLessPods(cluster) {\n\t\tif setRebalancingCondition(&cluster.Status, true) {\n\t\t\tif cluster, err = c.updateHandler(cluster); err != nil {\n\t\t\t\treturn false, err\n\t\t\t}\n\t\t}\n\t\tglog.Info(\"applyConfiguration needLessPods\")\n\t\t// Configure Submarine cluster\n\t\treturn c.managePodScaleDown(admin, cluster, rCluster, nodes)\n\t}\n\t// If it is not a rolling update, modify the Condition\n\tif setRebalancingCondition(&cluster.Status, false) {\n\t\tif cluster, err = c.updateHandler(cluster); err != nil {\n\t\t\treturn false, err\n\t\t}\n\t}\n\n\tclusterStatus := &cluster.Status.Cluster\n\tif (clusterStatus.NbPods - clusterStatus.NbSubmarineRunning) != 0 {\n\t\tglog.V(3).Infof(\"All pods not ready wait to be ready, nbPods: %d, nbPodsReady: %d\", clusterStatus.NbPods, clusterStatus.NbSubmarineRunning)\n\t\treturn false, err\n\t}\n\n\t// First, we define the new masters\n\t// Select the desired number of Masters and assign Hashslots to each Master. 
The Master will be distributed to different K8S nodes as much as possible\n\t// Set the cluster status to Calculating Rebalancing\n\tnewMasters, curMasters, allMaster, err := clustering.DispatchMasters(rCluster, nodes, cNbMaster, admin)\n\tif err != nil {\n\t\tglog.Errorf(\"Cannot dispatch slots to masters: %v\", err)\n\t\trCluster.Status = rapi.ClusterStatusError\n\t\treturn false, err\n\t}\n\t// If the number of new and old masters is not the same\n\tif len(newMasters) != len(curMasters) {\n\t\tasChanged = true\n\t}\n\n\t// Second, select nodes that are already slaves\n\tcurrentSlaveNodes := nodes.FilterByFunc(submarine.IsSlave)\n\n\t//New slaves are nodes that are currently masters with no slots\n\tnewSlave := nodes.FilterByFunc(func(nodeA *submarine.Node) bool {\n\t\tfor _, nodeB := range newMasters {\n\t\t\tif nodeA.ID == nodeB.ID {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t\tfor _, nodeB := range currentSlaveNodes {\n\t\t\tif nodeA.ID == nodeB.ID {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t\treturn true\n\t})\n\n\t// Depending on whether we scale up or down, we will dispatch slaves before/after the dispatch of slots\n\tif cNbMaster < int32(len(curMasters)) {\n\t\t// this happens usually after a scale down of the cluster\n\t\t// we should dispatch slots before dispatching slaves\n\t\tif err := clustering.DispatchSlotToNewMasters(rCluster, admin, newMasters, curMasters, allMaster); err != nil {\n\t\t\tglog.Error(\"Unable to dispatch slot on new master, err:\", err)\n\t\t\treturn false, err\n\t\t}\n\n\t\t// assign master/slave roles\n\t\tnewSubmarineSlavesByMaster, bestEffort := clustering.PlaceSlaves(rCluster, newMasters, currentSlaveNodes, newSlave, cReplicaFactor)\n\t\tif bestEffort {\n\t\t\trCluster.NodesPlacement = rapi.NodesPlacementInfoBestEffort\n\t\t}\n\n\t\tif err := clustering.AttachingSlavesToMaster(rCluster, admin, newSubmarineSlavesByMaster); err != nil {\n\t\t\tglog.Error(\"Unable to dispatch slave on new master, err:\", err)\n\t\t\treturn false, err\n\t\t}\n\t} else {\n\t\t// We are scaling up the nbmaster or the nbmaster doesn't change.\n\t\t// assign master/slave roles\n\t\tnewSubmarineSlavesByMaster, bestEffort := clustering.PlaceSlaves(rCluster, newMasters, currentSlaveNodes, newSlave, cReplicaFactor)\n\t\tif bestEffort {\n\t\t\trCluster.NodesPlacement = rapi.NodesPlacementInfoBestEffort\n\t\t}\n\n\t\tif err := clustering.AttachingSlavesToMaster(rCluster, admin, newSubmarineSlavesByMaster); err != nil {\n\t\t\tglog.Error(\"Unable to dispatch slave on new master, err:\", err)\n\t\t\treturn false, err\n\t\t}\n\n\t\tif err := clustering.DispatchSlotToNewMasters(rCluster, admin, newMasters, curMasters, allMaster); err != nil {\n\t\t\tglog.Error(\"Unable to dispatch slot on new master, err:\", err)\n\t\t\treturn false, err\n\t\t}\n\t}\n\n\tglog.V(4).Infof(\"new nodes status: \\n %v\", nodes)\n\n\t// Set the cluster status\n\trCluster.Status = rapi.ClusterStatusOK\n\t// wait a bit for the cluster to propagate configuration to reduce warning logs because of temporary inconsistency\n\ttime.Sleep(1 * time.Second)\n\treturn asChanged, nil\n}", "func SetConfig(config *Config, name ...string) {\n\tgroup := DefaultGroupName\n\tif len(name) > 0 {\n\t\tgroup = name[0]\n\t}\n\tconfigs.Set(group, config)\n\tinstances.Remove(group)\n\n\tintlog.Printf(`SetConfig for group \"%s\": %+v`, group, config)\n}", "func (c *Config) UpdateRules() error {\n\tcfg, err := c.readConfig()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn c.parseRules(cfg)\n}", "func (api *AdminApi) 
ModifySettings(options ...func(*url.Values)) (*SettingsResult, error) {\n\topts := url.Values{}\n\tfor _, o := range options {\n\t\to(&opts)\n\t}\n\t_, body, err := api.SignedCall(\"POST\", \"/admin/v1/settings\", opts, duoapi.UseTimeout)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tret := &SettingsResult{}\n\tif err = json.Unmarshal(body, ret); err != nil {\n\t\treturn nil, err\n\t}\n\treturn ret, nil\n}", "func (q *QueryResolver) UpdateUserSettings(ctx context.Context, args *updateUserSettingsArgs) (bool, error) {\n\tsCtx, err := authcontext.FromContext(ctx)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tgrpcAPI := q.Env.ProfileServiceClient\n\n\tkeys := make([]string, len(args.Keys))\n\tfor i := range args.Keys {\n\t\tkeys[i] = *args.Keys[i]\n\t}\n\tvalues := make([]string, len(args.Values))\n\tfor i := range args.Values {\n\t\tvalues[i] = *args.Values[i]\n\t}\n\n\tresp, err := grpcAPI.UpdateUserSettings(ctx, &profilepb.UpdateUserSettingsRequest{\n\t\tID: utils.ProtoFromUUIDStrOrNil(sCtx.Claims.GetUserClaims().UserID),\n\t\tKeys: keys,\n\t\tValues: values,\n\t})\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\treturn resp.OK, nil\n}", "func (m *IntentsDeviceManagementIntentItemRequestBuilder) UpdateSettings()(*IntentsItemUpdateSettingsRequestBuilder) {\n return NewIntentsItemUpdateSettingsRequestBuilderInternal(m.BaseRequestBuilder.PathParameters, m.BaseRequestBuilder.RequestAdapter)\n}", "func (s *Service) ApplyConfiguration(config interface{}) error {\n\tif err := s.CheckConfiguration(config); err != nil {\n\t\treturn err\n\t}\n\tvar ok bool\n\ts.Cfg, ok = config.(Configuration)\n\tif !ok {\n\t\treturn fmt.Errorf(\"ApplyConfiguration> Invalid Elasticsearch configuration\")\n\t}\n\ts.Router = &api.Router{\n\t\tMux: mux.NewRouter(),\n\t\tConfig: s.Cfg.HTTP,\n\t}\n\ts.HTTPURL = s.Cfg.URL\n\ts.ServiceName = s.Cfg.Name\n\ts.ServiceType = sdk.TypeElasticsearch\n\ts.MaxHeartbeatFailures = s.Cfg.API.MaxHeartbeatFailures\n\n\treturn nil\n}" ]
[ "0.5717687", "0.542755", "0.53194195", "0.52470493", "0.5154396", "0.5133524", "0.5035096", "0.5025223", "0.49815646", "0.48762172", "0.4805697", "0.4791137", "0.47735512", "0.47543612", "0.4748372", "0.47025505", "0.4683927", "0.46761623", "0.46550635", "0.46143624", "0.46081856", "0.46013725", "0.45959035", "0.45848483", "0.4560347", "0.45364776", "0.45358396", "0.45282245", "0.45216438", "0.45059568", "0.44913286", "0.44753814", "0.44643748", "0.44612184", "0.44586822", "0.4454429", "0.44425604", "0.44381538", "0.44341478", "0.44329172", "0.44256285", "0.4421108", "0.44184482", "0.4409771", "0.44065696", "0.43997288", "0.439469", "0.4393831", "0.43872678", "0.4381823", "0.43794388", "0.43740532", "0.4372249", "0.4346088", "0.43448922", "0.43428957", "0.4338576", "0.43274823", "0.4325513", "0.43154958", "0.4314346", "0.43138766", "0.43064803", "0.43059126", "0.4305231", "0.4300383", "0.42993444", "0.4290218", "0.4281455", "0.42791525", "0.42676917", "0.42648098", "0.42560112", "0.42481306", "0.42408735", "0.4236885", "0.42325824", "0.42320934", "0.42278707", "0.42216498", "0.42098236", "0.4202707", "0.4200787", "0.4193545", "0.4192273", "0.4184819", "0.4182416", "0.41807687", "0.41783962", "0.4160743", "0.41602704", "0.41587344", "0.41580543", "0.41535783", "0.4151563", "0.41514027", "0.41498643", "0.4145322", "0.41424724", "0.4140309" ]
0.6018916
0
routeActiveHandler handles new route
func (rc *RouteConfig) RouteHandler(writer http.ResponseWriter, request *http.Request) { log := rc.route.Logger. WithEventSource(rc.route.EventSource.Name). WithEndpoint(rc.route.Webhook.Endpoint). WithPort(rc.route.Webhook.Port) log.Info("request received") if !helper.ActiveEndpoints[rc.route.Webhook.Endpoint].Active { log.Info("endpoint is not active") common.SendErrorResponse(writer, "") return } body, err := ioutil.ReadAll(request.Body) if err != nil { log.WithError(err).Error("failed to parse request body") common.SendErrorResponse(writer, "") return } helper.ActiveEndpoints[rc.route.Webhook.Endpoint].DataCh <- body log.Info("request successfully processed") common.SendSuccessResponse(writer, "") }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (r *router) handle(c *Context){\n\tn, params := r.getRoute(c.Method, c.Path)\n\tif n != nil {\n\t\tc.Params = params\n\t\t// connection between Context and Router!\n\t\t// it's important\n\t\tkey := c.Method + \"-\" + n.pattern\n\t\t// both kinds of handler functions are put together here\n\t\tc.handlers = append(c.handlers, r.handlers[key])\n\t\t//r.handlers[key](c)\n\t}else{\n\t\tc.handlers = append(c.handlers, func(c *Context){\n\t\t\tc.String(http.StatusNotFound, \"404 NOT FOUND%s\\n\", c.Path)\n\t\t})\n\t}\n\t//run together here, in the middle; this logic produces the \"parallel\" effect\n\tc.Next()\n}", "func newRoute(registedPath string, handler Handler) *Route {\n\tr := &Route{handler: handler, fullpath: registedPath}\n\tr.processPath()\n\treturn r\n}", "func (objDB *ObjDB) HandleRoute(w http.ResponseWriter, r *http.Request) {\n\tcb.Call(func() error {\n\t\tvar err error\n\t\tswitch r.URL.RequestURI() {\n\t\tcase \"/user\":\n\t\t\terr = controllers.HandleUserRequest(w, r, objDB.DB)\n\t\tcase \"/users\":\n\t\t\terr = controllers.HandleUsersRequest(w, r, objDB.DB)\n\t\tdefault:\n\t\t\terr = controllers.HandleGetUserRequest(w, r, objDB.DB)\n\t\t}\n\t\tif err != nil {\n\t\t\ttotal = total + 1\n\t\t\tfailureRequests = failureRequests + 1\n\t\t} else {\n\t\t\tsuccessRequests = successRequests + 1\n\t\t}\n\t\treturn err\n\t}, 0)\n}", "func (r *router) handle(c *Context) {\n\tn, params := r.getRoute(c.Method, c.Path) //if request method and path exist, return pattern of node and params\n\tif n != nil {\n\t\tc.Params = params\n\t\tc.handlers = append(c.handlers, n.handler) //insert handler after middleware\n\t} else {\n\t\tc.handlers = append(c.handlers, func(c *Context) {\n\t\t\tc.String(http.StatusNotFound, \"404 NOT FOUND: %s\\n\", c.Path)\n\t\t})\n\t}\n\tc.Next()\n}", "func (e *engine) handleRoute(ctx *Context) flowResult {\n\tdomain := AppRouter().FindDomain(ctx.Req)\n\tif domain == nil {\n\t\twriteErrorInfo(ctx, http.StatusNotFound, \"Not Found\")\n\t\treturn flowStop\n\t}\n\n\troute, pathParams, rts := domain.Lookup(ctx.Req)\n\tif route == nil { // route not found\n\t\tif err := handleRtsOptionsMna(ctx, domain, rts); err == nil {\n\t\t\treturn flowStop\n\t\t}\n\n\t\tctx.route = domain.NotFoundRoute\n\t\thandleRouteNotFound(ctx, domain, domain.NotFoundRoute)\n\t\treturn flowStop\n\t}\n\n\tctx.route = route\n\tctx.domain = domain\n\n\t// security form auth case\n\tif isFormAuthLoginRoute(ctx) {\n\t\treturn flowCont\n\t}\n\n\t// Path parameters\n\tif pathParams.Len() > 0 {\n\t\tctx.Req.Params.Path = make(map[string]string, pathParams.Len())\n\t\tfor _, v := range *pathParams {\n\t\t\tctx.Req.Params.Path[v.Key] = v.Value\n\t\t}\n\t}\n\n\t// Serving static file\n\tif route.IsStatic {\n\t\tif err := e.serveStatic(ctx); err == errFileNotFound {\n\t\t\thandleRouteNotFound(ctx, domain, route)\n\t\t\tctx.Reply().done = false // override\n\t\t}\n\t\treturn flowStop\n\t}\n\n\t// No controller or action found for the route\n\tif err := ctx.setTarget(route); err == errTargetNotFound {\n\t\thandleRouteNotFound(ctx, domain, route)\n\t\treturn flowStop\n\t}\n\n\treturn flowCont\n}", "func (r *Router) addRoute(m, p, t string, fn Handle) {\n\n\tpath := r.subPath(p)\n\n\t// Add to index\n\tif len(t) > 0 && m == \"GET\" {\n\t\t// TODO: Display total path including host\n\t\tr.index[t] = path\n\t}\n\n\t// Wrapper function to bypass the parameter problem\n\twf := func(w http.ResponseWriter, req *http.Request, p httprouter.Params) {\n\t\tfn(w, req, paramsFromHTTPRouter(p))\n\t}\n\n\tr.router.Handle(m, path, wf)\n}", "func (rc *Router) HandleRoute(writer http.ResponseWriter, request 
*http.Request) {\n\troute := rc.route\n\n\tlogger := route.Logger.WithFields(\n\t\tmap[string]interface{}{\n\t\t\tcommon.LabelEventSource: route.EventSource.Name,\n\t\t\tcommon.LabelEndpoint: route.Context.Endpoint,\n\t\t\tcommon.LabelHTTPMethod: route.Context.Method,\n\t\t})\n\n\tlogger.Info(\"request received, processing it...\")\n\n\tif !route.Active {\n\t\tlogger.Warn(\"endpoint is not active, won't process it\")\n\t\tcommon.SendErrorResponse(writer, \"endpoint is inactive\")\n\t\treturn\n\t}\n\n\tlogger.Infoln(\"verifying the request...\")\n\terr := rc.verifyRequest(request)\n\tif err != nil {\n\t\tlogger.WithError(err).Error(\"failed to validate the request\")\n\t\tcommon.SendInternalErrorResponse(writer, err.Error())\n\t\treturn\n\t}\n\n\tvar data []byte\n\t// Interactive element actions are always\n\t// sent as application/x-www-form-urlencoded\n\t// If request was generated by an interactive element, it will be a POST form\n\tif len(request.Header[\"Content-Type\"]) > 0 && request.Header[\"Content-Type\"][0] == \"application/x-www-form-urlencoded\" {\n\t\tlogger.Infoln(\"handling slack interaction...\")\n\t\tdata, err = rc.handleInteraction(request)\n\t\tif err != nil {\n\t\t\tlogger.WithError(err).Error(\"failed to process the interaction\")\n\t\t\tcommon.SendInternalErrorResponse(writer, err.Error())\n\t\t\treturn\n\t\t}\n\t} else {\n\t\t// If there's no payload in the post body, this is likely an\n\t\t// Event API request. Parse and process if valid.\n\t\tlogger.Infoln(\"handling slack event...\")\n\t\tvar response []byte\n\t\tdata, response, err = rc.handleEvent(request)\n\t\tif err != nil {\n\t\t\tlogger.WithError(err).Error(\"failed to handle the event\")\n\t\t\tcommon.SendInternalErrorResponse(writer, err.Error())\n\t\t\treturn\n\t\t}\n\t\tif response != nil {\n\t\t\twriter.Header().Set(\"Content-Type\", \"text\")\n\t\t\tif _, err := writer.Write(response); err != nil {\n\t\t\t\tlogger.WithError(err).Error(\"failed to write the response for url verification\")\n\t\t\t\t// don't return, we want to keep this running to give the user a chance to retry\n\t\t\t}\n\t\t}\n\t}\n\n\tif data != nil {\n\t\tlogger.Infoln(\"dispatching event on route's data channel...\")\n\t\troute.DataCh <- data\n\t}\n\n\tlogger.Info(\"request successfully processed\")\n\tcommon.SendSuccessResponse(writer, \"success\")\n}", "func (r *Router) handle(c *Ctx) {\n\tvar handler HandlerFunc\n\treq := c.Request()\n\tw := c.Writer()\n\tpath := req.URL.Path\n\tmethod := req.Method\n\tres := r.trie.Match(path)\n\n\tif res.Node == nil {\n\t\t// FixedPathRedirect or TrailingSlashRedirect\n\t\tif res.TSR != \"\" || res.FPR != \"\" {\n\t\t\treq.URL.Path = res.TSR\n\t\t\tif res.FPR != \"\" {\n\t\t\t\treq.URL.Path = res.FPR\n\t\t\t}\n\t\t\tcode := 301\n\t\t\tif method != \"GET\" {\n\t\t\t\tcode = 307\n\t\t\t}\n\t\t\thttp.Redirect(w, req, req.URL.String(), code)\n\t\t\treturn\n\t\t}\n\t\tif r.noRoute == nil {\n\t\t\thttp.Error(w, fmt.Sprintf(`\"%s\" not implemented`, path), 501)\n\t\t\treturn\n\t\t}\n\t\thandler = r.noRoute\n\t} else {\n\t\t// ok := false\n\t\thd := res.Node.GetHandler(method)\n\t\thandler, _ = hd.(HandlerFunc)\n\t\t// handler = r.wrapHandler(hd)\n\t\t// if !ok {\n\t\t// \tpanic(\"handler error\")\n\t\t// }\n\t\tif handler == nil {\n\t\t\t// OPTIONS support\n\t\t\tif method == http.MethodOptions {\n\t\t\t\tw.Header().Set(\"Allow\", res.Node.GetAllow())\n\t\t\t\tw.WriteHeader(204)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif r.noMethod == nil {\n\t\t\t\t// If no route handler is returned, it's a 405 
error\n\t\t\t\tw.Header().Set(\"Allow\", res.Node.GetAllow())\n\t\t\t\thttp.Error(w, fmt.Sprintf(`\"%s\" not allowed in \"%s\"`, method, path), 405)\n\t\t\t\treturn\n\t\t\t}\n\t\t\thandler = r.noMethod\n\t\t}\n\t}\n\n\tif len(res.Params) != 0 {\n\t\tc.params = res.Params\n\t}\n\tc.handlers = append(c.handlers, handler)\n\tc.Next()\n}", "func (s *Server) route() {\n\ts.r.HandleFunc(\"/\", s.HandleArticlesGetAll())\n\ts.r.HandleFunc(\"/article/{slug}\", s.HandleArticlesGetOne())\n\ts.r.HandleFunc(\"/talks\", s.HandleTalksGetOne())\n\ts.r.HandleFunc(\"/talks/{slug}\", s.HandleTalksGetOne())\n\ts.r.HandleFunc(\"/about\", s.HandleAboutGet())\n}", "func (this *Router) HandleRoute(path string, f ReqResFunc) {\n    this.routes[path] = f\n    http.HandleFunc(path, this.assignRouteToHttp(f))\n}", "func (t TransactionHandler) route() {\n\tt.Router.HandleFunc(\"/getItemList\", t.respond(getItemList, handleError)).Methods(\"POST\")\n\tt.Router.HandleFunc(\"/addItems\", t.respond(createItems, handleError)).Methods(\"POST\")\n\tt.Router.HandleFunc(\"/submitTx\", t.respond(submitTx, handleError)).Methods(\"POST\")\n\tt.Router.HandleFunc(\"/getTxByRecipient\", t.respond(getRecipientTx, handleError)).Methods(\"POST\")\n\tt.Router.HandleFunc(\"/getTxByMerchant\", t.respond(getMerchantTx, handleError)).Methods(\"POST\")\n}", "func (f RouteHandlerFunc) RouteHandle(rm *RouteMatch) { f(rm) }", "func (h *HandlerRouter) findRoute(method, requestPath string, c *context.Context) {\n\terror := NewError(c)\n\n\tr := h.matchRouter(requestPath)\n\tif r == nil {\n\t\terror.Code(404, 404).JSON()\n\t\treturn\n\t}\n\tc.RunHandler = r.HandlerType\n\n\th.findFilter(BEFORE_ROUTE, requestPath, c)\n\t// Exit handler\n\tif c.GetStatus() != http.StatusOK {\n\t\treturn\n\t}\n\n\tif r != nil {\n\t\t// Loading controller handler before the filter\n\t\t// If the HTTP status code is not 200, stop running,\n\t\t// apply to websocket.\n\t\th.findFilter(BEFORE_HANDLER, requestPath, c)\n\t\t// Exit handler\n\t\tif c.GetStatus() != http.StatusOK {\n\t\t\treturn\n\t\t}\n\t\tswitch c.ReqType {\n\t\tcase context.RPC_HTTP:\n\t\t\tc.SetParams(r.MatchParams(requestPath))\n\t\t}\n\n\t\tvc := reflect.New(r.HandlerType)\n\t\texecController, ok := vc.Interface().(HandlerInterface)\n\t\tif !ok {\n\t\t\terror.Code(500, 500).JSON()\n\t\t}\n\n\t\texecController.Init(c)\n\t\tswitch method {\n\t\tcase \"GET\":\n\t\t\texecController.Get()\n\t\tcase \"POST\":\n\t\t\texecController.Post()\n\t\tcase \"DELETE\":\n\t\t\texecController.Delete()\n\t\tcase \"PUT\":\n\t\t\texecController.Put()\n\t\tcase \"PATCH\":\n\t\t\texecController.Patch()\n\t\tcase \"HEAD\":\n\t\t\texecController.Head()\n\t\tcase \"OPTIONS\":\n\t\t\texecController.Options()\n\t\tcase \"WS\":\n\t\t\texecController.Websocket()\n\t\tdefault:\n\t\t\texecController.Get()\n\t\t}\n\t}\n}", "func (ro *Route) handler(uri []string, httpMethod string, ids idMap) (*handler, error) {\n\n\t//log.Println(\"Route Handling\", uri, \"in the\", ro)\n\n\t// Check if it is trying to request some Handler of this Route\n\tif len(uri) == 0 {\n\t\th, exist := ro.Handlers[httpMethod]\n\t\tif !exist {\n\t\t\treturn nil, fmt.Errorf(\"Method %s not found in the %s\", httpMethod, ro)\n\t\t}\n\t\treturn h, nil\n\t}\n\n\t// Check if it is trying to request some Action Handler of this Route\n\tif len(uri) == 1 {\n\n\t\th, exist := ro.Handlers[httpMethod+uri[0]]\n\t\tif exist {\n\t\t\treturn h, nil\n\t\t}\n\n\t\t// It is not an error, because there could be a resource with this name, not an action\n\t\t//log.Println(\"Action \" + httpMethod + 
uri[0] + \" NOT FOUND\")\n\t}\n\n\t// If we are in a Slice Route, get its ID and search in the Elem Route\n\tif ro.IsSlice {\n\t\t// Add its ID to the Map\n\t\tid := &ID{id: uri[0]}\n\t\tids[ro.Elem.Value.Type()] = reflect.ValueOf(id)\n\n\t\treturn ro.Elem.handler(uri[1:], httpMethod, ids)\n\t}\n\n\t// If we are in an Elem Route, the only possibility is to have a Child with this Name\n\tchild, exist := ro.Children[uri[0]]\n\tif exist {\n\t\treturn child.handler(uri[1:], httpMethod, ids)\n\t}\n\n\treturn nil, fmt.Errorf(\"Not exist any Child '%s' or Action '%s' in the %s\", uri[0], httpMethod+strings.Title(uri[0]), ro)\n}", "func routeHandler(res http.ResponseWriter, req *http.Request) {\n\tpath := req.URL.Path\n\tswitch path {\n\tcase \"/\": HomeRoute(res, req)\n\tcase \"/test\": TestRoute(res, req)\n\tcase \"/login\": LoginRoute(res, req)\n\tcase \"/register\": RegisterRoute(res, req)\n case \"/logout\": LogoutRoute(res, req)\n\tcase \"/library\": LibraryRoute(res, req)\n\tcase \"/upload\": UploadRoute(res, req)\n\tcase \"/read\": ReadRoute(res, req)\n\tdefault: data,err := ioutil.ReadFile(path[1:])\n \tif err != nil{\n \t\t\tNotFoundRoute(res, req)\n \t} else {\n \t\t\tres.Write(data)\n \t}\n\t}\n}", "func (s *WebService) Match(method string, route string, handler interface{}) {\r\n\ts.addRoute(route, method, handler)\r\n}", "func dynamicRoutingHandler(w http.ResponseWriter, r *http.Request) {\n\t// Handle sleeps\n\tif strings.Contains(r.URL.Path, \"/sleep\") {\n\t\tsleepHandler(w, r)\n\t\treturn\n\t}\n\n\tresp := fmt.Sprintf(`{\"RequestURI\": \"%s\"}`, r.URL.RequestURI())\n\tw.Write([]byte(resp))\n}", "func (c *L3RouteResolver) sendRouteIfActive(r nodenameRoute) {\n\tif !c.routeReady(r) {\n\t\tlogrus.WithField(\"route\", r).Debug(\"Route wasn't ready, ignoring send\")\n\t\treturn\n\t}\n\tlogrus.WithField(\"route\", r).Info(\"Sending route update\")\n\tc.callbacks.OnRouteUpdate(&proto.RouteUpdate{\n\t\tType: proto.RouteType_WORKLOADS_NODE, // FIXME we throw away the route type, will want that if we rework VXLAN resolver to use our routes.\n\t\tDst: r.dst.String(),\n\t\tNode: r.nodeName,\n\t\tGw: c.nodeNameToIPAddr[r.nodeName],\n\t})\n}", "func newRoute(pattern string, handles []HandlerFunc, router *Router) *Route {\n\tr := new(Route)\n\tr.pattern = pattern\n\tr.handlers = handles\n\tr.router = router\n\tr.children = make(map[string]*Route)\n\treturn r\n}", "func CreateRouteHandler(rw http.ResponseWriter, req *http.Request) {\n\tvar r util.Redirect\n\tdecoder := json.NewDecoder(req.Body)\n\terr := decoder.Decode(&r)\n\tif err != nil {\n\t\tutil.WriteErrorResp(rw, http.StatusBadRequest, \"Invalid Request\")\n\t\treturn\n\t}\n\n\tkey, err := getKey()\n\tif err != nil {\n\t\tutil.WriteErrorResp(rw, http.StatusInternalServerError, \"Could not generate id\")\n\t\treturn\n\t}\n\n\terr = client.Set(key, r.Target, 0).Err()\n\tif err != nil {\n\t\tutil.WriteErrorResp(rw, http.StatusInternalServerError, \"failed to write to db\")\n\t\treturn\n\t}\n\n\tr.Key = key\n\tutil.WriterJSONResponse(rw, r)\n}", "func (s *Server) addRoute(method string, pattern string, handler RouteHandler) {\n\ts.routes = append(s.routes, Route{handler : handler, pattern : pattern, method : method})\n}", "func (d *Daemon) routeHandler(w *rest.ResponseWriter, r *rest.Request) {\n\t//id := strings.Split(r.URL.Path, \"/\")[2]\n\t//route := strings.Split(r.URL.Path, \"/\")[3]\n\n\tid, ok := r.PathParams[\"id\"]\n\tif ok == false {\n\t\tApiResponse(w, 500, \"MISSING_BLOCK_ID\")\n\t\treturn\n\t}\n\n\troute, ok := 
r.PathParams[\"route\"]\n\tif ok == false {\n\t\tApiResponse(w, 500, \"MISSING_ROUTE\")\n\t\treturn\n\t}\n\n\t_, ok = d.blockMap[id]\n\tif ok == false {\n\t\tApiResponse(w, 500, \"BLOCK_ID_NOT_FOUND\")\n\t\treturn\n\t}\n\n\t_, ok = d.blockMap[id].Routes[route]\n\tif ok == false {\n\t\tApiResponse(w, 500, \"ROUTE_NOT_FOUND\")\n\t\treturn\n\t}\n\n\tmsg, err := ioutil.ReadAll(io.LimitReader(r.Body, READ_MAX))\n\n\tif err != nil {\n\t\tApiResponse(w, 500, \"BAD_REQUEST\")\n\t\treturn\n\t}\n\n\tvar outMsg interface{}\n\n\tif len(msg) > 0 {\n\t\terr = json.Unmarshal(msg, &outMsg)\n\t\tif err != nil {\n\t\t\tlog.Println(msg)\n\t\t\tApiResponse(w, 500, \"BAD_JSON\")\n\t\t\treturn\n\t\t}\n\t}\n\n\tResponseChan := make(chan interface{})\n\tblockRouteChan := d.blockMap[id].Routes[route]\n\tblockRouteChan <- &blocks.BMsg{\n\t\tMsg: outMsg,\n\t\tResponseChan: ResponseChan,\n\t}\n\trespMsg := <-ResponseChan\n\n\trespJson, err := json.Marshal(respMsg)\n\tif err != nil {\n\t\tApiResponse(w, 500, \"BAD_RESPONSE_FROM_BLOCK\")\n\t\treturn\n\t}\n\n\tDataResponse(w, respJson)\n}", "func (h *Hooks) OnRoute(handler ...OnRouteHandler) {\n\th.app.mutex.Lock()\n\th.onRoute = append(h.onRoute, handler...)\n\th.app.mutex.Unlock()\n}", "func (rs *routeServer) addRoutesHandler(w http.ResponseWriter, req *http.Request) {\n\tlog.Printf(\"Adding routes at %s\\n\", req.URL.Path)\n\n\tloc := mux.Vars(req)[\"location\"]\n\n\tmediatype, _, err := mime.ParseMediaType(req.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t\treturn\n\t}\n\tif mediatype != \"application/json\" {\n\t\thttp.Error(w, \"requires application/json Content-Type\", http.StatusUnsupportedMediaType)\n\t\treturn\n\t}\n\n\tdec := json.NewDecoder(req.Body)\n\tvar routes map[string]float64\n\tif err := dec.Decode(&routes); err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tif rs.store.AddRoutes(loc, routes) != nil {\n\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t\treturn\n\t}\n}", "func (h *handle) AddRoute(path string, handlers ...fiber.Handler) *handle {\n\th.Application.Add(\"get\", path, handlers...)\n\treturn h\n}", "func NewRoute(command, path string, handler Handler) *Route {\n\tif command != scr.CmdSet &&\n\t\tcommand != scr.CmdAppend &&\n\t\tcommand != scr.CmdDelete &&\n\t\tcommand != scr.CmdCall {\n\t\treturn nil\n\t}\n\tif !regexpHandlerPath.MatchString(path) {\n\t\tlog.Println(ErrInvalidPath, path)\n\t\treturn nil\n\t}\n\treturn &Route{\n\t\tCommand: command,\n\t\tPath: path,\n\t\tpathParts: strings.Split(path, pathDelimiter),\n\t\tHandler: handler,\n\t}\n}", "func (r *Router) Handle(registedPath string, handler HTTPHandler, methods ...string) *Route {\n\tr.mu.Lock()\n\tdefer r.mu.Unlock()\n\tvar route *Route\n\tif registedPath == \"\" {\n\t\tregistedPath = \"/\"\n\t}\n\n\tif handler != nil || registedPath == MatchEverything {\n\n\t\t//validate the handler to be a func\n\n\t\tif reflect.TypeOf(handler).Kind() != reflect.Func {\n\t\t\tpanic(\"iris | Router.go:50 -- Inline Handler HAS TO BE A func\")\n\t\t}\n\n\t\t//I will do it inside the Prepare, because maybe developer don't wants the GET if methods not defined yet.\n\t\t//\t\tif methods == nil {\n\t\t//\t\t\tmethods = []string{HttpMethods.GET}\n\t\t//\t\t}\n\n\t\troute = newRoute(registedPath, handler, methods...)\n\n\t\tif len(r.middlewareHandlers) > 0 {\n\t\t\t//if global middlewares are registed then push them to this route.\n\t\t\troute.middlewareHandlers = 
r.middlewareHandlers\n\t\t}\n\n\t\tr.routes = append(r.routes, route)\n\t}\n\n\troute.errorHandlers = r.errorHandlers\n\n\treturn route\n}", "func (auth *AuthManager) Route(route string, handler func(account.AccountID, *msgjson.Message) *msgjson.Error) {\n\tcomms.Route(route, func(conn comms.Link, msg *msgjson.Message) *msgjson.Error {\n\t\tclient := auth.conn(conn)\n\t\tif client == nil {\n\t\t\treturn &msgjson.Error{\n\t\t\t\tCode: msgjson.UnauthorizedConnection,\n\t\t\t\tMessage: \"cannot use route '\" + route + \"' on an unauthorized connection\",\n\t\t\t}\n\t\t}\n\t\treturn handler(client.acct.ID, msg)\n\t})\n}", "func (c *L3RouteResolver) withdrawRouteIfActive(r nodenameRoute) {\n\tif !c.routeReady(r) {\n\t\tlogrus.WithField(\"route\", r).Debug(\"Route wasn't ready, ignoring withdraw\")\n\t\treturn\n\t}\n\tlogrus.WithField(\"route\", r).Info(\"Sending route remove\")\n\tc.callbacks.OnRouteRemove(proto.RouteType_WORKLOADS_NODE, r.dst.String())\n}", "func (msg MsgCreateIndex) Route() string { return RouterKey }", "func (router *Router) Route(ctx context.Context, req *events.APIGatewayProxyRequest) (events.APIGatewayProxyResponse, error) {\n\n\tvar res events.APIGatewayProxyResponse\n\tvar err error\n\tstrLen := len(router.ResourceName)\n\n\trequestUser := router.UserDetails.GetUser(&req.RequestContext)\n\tctxWithUser := context.WithValue(ctx, DceCtxKey, *requestUser)\n\n\tswitch {\n\tcase req.HTTPMethod == http.MethodGet && strings.HasSuffix(req.Path, router.ResourceName):\n\t\tres, err = router.ListController.Call(ctxWithUser, req)\n\tcase req.HTTPMethod == http.MethodGet && strings.Compare(string(req.Path[0:strLen+1]), fmt.Sprintf(\"%s/\", router.ResourceName)) == 0:\n\t\tres, err = router.GetController.Call(ctxWithUser, req)\n\tcase req.HTTPMethod == http.MethodDelete &&\n\t\t(strings.Compare(req.Path, fmt.Sprintf(\"%s/\", router.ResourceName)) == 0 || strings.Compare(req.Path, router.ResourceName) == 0):\n\t\tres, err = router.DeleteController.Call(ctxWithUser, req)\n\tcase req.HTTPMethod == http.MethodPost && strings.HasSuffix(req.Path, router.ResourceName):\n\t\tres, err = router.CreateController.Call(ctxWithUser, req)\n\tdefault:\n\t\terrMsg := fmt.Sprintf(\"Resource %s not found for method %s\", req.Path, req.HTTPMethod)\n\t\tlog.Println(errMsg)\n\t\treturn response.BadRequestError(errMsg), nil\n\t}\n\n\t// Handle errors that the controllers did not know how to handle\n\tif err != nil {\n\t\tlog.Printf(\"Controller error: %s\", err)\n\t\treturn response.ServerError(), nil\n\t}\n\n\treturn res, nil\n}", "func (c *RouterController) HandleRoute() {\n\teventType, route, err := c.NextRoute()\n\tif err != nil {\n\t\tglog.Errorf(\"Unable to read routes: %v\", err)\n\t\treturn\n\t}\n\n\tc.lock.Lock()\n\tdefer c.lock.Unlock()\n\n\tglog.V(4).Infof(\"Processing Route: %s\", route.ServiceName)\n\tglog.V(4).Infof(\" Alias: %s\", route.Host)\n\tglog.V(4).Infof(\" Event: %s\", eventType)\n\n\tc.Plugin.HandleRoute(eventType, route)\n}", "func (h *Handler) Add(cmd int32, hf HandlerFunc) {\n\th.router[cmd] = hf\n}", "func Route(w http.ResponseWriter, r *http.Request) {\n\tlog.Printf(\"%v %v %v%v\", r.RemoteAddr, r.Method, r.Host, r.RequestURI)\n\tHandleDefault(w, r)\n}", "func (r *Router) match(method, uri string, c *Context) *Route {\n\tru := r.lookup(uri, r.routeMap[method], c)\n\tif ru != nil && ru.handlers != nil {\n\t\treturn ru\n\t}\n\treturn nil\n}", "func WithMatchedRoute(log logrus.FieldLogger) func(http.Handler) http.Handler {\n\tm := middleware.New(middleware.Config{\n\t\tRecorder: 
NewRecorder(Config{Log: log}),\n\t\tService: \"assisted-installer\",\n\t})\n\n\treturn func(next http.Handler) http.Handler {\n\t\treturn Handler(log, m, next)\n\t}\n}", "func (s *Server) AddRoute(handler *common.HTTPHandler, lock *sync.RWMutex, base, endpoint string, log logging.Logger) error {\n\turl := fmt.Sprintf(\"%s/%s\", baseURL, base)\n\ts.log.Info(\"adding route %s%s\", url, endpoint)\n\t// Apply logging middleware\n\th := handlers.CombinedLoggingHandler(log, handler.Handler)\n\t// Apply middleware to grab/release chain's lock before/after calling API method\n\th, err := lockMiddleware(h, handler.LockOptions, lock)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn s.router.AddRouter(url, endpoint, h)\n}", "func (s *server) routes() {\n s.router.HandleFunc(\"/\", s.homePageHandler)\n s.router.HandleFunc(\"/signup/\", s.signupHandler)\n s.router.HandleFunc(\"/signin/\", s.signinHandler)\n s.router.HandleFunc(\"/signout/\", s.makeHandler(s.signoutHandler))\n s.router.HandleFunc(\"/view/\", s.makeHandler(s.viewHandler))\n s.router.HandleFunc(\"/save/\", s.makeHandler(s.saveHandler))\n s.router.HandleFunc(\"/edit/\", s.makeHandler(s.editHandler))\n s.router.HandleFunc(\"/delete/\", s.makeHandler(s.deleteHandler))\n\n s.validPath = regexp.MustCompile(\n \"^/(new|view|save|edit|delete|signout)/([0-9]*)$\")\n}", "func GetRouteHandler(rw http.ResponseWriter, req *http.Request) {\n\tkey := mux.Vars(req)[\"key\"]\n\t// Retreive key from Redis.\n\tresp := client.Get(key)\n\turl, err := resp.Result()\n\tif err != nil {\n\t\tutil.WriteErrorResp(rw, http.StatusNotFound, \"Not Found!\")\n\t\treturn\n\t}\n\tredirect := util.Redirect{Key: key, Target: url}\n\tutil.WriterJSONResponse(rw, redirect)\n}", "func (rc *RouteConfig) RouteHandler(writer http.ResponseWriter, request *http.Request) {\n\tr := rc.route\n\n\tlog := r.Logger.\n\t\tWithEventSource(r.EventSource.Name).\n\t\tWithEndpoint(r.Webhook.Endpoint).\n\t\tWithPort(r.Webhook.Port).\n\t\tWithHTTPMethod(request.Method)\n\n\tlog.Info(\"request received\")\n\n\tif !helper.ActiveEndpoints[r.Webhook.Endpoint].Active {\n\t\tlog.Warn(\"endpoint is not active\")\n\t\tcommon.SendErrorResponse(writer, \"\")\n\t\treturn\n\t}\n\n\tvar buf bytes.Buffer\n\tif _, err := buf.ReadFrom(request.Body); err != nil {\n\t\tlog.WithError(err).Error(\"failed to parse request body\")\n\t\tcommon.SendInternalErrorResponse(writer, \"\")\n\t\treturn\n\t}\n\n\tbody := buf.String()\n\teventsAPIEvent, err := slackevents.ParseEvent(json.RawMessage(body), slackevents.OptionVerifyToken(&slackevents.TokenComparator{VerificationToken: rc.token}))\n\tif err != nil {\n\t\tlog.WithError(err).Error(\"failed to extract event\")\n\t\tcommon.SendInternalErrorResponse(writer, \"\")\n\t\treturn\n\t}\n\n\tif eventsAPIEvent.Type == slackevents.URLVerification {\n\t\tvar r *slackevents.ChallengeResponse\n\t\terr := json.Unmarshal([]byte(body), &r)\n\t\tif err != nil {\n\t\t\tlog.WithError(err).Error(\"failed to verify the challenge\")\n\t\t\tcommon.SendInternalErrorResponse(writer, \"\")\n\t\t\treturn\n\t\t}\n\t\twriter.Header().Set(\"Content-Type\", \"text\")\n\t\tif _, err := writer.Write([]byte(r.Challenge)); err != nil {\n\t\t\tlog.WithError(err).Error(\"failed to write the response for url verification\")\n\t\t\t// don't return, we want to keep this running to give user chance to retry\n\t\t}\n\t}\n\n\tif eventsAPIEvent.Type == slackevents.CallbackEvent {\n\t\tdata, err := json.Marshal(eventsAPIEvent.InnerEvent.Data)\n\t\tif err != nil {\n\t\t\tlog.WithError(err).Error(\"failed to 
marshal event data\")\n\t\t\tcommon.SendInternalErrorResponse(writer, \"\")\n\t\t\treturn\n\t\t}\n\t\thelper.ActiveEndpoints[rc.route.Webhook.Endpoint].DataCh <- data\n\t}\n\n\tlog.Info(\"request successfully processed\")\n\tcommon.SendSuccessResponse(writer, \"\")\n}", "func (mx *Mux) buildRouteHandler() {\n\tmx.buildRouterMutex.Lock()\n\tdefer mx.buildRouterMutex.Unlock()\n\tif mx.handler == nil {\n\t\th := HttpHandler(mx.routeHTTP)\n\t\tif mx.routeHandler != nil {\n\t\t\tmainHandler := h\n\t\t\th = HttpHandler(func(w http.ResponseWriter, r *http.Request, rctx *RouteContext) {\n\t\t\t\tmx.routeHandler(mainHandler, w, r, rctx)\n\t\t\t})\n\t\t}\n\n\t\tvar minterseptors []Middlewares\n\t\tp := mx\n\t\tfor p != nil {\n\t\t\tif p.handlerInterseptors.Len > 0 {\n\t\t\t\tminterseptors = append(minterseptors, p.handlerInterseptors.All())\n\t\t\t}\n\t\t\tp = p.parent\n\t\t}\n\n\t\tvar hinterseptors Middlewares\n\t\tfor i := len(minterseptors) - 1; i >= 0; i-- {\n\t\t\thinterseptors = append(hinterseptors, minterseptors[i]...)\n\t\t}\n\n\t\tmx.handlerInterseptors.Add(hinterseptors, DUPLICATION_SKIP).Build()\n\t\tmx.interseptors.Build()\n\t\tmx.middlewares.Build()\n\t\tmx.handler = mx.chainHandler(h)\n\t}\n}", "func assignRoutes(router *mux.Router) *mux.Router {\n\tvar logger log.Logger\n\t{\n\t\tlogger = log.NewLogfmtLogger(os.Stderr)\n\t\tlogger = log.NewSyncLogger(logger)\n\t\tlogger = level.NewFilter(logger, level.AllowDebug())\n\t\tlogger = log.With(logger,\n\t\t\t\"svc:\", \"pilot-management\",\n\t\t\t\"ts:\", log.DefaultTimestampUTC,\n\t\t\t\"caller:\", log.DefaultCaller,\n\t\t)\n\t}\n\n\tlevel.Info(logger).Log(\"msg\", \"service started\")\n\tdefer level.Info(logger).Log(\"msg\", \"service ended\")\n\n\tservice := impl.MakeServiceImpl(logger)\n\n\toptions := []httpTransport.ServerOption{\n\t\thttpTransport.ServerErrorEncoder(EncodeErrorResponse),\n\t\thttpTransport.ServerErrorLogger(logger),\n\t}\n\n\tstatusHandler := httpTransport.NewServer(\n\t\tMakeStatusEndpoint(service),\n\t\tDecodeStatusRequest,\n\t\tEncodeResponse,\n\t)\n\n\tlistPilotsHandler := httpTransport.NewServer(\n\t\tMakeListPilotsEndpoint(service),\n\t\tDecodeListPilotsRequest,\n\t\tEncodeResponse,\n\t\toptions...,\n\t)\n\n\tgetPilotHandler := httpTransport.NewServer(\n\t\tMakeGetPilotEndpoint(service),\n\t\tDecodeGetPilotRequest,\n\t\tEncodeResponse,\n\t\toptions...,\n\t)\n\n\tCreatePilotHandler := httpTransport.NewServer(\n\t\tMakeCreatePilotEndpoint(service),\n\t\tDecodeCreatePilotRequest,\n\t\tEncodeResponse,\n\t\toptions...,\n\t)\n\n\tUpdatePilotHandler := httpTransport.NewServer(\n\t\tMakeUpdatePilotEndpoint(service),\n\t\tDecodeUpdatePilotRequest,\n\t\tEncodeResponse,\n\t\toptions...,\n\t)\n\n\tDeletePilotHandler := httpTransport.NewServer(\n\t\tMakeDeletePilotEndpoint(service),\n\t\tDecodeDeletePilotRequest,\n\t\tEncodeResponse,\n\t\toptions...,\n\t)\n\n\tChangePilotStatusHandler := httpTransport.NewServer(\n\t\tMakeChangePilotStatusEndpoint(service),\n\t\tDecodeChangePilotStatusRequest,\n\t\tEncodeResponse,\n\t\toptions...,\n\t)\n\n\trouter.Handle(\"/supply/pilots/status\", statusHandler).Methods(\"GET\")\n\trouter.Handle(\"/supply/pilots\", listPilotsHandler).Methods(\"GET\")\n\trouter.Handle(\"/supply/pilots/{id}\", getPilotHandler).Methods(\"GET\")\n\trouter.Handle(\"/supply/pilots\", CreatePilotHandler).Methods(\"POST\")\n\trouter.Handle(\"/supply/pilots/{id}\", UpdatePilotHandler).Methods(\"PATCH\")\n\trouter.Handle(\"/supply/pilots/{id}\", 
DeletePilotHandler).Methods(\"DELETE\")\n\trouter.Handle(\"/supply/pilots/{id}/{status}\", ChangePilotStatusHandler).Methods(\"PATCH\")\n\treturn router\n}", "func (router *Router) Handle(method string, uri string, handler http.Handler) {\n\troutes := router.routes[method]\n\tpath := strings.Split(uri, \"/\")\n\troutes = append(routes, Route{path, handler})\n\trouter.routes[method] = routes\n}", "func (b *BaseHandler) setRoute(r *mux.Route) {\n\tb.route = r\n}", "func (srv *Server) renterHostsActiveHandler(w http.ResponseWriter, req *http.Request, _ httprouter.Params) {\n\twriteJSON(w, ActiveHosts{\n\t\tHosts: srv.renter.ActiveHosts(),\n\t})\n}", "func AddApproutes(route *mux.Router) {\r\n\r\n\tlog.Println(\"Loadeding Routes...\")\r\n\r\n\troute.HandleFunc(\"/\", RenderHome)\r\n\r\n\troute.HandleFunc(\"/login\", RenderLogin)\r\n\r\n\troute.HandleFunc(\"/register\", RenderRegister)\r\n\r\n\troute.HandleFunc(\"/signin\", SignInUser).Methods(\"POST\")\r\n\r\n\troute.HandleFunc(\"/signup\", SignUpUser).Methods(\"POST\")\r\n\r\n\troute.HandleFunc(\"/userDetails\", GetUserDetails).Methods(\"GET\")\r\n\r\n\tlog.Println(\"Routes are Loaded.\")\r\n}", "func HandleRoutes() {\n\tmux := http.NewServeMux()\n\tch := http.HandlerFunc(c.CreateItem)\n\tdh := http.HandlerFunc(c.DeleteItem)\n\tgh := http.HandlerFunc(c.GetItem)\n\tah := http.HandlerFunc(c.ListItems)\n\tmux.HandleFunc(\"/\", func(w http.ResponseWriter, r *http.Request) { fmt.Print(\"Hello\") })\n\tmux.Handle(\"/create\", ch)\n\tmux.Handle(\"/delete\", dh)\n\tmux.Handle(\"/get\", gh)\n\tmux.Handle(\"/list\", ah)\n}", "func routePath(w http.ResponseWriter, r *http.Request, trimURL string) {\n\n\t/***********************************************/\n\t//TODO: add your custom web API here:\n\t/**********************************************/\n\n\tif strings.HasPrefix(trimURL, \"login\") && webServer.IsPOST(r) { //>>>>authentication\n\t\tauthenticateHandler.HandleHTTPLogin(w, r)\n\t} else if strings.HasPrefix(trimURL, \"logout\") && webServer.IsPOST(r) {\n\t\tauthenticateHandler.HandleHTTPLogout(w, r)\n\t} else if strings.Compare(trimURL, \"current-user\") == 0 && webServer.IsGET(r) {\n\t\tauthenticateHandler.HandleCurrentUser(w, r)\n\t} else if strings.Compare(trimURL, \"role\") == 0 && webServer.IsPOST(r) { //>>>>authorization\n\t\tauthorizeHandler.HandleAddRole(w, r)\n\t} else if strings.Compare(trimURL, \"role\") == 0 && webServer.IsGET(r) {\n\t\tauthorizeHandler.HandleGetRole(w, r)\n\t} else if strings.Compare(trimURL, \"role-access\") == 0 && webServer.IsGET(r) {\n\t\tauthorizeHandler.HandleGetAccessRole(w, r)\n\t} else if strings.Compare(trimURL, \"role-access-count\") == 0 && webServer.IsGET(r) {\n\t\tauthorizeHandler.HandleGetAccessRoleCount(w, r)\n\t} else if strings.Compare(trimURL, \"access\") == 0 && webServer.IsGET(r) {\n\t\tauthorizeHandler.HandleGetAccess(w, r)\n\t} else if strings.HasPrefix(trimURL, \"meals\") { //>>>>sample return JSON\n\t\tw.Header().Set(\"Content-Type\", \"application/json\") //MIME to application/json\n\t\tw.WriteHeader(http.StatusOK) //status code 200, OK\n\t\tw.Write([]byte(\"{ \\\"msg\\\": \\\"this is meal A \\\" }\")) //body text\n\t\treturn\n\t} else if strings.HasPrefix(trimURL, \"img/\") { //>>>>sample return virtual JPG file to client\n\t\tlogicalFilePath := \"./logic-files/\"\n\t\tphysicalFileName := \"neon.jpg\"\n\n\t\t// try read file\n\t\tdata, err := ioutil.ReadFile(logicalFilePath + physicalFileName)\n\t\tif err != nil {\n\t\t\t// show error page if failed to read file\n\t\t\thandleErrorCode(500, 
\"Unable to retrieve image file\", w)\n\t\t} else {\n\t\t\t//w.Header().Set(\"Content-Type\", \"image/jpg\") // #optional HTTP header info\n\n\t\t\t// uncomment if image file is meant to download instead of display on web browser\n\t\t\t// clientDisplayFileName = \"customName.jpg\"\n\t\t\t//w.header().Set(\"Content-Disposition\", \"attachment; filename=\\\"\" + clientDisplayFileName + \"\\\"\")\n\n\t\t\t// write file (in binary format) direct into HTTP return content\n\t\t\tw.Write(data)\n\t\t}\n\t} else {\n\t\t// show error code 404 not found\n\t\t//(since the requested URL doesn't match any of it)\n\t\thandleErrorCode(404, \"Path not found.\", w)\n\t}\n\n}", "func (s *Server) createRoutes() {\n\tvar routes = util.Routes{\n\t\tutil.Route{\n\t\t\tName: \"pong\",\n\t\t\tMethod: \"GET\",\n\t\t\tPattern: \"/\",\n\t\t\tHandlerFunc: s.pong(),\n\t\t},\n\t\tutil.Route{\n\t\t\tName: \"healthz\",\n\t\t\tMethod: \"GET\",\n\t\t\tPattern: \"/healthz\",\n\t\t\tHandlerFunc: util.Healthz(),\n\t\t},\n\t\tutil.Route{\n\t\t\tName: \"getAllItems\",\n\t\t\tMethod: \"GET\",\n\t\t\tPattern: \"/items\",\n\t\t\tHandlerFunc: s.getAllItems(),\n\t\t},\n\t\tutil.Route{\n\t\t\tName: \"setItemsPOST\",\n\t\t\tMethod: \"POST\",\n\t\t\tPattern: \"/items\",\n\t\t\tHandlerFunc: s.setItem(false),\n\t\t},\n\t\tutil.Route{\n\t\t\tName: \"setItemsPUT\",\n\t\t\tMethod: \"PUT\",\n\t\t\tPattern: \"/items\",\n\t\t\tHandlerFunc: s.setItem(true),\n\t\t},\n\t\tutil.Route{\n\t\t\tName: \"getItem\",\n\t\t\tMethod: \"GET\",\n\t\t\tPattern: \"/items/{id:[a-zA-Z0-9]+}\",\n\t\t\tHandlerFunc: s.getItem(),\n\t\t},\n\t\tutil.Route{\n\t\t\tName: \"delItem\",\n\t\t\tMethod: \"DELETE\",\n\t\t\tPattern: \"/items/{id:[a-zA-Z0-9]+}\",\n\t\t\tHandlerFunc: s.delItem(),\n\t\t},\n\t\tutil.Route{\n\t\t\tName: \"delay\",\n\t\t\tMethod: \"GET\",\n\t\t\tPattern: \"/delay\",\n\t\t\tHandlerFunc: s.delay(),\n\t\t},\n\t\tutil.Route{\n\t\t\tName: \"simulateError\",\n\t\t\tMethod: \"GET\",\n\t\t\tPattern: \"/error\",\n\t\t\tHandlerFunc: s.simulateError(),\n\t\t},\n\t}\n\n\tfor _, route := range routes {\n\t\th := route.HandlerFunc\n\n\t\t// Tracing each request\n\t\th = util.TracerMiddleware(h, route)\n\n\t\t// Logging each request\n\t\th = util.LoggerMiddleware(h, s.logger)\n\n\t\t// Assign requestID to each request\n\t\th = util.AssignRequestID(h, s.logger)\n\n\t\t// Monitoring each request\n\t\t// TODO: pass proper handler\n\t\tpromHandler := util.PrometheusMiddleware(h, route.Pattern, rm)\n\n\t\ts.router.\n\t\t\tMethods(route.Method).\n\t\t\tPath(route.Pattern).\n\t\t\tName(route.Name).\n\t\t\tHandler(promHandler)\n\t}\n\n\t// Prometheus endpoint\n\troute := util.Route{\n\t\tName: \"metrics\",\n\t\tMethod: \"GET\",\n\t\tPattern: \"/metrics\",\n\t\tHandlerFunc: nil,\n\t}\n\n\tpromHandler := promhttp.HandlerFor(s.promReg, promhttp.HandlerOpts{})\n\tpromHandler = promhttp.InstrumentMetricHandler(s.promReg, promHandler)\n\ts.router.\n\t\tMethods(route.Method).\n\t\tPath(route.Pattern).\n\t\tName(route.Name).\n\t\tHandler(promHandler)\n\n\t// 404 handler\n\tnotFound := util.PrometheusMiddleware(s.notFound(), \"metrics\", rm)\n\ts.router.NotFoundHandler = notFound\n}", "func (a *AApi) registerRoute(f func(http.ResponseWriter, *http.Request), path string, methods ...string) {\n\ta.logger.WithField(\"func\", \"registerRoute\").\n\t\tDebugf(\"Initializing route %s with methods: %v\", path, methods)\n\ta.router.HandleFunc(path, f).Name(path).Methods(methods...) 
// Name if set for ability to exclude route from authz\n}", "func mapRoutes() {\n\t//http.HandleFunc(\"/user\", controllers.GetUser)\n}", "func (controllerHandler *ControllerHandler) handleRoutes(identifier string, httpMethods []string) api.EmitterFile {\n\n\tvar actualGenerator *generator.RouteGenerator\n\taddRouteToEmmitFiles := false\n\tgen := context.GetFromRegistry(context.Route).GetCtx(apiContext)\n\n\tif gen == nil {\n\t\taddRouteToEmmitFiles = true\n\t\tactualGenerator = generator.NewRouteGenerator()\n\t\tcontext.GetFromRegistry(context.Route).AddToCtx(apiContext, actualGenerator)\n\t} else {\n\t\tactualGenerator = gen.(*generator.RouteGenerator)\n\t}\n\tactualGenerator.AddDefaultRestRoutes(identifier, httpMethods)\n\tif addRouteToEmmitFiles {\n\t\treturn core.NewPhpEmitterFile(\"asher_api.php\", api.RouteFilePath, actualGenerator, api.RouterFile)\n\t}\n\n\treturn nil\n}", "func (msg MsgCreatePermanentLockedAccount) Route() string { return RouterKey }", "func (router *Router) MapRoutes() {\n http.HandleFunc(\"/\", func(w http.ResponseWriter, r *http.Request) {\n found := false\n url := html.EscapeString(r.URL.Path)\n log.Printf(\"%q %q\", r.Method, url)\n\n for _, route := range router.Routes {\n if url == route.Pattern && r.Method == route.Method {\n found = true\n route.Handler.ServeHTTP(w, r)\n }\n }\n\n if !found {\n http.NotFound(w, r)\n }\n }) \n}", "func (r RegexRouter) Handle(regex string, handlers map[string]http.Handler) {\n\t*r.routes = append(*r.routes, route{regexp.MustCompile(regex), handlers})\n}", "func (server *testHTTPServerImpl) AddRoute(method string, path string, handlerFunc http.HandlerFunc) {\n\tserver.router.HandleFunc(path, server.wrapHandlerFunc(handlerFunc)).Methods(method)\n}", "func (this *Router) Handle(method, route string, handler app.Handler) error {\n\t// Invalid route\n\tif route[0] != '/' {\n\t\treturn fmt.Errorf(\"invalid route \\\"%s\\\"\", route)\n\t}\n\n\t// Get path tokens\n\ttokens := strings.Split(route, \"/\")\n\n\t// Special case for root path\n\tif tokens[1] == \"\" {\n\t\ttokens = tokens[1:]\n\t}\n\n\t// Add new route\n\tthis.routes.append(method, tokens, handler)\n\treturn nil\n}", "func (rs *routeServer) routesBetweenHandler(w http.ResponseWriter, req *http.Request) {\n\tlog.Printf(\"Finding routes at %s\\n\", req.URL.Path)\n\n\tvars := mux.Vars(req)\n\tfrom, to := vars[\"from\"], vars[\"to\"]\n\n\troutes, err := rs.store.RoutesBetween(from, to)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t\treturn\n\t}\n\n\trenderJSON(w, routes)\n}", "func (s *Server) getRouteHandler(method string, path string, params *map[string]string) (RouteHandler, int) {\n\tvar i int\n\t/* Find corresponding route */\n\tfor i = 0; i < len(s.routes); i++ {\n\t\tif utils.ParseURL(s.routes[i].pattern, path, params) && s.routes[i].method == method {\n\t\t\tbreak\n\t\t}\n\t}\n\t/* Not found : return 404 */\n\tif i == len(s.routes) {\n\t\treturn nil, http.StatusNotFound\n\t}\n\t/* Return its handler */\n\treturn s.routes[i].handler, 0\n}", "func (h *Handler) Route(r api.Router) {\n\tgetLink := h.Auth.GetLinkExplicit\n\tr.MethodFunc(http.MethodGet, getLink(\"{provisionerID}\", false, nil), h.lookupProvisioner(h.Get))\n\tr.MethodFunc(http.MethodPost, getLink(\"{provisionerID}\", false, nil), h.lookupProvisioner(h.Post))\n}", "func (a *Router) Handle(method string, pattern string, hs ...func(*Context) error) *Router {\n\tr := a.Route(pattern)\n\tr.handlers[method] = append(r.handlers[method], hs...)\n\treturn a\n}", "func (sh 
ServerHandler) DefineMainRoute(r *mux.Router) {\n\t// Main route\n\tr.Methods(\"GET\").Path(\"/\").HandlerFunc(sh.homePage)\n\n\t// Notes route\n\tnotesSubrouter := r.PathPrefix(\"/note\").Subrouter()\n\tnotesSubrouter.Methods(\"GET\").Path(\"/{id}\").HandlerFunc(sh.notePage)\n}", "func (app *application) routes(cfg *Config) http.Handler {\n\n\t// Create a middleware chain containing our \"standard middleware\" that is used for every request.\n\tchain := alice.New(app.recoverPanic, app.logRequest, secureHeaders)\n\n\t// Create a new middleware chain containing the middleware for our \n\t// Application routes. This will only contain the session middleware for now\n\tdynamicChain := alice.New(app.session.Enable, noSurf, app.authenticate)\n\n\t// Use the http.NewServeMux() to initialize a new servemux, then\n\t// register the home function as the handler for the \"/\" path\n\t//mux := http.NewServeMux() // this is the default, but still define it for security.\n\t// Starting to use the GIN framework\n\t//mux.HandleFunc(\"/\", app.home) // subtree path, has an ending /\n\t//mux.HandleFunc(\"/snippet\", app.showSnippet) // fixed path, url must match this exactly.\n\t//mux.HandleFunc(\"/snippet/create\", app.createSnippet) // fixed path, url must match this exactly.\n\tmux := pat.New()\n\t// mux.Get(\"/\", app.session.Enable(http.HandlerFunc(app.home))) // If we were not using Alice to manage our middleware.\n\tmux.Get(\"/\", dynamicChain.ThenFunc(app.home))\n\tmux.Get(\"/snippet/create\", dynamicChain.Append(app.requireAuthenticatedUser).ThenFunc(app.createSnippetForm))\n\tmux.Post(\"/snippet/create\", dynamicChain.Append(app.requireAuthenticatedUser).ThenFunc(app.createSnippet))\n\tmux.Get(\"/snippet/:id\", dynamicChain.ThenFunc(app.showSnippet))\n\n\t// Add the five new routes.\n\tmux.Get(\"/user/signup\", dynamicChain.ThenFunc(app.signupUserForm))\n\tmux.Post(\"/user/signup\", dynamicChain.ThenFunc(app.signupUser))\n\tmux.Get(\"/user/login\", dynamicChain.ThenFunc(app.loginUserForm))\n\tmux.Post(\"/user/login\", dynamicChain.ThenFunc(app.loginUser))\n\tmux.Post(\"/user/logout\", dynamicChain.Append(app.requireAuthenticatedUser).ThenFunc(app.logoutUser))\n\n\t// Create a fileserver to serve static content from\n\tfileServer := http.FileServer(http.Dir(cfg.StaticDir))\n\n\t// use the mux.Handle() to register the file server as the handler\n\t// all url paths start with /static/. 
Strip the /static prefix before\n\t// the request reaches the file server\n\t// mux.Handle(\"/static/\", http.StripPrefix(\"/static\", fileServer))\n\tmux.Get(\"/static/\", http.StripPrefix(\"/static\", fileServer))\n\t// without middleware\n\t// return mux\n\n\t// If we do not use alice\n\t//return app.recoverPanic(app.logRequest(secureHeaders(mux)))\n\n\t// With Alice\n\treturn chain.Then(mux)\n}", "func (g *RouterGroup) addRoute(method string, pattern string, handler Handler) {\n\tpattern = g.prefix + pattern\n\tif g.engine.config.Debug {\n\t\tlog.Printf(\"Route %4s - %s\", method, pattern)\n\t}\n\tg.engine.router.addRoute(method, pattern, handler)\n}", "func initRoute(route *gin.Engine) {\n\troute.GET(entry_point.Index, showIndex)\n\troute.GET(entry_point.ViewMovie, showMovie)\n}", "func (h *RouteMux) addRoute(r *route) {\n\t//and finally append to the list of Routes\n\t//create the Route\n\th.routes = append(h.routes, r)\n}", "func (s *DeviceService) AddRoute(route string, handler func(http.ResponseWriter, *http.Request), methods ...string) error {\n\treturn s.controller.AddRoute(route, handler, methods...)\n}", "func (am AppModule) Route() sdk.Route {\r\n\treturn sdk.NewRoute(types.RouterKey, NewHandler(am.keeper))\r\n}", "func (hr *httpRouter) Handler() http.Handler {\n\n\tc, _ := console.New(console.Options{Color: true})\n\t_ = logger.Register(\"console\", logger.Config{Writer: c})\n\tcLogger, _ := logger.Get(\"console\")\n\tl := log.New(cLogger)\n\n\tfmt.Print(\"Loading Routes...\")\n\t//add files in a directory\n\tro := newHttpRouterExtended(hr)\n\n\tmw := middleware.Chain{}\n\n\t//adding files\n\tfor path, file := range hr.file {\n\t\tro.HandlerFunc(\"GET\", path, mw.Add(l.MW).Handle(\n\t\t\tfunc(w http.ResponseWriter, req *http.Request) {\n\t\t\t\thttp.ServeFile(w, req, hr.file[req.Context().Value(router.PATTERN).(string)])\n\t\t\t}))\n\t\tfmt.Printf(\"\\n\\x1b[32m %#v [GET]%v \\x1b[49m\\x1b[39m \", path, file)\n\t}\n\n\t// adding directories\n\tfor k, path := range hr.dir {\n\t\tfileServer := http.FileServer(http.Dir(path))\n\t\tpattern := k + \"/*filepath\"\n\t\tro.HandlerFunc(\"GET\", pattern, mw.Add(l.MW).Handle(\n\t\t\tfunc(w http.ResponseWriter, req *http.Request) {\n\t\t\t\t//disable directory listing\n\t\t\t\tif strings.HasSuffix(req.URL.Path, \"/\") {\n\t\t\t\t\thttp.NotFound(w, req)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tif val, ok := req.Context().Value(router.PARAMS).(map[string][]string)[\"filepath\"]; ok {\n\t\t\t\t\treq.URL.Path = val[0]\n\t\t\t\t\tfileServer.ServeHTTP(w, req)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\thttp.NotFound(w, req)\n\t\t\t\treturn\n\n\t\t\t}))\n\t\tfmt.Printf(\"\\n\\x1b[32m %#v [GET]%v \\x1b[49m\\x1b[39m \", pattern, http.Dir(path))\n\t}\n\n\t//register all controller routes\n\tfor _, r := range hr.routes {\n\t\tfmt.Printf(\"\\n\\x1b[32m %#v :name \\x1b[49m\\x1b[39m \", r.pattern)\n\t\tfor method, fn := range r.controller.MappingBy(r.pattern) {\n\t\t\tif r.mws != nil {\n\t\t\t\tro.HandlerFunc(strings.ToUpper(method), r.pattern, r.mws.Handle(r.controller.ServeHTTP)) //TODO ????? 
error no url pattern\n\t\t\t} else {\n\t\t\t\tro.HandlerFunc(strings.ToUpper(method), r.pattern, r.controller.ServeHTTP)\n\t\t\t}\n\t\t\tfmt.Printf(\"\\x1b[32m [%v]%v name \\x1b[49m\\x1b[39m \", method, fn)\n\t\t}\n\t}\n\n\t//Not Found Handler\n\tif hr.notFound != nil {\n\t\tro.NotFound = hr.notFound\n\t}\n\n\treturn ro\n}", "func newRoute() *Route {\n\treturn &Route{\n\t\thandlers: make(map[string]http.Handler),\n\t\tmiddleware: make([]*middlewareForVerb, 0),\n\t\tchildren: make([]*Route, 0),\n\t}\n}", "func (am AppModule) Route() sdk.Route {\n\treturn sdk.NewRoute(sdktransfertypes.RouterKey, transfer.NewHandler(keeper.NewMsgServerImpl(am.sdkTransferKeeper, am.bankKeeper, am.whitelistKeeper)))\n}", "func HandlerRoute1(ctx *ugo.RequestCtx) error {\n\treturn ctx.HTTPResponse(\"OK\", 200)\n}", "func (h *Handler) AddRoute(service config.Service) {\n\th.Routes = append(h.Routes, service)\n}", "func (t *Trace) HookRoute(method string, path string) {\n\tt.name = strings.Replace(path, \"/\", \".\", -1)\n\tt.startTime = time.Now()\n}", "func (msg MsgAddMember) Route() string { return RouterKey }", "func (router *Routes) Route(message *telegram.Message) {\n\tvar elements []string\n\tfor _, item := range strings.Split(message.Text, \" \") {\n\t\tif item == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\telements = append(elements, item)\n\t}\n\n\tcount := len(elements)\n\tanswer := telegram.NewAnswerBack(message, router.Bot.API)\n\tmessageContext := MessageContext{map[string]string{}, answer}\n\n\tfor _, handler := range router.handlers {\n\t\tif count != len(handler.elements) {\n\t\t\tcontinue\n\t\t}\n\t\tmatched := true\n\t\tfor index, item := range handler.elements {\n\t\t\tgivenItem := elements[index]\n\t\t\tswitch {\n\t\t\tcase strings.Index(item, \":\") == 0:\n\t\t\t\tmessageContext.Params[item[1:]] = givenItem\n\t\t\tcase item != givenItem:\n\t\t\t\tmatched = false\n\t\t\t}\n\t\t}\n\t\tif !matched {\n\t\t\tcontinue\n\t\t}\n\t\tcheck := handler.actions.Check(&messageContext)\n\n\t\tswitch {\n\t\tcase check == RouteAccept:\n\t\t\thandler.actions.Handle(&messageContext)\n\t\t\treturn\n\t\tcase check == RouteStop:\n\t\t\treturn\n\t\t}\n\t}\n}", "func (r *Route) handle(c *Context) {\n\tfor _, h := range r.handlers {\n\t\th(c)\n\t\tif c.Resp.Wrote() {\n\t\t\treturn\n\t\t}\n\t}\n}", "func (c *RouterController) HandleRoute() {\n\teventType, route, err := c.NextRoute()\n\tif err != nil {\n\t\tutilruntime.HandleError(fmt.Errorf(\"unable to read routes: %v\", err))\n\t\treturn\n\t}\n\n\tc.lock.Lock()\n\tdefer c.lock.Unlock()\n\n\t// Change the local sync state within the lock to ensure that all\n\t// event handlers have the same view of sync state.\n\tc.routesListConsumed = c.RoutesListConsumed()\n\tc.updateLastSyncProcessed()\n\n\tglog.V(4).Infof(\"Processing Route: %s -> %s\", route.Name, route.Spec.To.Name)\n\tglog.V(4).Infof(\" Alias: %s\", route.Spec.Host)\n\tglog.V(4).Infof(\" Event: %s\", eventType)\n\n\tif err := c.Plugin.HandleRoute(eventType, route); err != nil {\n\t\tutilruntime.HandleError(err)\n\t}\n}", "func makeRouteHandler() http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tswitch r.Method {\n\t\tcase \"GET\":\n\t\t\thandleGetRequest(w, r)\n\t\tcase \"POST\":\n\t\t\thandlePostRequest(w, r)\n\t\tcase \"PUT\":\n\t\t\thandlePutRequest(w, r)\n\t\tdefault:\n\t\t\tfmt.Fprint(w, \"Invalid request\")\n\t\t}\n\t}\n}", "func (msg MsgCreateSession) Route() string { return RouterKey }", "func (s *WebService) Handle(route string, method string, httpHandler http.Handler) 
{\r\n\ts.addRoute(route, method, httpHandler)\r\n}", "func (h *routeHandler) Add(routePath string, routeCallback func(*Request)) {\n\tif(h.callbacks == nil) {\n\t\th.callbacks = make(map[string]callback)\n\t}\n\th.callbacks[routePath] = routeCallback\n}", "func (s *server) routes() {\n\ts.router.HandleFunc(\"/payments\", s.paymentHandler())\n}", "func (application *Application) Route(controller interface{}, route string, protected int) interface{} {\n\tfn := func(c web.C, w http.ResponseWriter, r *http.Request) {\n\t\tc.Env[\"Content-Type\"] = \"text/html\"\n\n\t\tif protected == RouteProtected && c.Env[\"User\"] == nil {\n\t\t\thttp.Redirect(w, r, \"/signin\", http.StatusSeeOther)\n\t\t\treturn\n\t\t}\n\n\t\tmethodValue := reflect.ValueOf(controller).MethodByName(route)\n\t\tmethodInterface := methodValue.Interface()\n\t\tmethod := methodInterface.(func(c web.C, r *http.Request) (string, int))\n\n\t\tbody, code := method(c, r)\n\n\t\tif session, exists := c.Env[\"Session\"]; exists {\n\t\t\terr := session.(*sessions.Session).Save(r, w)\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorf(\"Can't save session: %v\", err)\n\t\t\t}\n\t\t}\n\n\t\tswitch code {\n\t\tcase http.StatusOK:\n\t\t\tif _, exists := c.Env[\"Content-Type\"]; exists {\n\t\t\t\tw.Header().Set(\"Content-Type\", c.Env[\"Content-Type\"].(string))\n\t\t\t}\n\t\t\tif _, exists := c.Env[\"Content-Length\"]; exists {\n\t\t\t\tw.Header().Set(\"Content-Length\", c.Env[\"Content-Length\"].(string))\n\t\t\t}\n\t\t\tio.WriteString(w, body)\n\t\tcase http.StatusSeeOther, http.StatusFound:\n\t\t\thttp.Redirect(w, r, body, code)\n\t\tdefault:\n\t\t\tw.WriteHeader(code)\n\t\t\tio.WriteString(w, body)\n\t\t}\n\t}\n\treturn fn\n}", "func (r *Router) masterHandler(c *gin.Context) {\n\tklog.V(4).Infof(\"no router for method:%s, url:%s\", c.Request.Method, c.Request.URL.Path)\n\tc.JSON(404, gin.H{\n\t\t\"Method\": c.Request.Method,\n\t\t\"Path\": c.Request.URL.Path,\n\t\t\"error\": \"router not found\"})\n}", "func (s *HTTPServer) Get(r Route, handler RouterHandler) {\n\t// put this in a trie\n}", "func (i *instances) Route(req *route.Request) route.Response {\n\tlog.Route(req, \"Instances\")\n\n\tswitch req.Top() {\n\tcase \"instance\":\n\t\treq.Pop()\n\t\tif req.Top() == \"\" {\n\t\t\tinstanceHelp(\"\")\n\t\t\treturn route.FAIL\n\t\t}\n\t\tinstance := i.Find(req.Top())\n\t\tif instance == nil {\n\t\t\tmsg.Error(\"Unknown instance %q.\", req.Top())\n\t\t\treturn route.FAIL\n\t\t}\n\t\treturn instance.Route(req.Pop())\n\t}\n\n\tswitch req.Command() {\n\tcase route.Load, route.Create, route.Provision, route.Start, route.Stop, route.Restart, route.Replace:\n\t\treturn i.RouteInOrder(req)\n\tcase route.Destroy:\n\t\treturn i.RouteReverseOrder(req)\n\tcase route.Help:\n\t\ti.help()\n\t\treturn route.OK\n\tcase route.Config:\n\t\ti.config()\n\t\treturn route.OK\n\tcase route.Info:\n\t\ti.info(req)\n\t\treturn route.OK\n\t}\n\treturn route.FAIL\n}", "func (mx *Mux) handle(method methodTyp, pattern string, handler http.Handler) {\n\tif len(pattern) == 0 || pattern[0] != '/' {\n\t\tpanic(fmt.Sprintf(\"routing pattern must begin with '/' in '%s'\", pattern))\n\t}\n\n\t// Build the final routing handler for this Mux.\n\tif mx.handler == nil {\n\t\tmx.buildRouteHandler()\n\t}\n\n\tmx.tree.InsertRoute(method, pattern, handler, false)\n}", "func (r *Route) handler(h Handler) http.Handler {\n\treturn &handler{h}\n}", "func RouteHandler(w http.ResponseWriter, r *http.Request) {\n\turl := r.URL.Path\n\t// 
fmt.Println(\"]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]] url: \", url)\n\t// 1. skip special resources. TODO Expand to config. // TODO better this\n\tif url == \"/favicon.ico\" {\n\t\treturn\n\t}\n\n\tvar starttime time.Time\n\tif config.ROUTE_PRINT_TIME {\n\t\tfmt.Println(\">>> Start Logging time. now is: \", time.Now())\n\t\tstarttime = time.Now()\n\t}\n\tprintAccessHeader(r)\n\n\t// -------- Error Handling --------------------------------------------------------------\n\tdefer func() {\n\t\tif err := recover(); err != nil {\n\n\t\t\tif !enable_error_handler {\n\t\t\t\tif true { // Debug print\n\t\t\t\t\tfmt.Println(\"\\n_______________________________________________________________\")\n\t\t\t\t\tfmt.Println(\"---- DEBUG: ErrorHandler >> Meet An Error -------------------------\")\n\t\t\t\t\t// fmt.Println(reflect.TypeOf(err))\n\t\t\t\t\tif e, ok := err.(error); ok {\n\t\t\t\t\t\tfmt.Println(debug.StackString(e))\n\t\t\t\t\t} else if s, ok := err.(string); ok {\n\t\t\t\t\t\terr = fmt.Errorf(s)\n\t\t\t\t\t\tdebug.DebugPrintVariable(err)\n\t\t\t\t\t} else {\n\t\t\t\t\t\tdebug.DebugPrintVariable(err)\n\t\t\t\t\t}\n\t\t\t\t\tfmt.Println(\"-------------------------------------------------------------------\")\n\t\t\t\t\tfmt.Println(\"- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - --\")\n\t\t\t\t\tfmt.Println(\"\")\n\t\t\t\t}\n\n\t\t\t\tw.Header().Add(\"content-type\", \"text/plain\")\n\t\t\t\tw.Write([]byte(fmt.Sprint(\"[ErrorHandler can't handler this error!]<br>\\n\")))\n\t\t\t\tw.Write([]byte(fmt.Sprint(err)))\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// Error hancler process.\n\n\t\t\tfmt.Println(\"\\n============= ErrorHandler: Panic Occured. =============\")\n\t\t\t// Give control to ErrorHandler if panic occurs.\n\t\t\tif b := errorhandler.Process(w, r, err); b == false {\n\t\t\t\t// return\n\t\t\t\tif false { // **** disable this function\n\t\t\t\t\tw.Header().Add(\"content-type\", \"text/plain\")\n\t\t\t\t\tw.Write([]byte(fmt.Sprint(\"[ErrorHandler can't handler this error, it returns false]\")))\n\t\t\t\t\tw.Write([]byte(fmt.Sprint(err)))\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\n\t\t}\n\n\t\t// clear request scope data store.:: should clear context here? Where to?\n\t\tcontext.Clear(r)\n\n\t\tif config.ROUTE_PRINT_TIME {\n\t\t\tfmt.Println(\">>> Request Execution Time: \", time.Now().Sub(starttime))\n\t\t}\n\t\tprintAccessFooter(r)\n\n\t\t// this route is ended here.\n\t}()\n\t// -------- Routing... --------------------------------------------------------------\n\n\t// 3. let'sp find the right pages.\n\tresult := lookup(url)\n\tif logRoute.Trace() {\n\t\tlogRoute.Printf(\"Lookup(%s) is:\\n%v\", url, result)\n\t}\n\tif result == nil && !result.IsValid() {\n\t\tpanic(exception.NewPageNotFoundError(fmt.Sprintf(\"Page %s not found!\", r.URL.Path)))\n\t}\n\n\t// TODO Later: Create New page object every request? howto share some page object? 
see tapestry5.\n\n\tvar lcc *lifecircle.LifeCircleControl\n\n\t// Check if this is a page request after redirect.\n\t// if has verification code, this is a redirect page and with some data.\n\tpageRedirectVerificationKeys, ok := r.URL.Query()[config.VERIFICATION_CODE_KEY]\n\tif ok && len(pageRedirectVerificationKeys) > 0 {\n\t\tfmt.Println(\"********************************************************************************\")\n\t\tfmt.Println(\"********************************************************************************\")\n\n\t\tvar flash_session_key = config.PAGE_REDIRECT_KEY + pageRedirectVerificationKeys[0]\n\t\tsessionId := sessions.SessionId(r, w) // called when needed.\n\t\tif targetPageInterface, ok := sessions.GetOk(sessionId, flash_session_key); ok {\n\t\t\tfmt.Println(\"key is \", flash_session_key)\n\t\t\tfmt.Println(\"target page interface is \", targetPageInterface)\n\t\t\tif targetPage, ok := targetPageInterface.(core.Pager); ok {\n\t\t\t\tlcc = lifecircle.NewPageFlowFromExistingPage(w, r, targetPage)\n\t\t\t\tfmt.Println(\"successfully get targetpage and continue. TODO:!!!!! here is a memory leak!\")\n\n\t\t\t\t// remove targetpage from session. OR will memory leak!!\n\t\t\t\tsessions.Delete(sessionId, flash_session_key)\n\t\t\t}\n\t\t}\n\t\tfmt.Println(\"********************************************************************************\")\n\t\tfmt.Println(\"********************************************************************************\")\n\t}\n\n\t// ************************************************************\n\t// Normal request page flow, create then flow.\n\t// ************************************************************\n\tif lcc == nil {\n\t\tlcc = lifecircle.NewPageFlow(w, r, result.Segment)\n\t}\n\tlcc.SetParameters(result.Parameters)\n\tlcc.SetEventName(result.EventName) // ?\n\n\t// set lcc to session\n\tlcc.SetToRequest(config.LCC_OBJECT_KEY, lcc)\n\n\t// Done: print some information.\n\tdefer func() {\n\t\t// fmt.Println(\"---- [defer] ------------------------\")\n\t\t// fmt.Println(\"Describe the page structure:\")\n\t\t// fmt.Println(lcc.PrintCallStructure())\n\n\t\t// fmt.Println(\"-- Page Result is ---------\")\n\t\t// fmt.Println(result)\n\t}()\n\n\t// Process result & returns.\n\tif !result.IsEventCall() {\n\t\t// page render flow\n\t\tlcc.PageFlow()\n\t\t// handleReturn(lcc, result.Segment)\n\t} else {\n\t\t// event call\n\t\tlcc.EventCall(result)\n\n\t\t// TODO wrong here. this is wrong. 
sudo refactor lifecircle-return.\n\t\t// if lcc not returned, return the current page.\n\t\tif lcc.Err != nil {\n\t\t\tpanic(lcc.Err.Error())\n\t\t}\n\t\t// // default return the current page.\n\t\t// if result.Segment != nil && lcc.ResultType == \"\" {\n\t\t// \turl := lcc.r.URL.Path\n\t\t// \thttp.Redirect(lcc.w, lcc.r, url, http.StatusFound)\n\t\t// }\n\t}\n}", "func (r *Router) Route(evt map[string]interface{}) (domain.Response, error) {\n\trecord := evt[\"Records\"].([]interface{})[0].(map[string]interface{})\n\troute, ok := r.routes[record[\"Sns\"].(map[string]interface{})[\"TopicArn\"].(string)]\n\tif !ok {\n\t\treturn nil, errors.New(\"handler func missing\")\n\t}\n\treturn route.Handler(NewInput(evt)), nil\n}", "func receiveRoute(w http.ResponseWriter, r *http.Request) {\n\troute := \"Receive\"\n\n\tquery := r.URL.Query()\n\treceiverID, senderID := query.Get(\"receiverID\"), query.Get(\"senderID\")\n\n\t// Optional Sender ID\n\tif senderID != \"\" {\n\t\thandleRoute(route, receiverID, senderID)\n\t} else {\n\t\thandleRoute(route, receiverID)\n\t}\n}", "func (r *Plugin) PostHandle(route iris.IRoute) {\n\tif r.routes == nil {\n\t\tr.routes = make([]RouteInfo, 0)\n\t}\n\tr.routes = append(r.routes, RouteInfo{route.GetMethod(), route.GetDomain(), route.GetPath(), time.Now()})\n}", "func (h *MxHandler) Handler(pattern *checkSelection, handler http.Handler) {\n\th.routes = append(h.routes, &route{pattern, handler})\n}", "func (b *Baa) Route(pattern, methods string, h ...HandlerFunc) RouteNode {\n\tvar ru RouteNode\n\tvar ms []string\n\tif methods == \"*\" {\n\t\tfor m := range RouterMethods {\n\t\t\tms = append(ms, m)\n\t\t}\n\t} else {\n\t\tms = strings.Split(methods, \",\")\n\t}\n\tfor _, m := range ms {\n\t\tru = b.Router().Add(strings.TrimSpace(m), pattern, h)\n\t}\n\treturn ru\n}", "func (r *Route) getHandler(method string, ex *routeExecution) {\n\t// check specific method match\n\tif h, ok := r.handlers[method]; ok {\n\t\tex.handler = h\n\t\treturn\n\t}\n\n\t// if this is a HEAD we can fall back on GET\n\tif method == http.MethodHead {\n\t\tif h, ok := r.handlers[http.MethodGet]; ok {\n\t\t\tex.handler = h\n\t\t\treturn\n\t\t}\n\t}\n\n\t// check the ANY handler\n\tif h, ok := r.handlers[methodAny]; ok {\n\t\tex.handler = h\n\t\treturn\n\t}\n\n\t// last ditch effort is to generate our own method not allowed handler\n\t// this is regenerated each time in case routes are added during runtime\n\t// not generated if a previous handler is already set\n\tif ex.handler == nil {\n\t\tex.handler = r.methodNotAllowed()\n\t}\n\treturn\n}", "func (r *Router) AddRoute(method string, path string, callback http.HandlerFunc) (err error) {\n\tkeys := setupKeys(strings.Split(path, \"/\"))\n\tpathParams := []string{}\n\n\tif r.root == nil {\n\t\tr.root = &segment{}\n\t\tr.root.children = map[string]*segment{}\n\t\tr.root.endpoints = map[string]*endpoint{}\n\t}\n\n\tcurr := r.root\n\n\tfor i, key := range keys {\n\t\tif i == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tif isParameter(key) {\n\t\t\tpathParams = append(pathParams, key[2:])\n\n\t\t}\n\n\t\tif child, _ := getChild(key, curr); child == nil {\n\t\t\tseg := addSegment(curr, key)\n\t\t\tcurr = seg\n\t\t} else {\n\t\t\tcurr = child\n\t\t}\n\t}\n\n\tif _, ok := curr.endpoints[method]; ok {\n\t\terr = errors.New(\"path already exists\")\n\n\t\treturn\n\t}\n\n\tcurr.endpoints[method] = &endpoint{callback, path, pathParams}\n\tr.routes = append(r.routes, route{callback, method, path})\n\n\treturn\n}", "func (r *Router) add(method string, pattern string, 
handlers []HandlerFunc) *Route {\n\tif _, ok := METHODS[method]; !ok {\n\t\tpanic(\"unsupported http method [\" + method + \"]\")\n\t}\n\tif pattern == \"\" {\n\t\tpanic(\"route pattern can not be empty!\")\n\t}\n\tif pattern[0] != '/' {\n\t\tpanic(\"route pattern must begin /\")\n\t}\n\tif len(pattern) > RouteMaxLength {\n\t\tpanic(fmt.Sprintf(\"route pattern max length limit %d\", RouteMaxLength))\n\t}\n\tr.mu.Lock()\n\tdefer r.mu.Unlock()\n\n\t// check group set, not concurrent safe\n\tif r.group.pattern != \"\" {\n\t\tpattern = r.group.pattern + pattern\n\t\tif len(r.group.handlers) > 0 {\n\t\t\th := make([]HandlerFunc, 0, len(r.group.handlers)+len(handlers))\n\t\t\th = append(h, r.group.handlers...)\n\t\t\th = append(h, handlers...)\n\t\t\thandlers = h[:]\n\t\t}\n\t}\n\n\troot := r.routeMap[method]\n\tradix := _radix[0:]\n\tvar j int\n\tvar k int\n\tvar tru *Route\n\tfor i := 0; i < len(pattern); i++ {\n\t\t//param route\n\t\tif pattern[i] == ':' {\n\t\t\t// clear static route\n\t\t\tif j > 0 {\n\t\t\t\troot = r.insert(root, newRoute(string(radix[:j]), nil, nil))\n\t\t\t\tj = 0\n\t\t\t}\n\t\t\t// set param route\n\t\t\tparam := _param[0:]\n\t\t\tk = 0\n\t\t\tfor i = i + 1; i < len(pattern); i++ {\n\t\t\t\tif !isParamChar(pattern[i]) {\n\t\t\t\t\ti--\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tparam[k] = pattern[i]\n\t\t\t\tk++\n\t\t\t}\n\t\t\tif k == 0 {\n\t\t\t\tpanic(\"route pattern param is empty\")\n\t\t\t}\n\t\t\tif k > RouterParamMaxLength {\n\t\t\t\tpanic(fmt.Sprintf(\"route pattern param max length limit %d\", RouterParamMaxLength))\n\t\t\t}\n\t\t\t// check last character\n\t\t\tp := \":\" + string(param[:k])\n\t\t\tif i == len(pattern) {\n\t\t\t\ttru = newRoute(p, handlers, r)\n\t\t\t} else {\n\t\t\t\ttru = newRoute(p, nil, nil)\n\t\t\t}\n\t\t\ttru.hasParam = true\n\t\t\troot = r.insert(root, tru)\n\t\t\tcontinue\n\t\t}\n\t\tradix[j] = pattern[i]\n\t\tj++\n\t}\n\n\t// static route\n\tif j > 0 {\n\t\ttru = newRoute(string(radix[:j]), handlers, r)\n\t\tr.insert(root, tru)\n\t}\n\n\treturn newRoute(pattern, handlers, r)\n}", "func NewRouteHandler(\n\tctx context.Context, protocol string) (RouteHandler, error) {\n\tif protocol == \"\" {\n\t\treturn nil, fmt.Errorf(\"Empty protocol\")\n\t}\n\tipPath, err := exec.LookPath(\"ip\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tr := ipRoute{\n\t\tprotocol: protocol,\n\t\tip: ipPath,\n\t}\n\treturn &r, nil\n}", "func (self *Route) Handle(context *Context) bool {\n\tif matches := self.Match(context.Path); matches != nil {\n\t\tif context_ := context.AppendName(self.Name); context == context_ {\n\t\t\tcontext = context.Copy()\n\t\t} else {\n\t\t\tcontext = context_\n\t\t}\n\n\t\tfor key, value := range matches {\n\t\t\tswitch key {\n\t\t\tcase PathVariable:\n\t\t\t\tcontext.Path = value\n\n\t\t\tdefault:\n\t\t\t\tcontext.Variables[key] = value\n\t\t\t}\n\t\t}\n\n\t\tif self.Handler != nil {\n\t\t\treturn self.Handler(context)\n\t\t}\n\t}\n\n\treturn false\n}" ]
[ "0.64635617", "0.64116824", "0.6350044", "0.62140054", "0.6200163", "0.6078606", "0.6064739", "0.60356003", "0.6023833", "0.6018736", "0.5923108", "0.59198534", "0.59184206", "0.59153384", "0.59059286", "0.58410233", "0.57837915", "0.57669526", "0.57576", "0.5736621", "0.5689622", "0.56532663", "0.5649393", "0.5647566", "0.5647327", "0.5621295", "0.56013596", "0.5584263", "0.5580134", "0.556613", "0.55628645", "0.556268", "0.55564624", "0.5552239", "0.5534078", "0.55228496", "0.5522816", "0.5520528", "0.55127144", "0.5506078", "0.5487578", "0.54869074", "0.54830205", "0.54811096", "0.54797703", "0.54743946", "0.5472939", "0.5470806", "0.545417", "0.544445", "0.5440334", "0.5425184", "0.54182047", "0.5416724", "0.5410421", "0.5402337", "0.540163", "0.5399098", "0.539711", "0.538865", "0.5377059", "0.5373238", "0.537129", "0.53645664", "0.5362267", "0.5361203", "0.53534317", "0.5351776", "0.53429747", "0.5342679", "0.534132", "0.53379744", "0.53370404", "0.53366596", "0.53358454", "0.5334392", "0.532939", "0.5327616", "0.5324474", "0.532168", "0.53192294", "0.531593", "0.53149426", "0.5313512", "0.53111166", "0.5305558", "0.53051555", "0.5302947", "0.5299586", "0.5292627", "0.52925396", "0.5282227", "0.52745", "0.52618146", "0.52546597", "0.5246671", "0.5243272", "0.52430516", "0.5236739", "0.5230882" ]
0.5844667
15
StartEventSource starts an event source
func (ese *GitlabEventSourceExecutor) StartEventSource(eventSource *gateways.EventSource, eventStream gateways.Eventing_StartEventSourceServer) error { defer gateways.Recover(eventSource.Name) log := ese.Log.WithEventSource(eventSource.Name) log.Info("operating on event source") config, err := parseEventSource(eventSource.Data) if err != nil { log.WithError(err).Error("failed to parse event source") return err } gl := config.(*gitlabEventSource) return gwcommon.ProcessRoute(&RouteConfig{ route: &gwcommon.Route{ EventSource: eventSource, Logger: ese.Log, Webhook: gl.Hook, StartCh: make(chan struct{}), }, namespace: ese.Namespace, clientset: ese.Clientset, ges: gl, }, helper, eventStream) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (ese *SlackEventSourceExecutor) StartEventSource(eventSource *gateways.EventSource, eventStream gateways.Eventing_StartEventSourceServer) error {\n\tdefer gateways.Recover(eventSource.Name)\n\n\tlog := ese.Log.WithEventSource(eventSource.Name)\n\tlog.Info(\"operating on event source\")\n\n\tconfig, err := parseEventSource(eventSource.Data)\n\tif err != nil {\n\t\tlog.WithError(err).Error(\"failed to parse event source\")\n\t\treturn err\n\t}\n\n\tses := config.(*slackEventSource)\n\n\ttoken, err := store.GetSecrets(ese.Clientset, ese.Namespace, ses.Token.Name, ses.Token.Key)\n\tif err != nil {\n\t\tlog.WithError(err).Error(\"failed to retrieve token\")\n\t\treturn err\n\t}\n\n\treturn gwcommon.ProcessRoute(&RouteConfig{\n\t\troute: &gwcommon.Route{\n\t\t\tLogger: ese.Log,\n\t\t\tStartCh: make(chan struct{}),\n\t\t\tWebhook: ses.Hook,\n\t\t\tEventSource: eventSource,\n\t\t},\n\t\ttoken: token,\n\t\tclientset: ese.Clientset,\n\t\tnamespace: ese.Namespace,\n\t\tses: ses,\n\t}, helper, eventStream)\n}", "func (listener *EventListener) StartEventSource(eventSource *gateways.EventSource, eventStream gateways.Eventing_StartEventSourceServer) error {\n\tdefer server.Recover(eventSource.Name)\n\n\tlistener.Logger.WithField(common.LabelEventSource, eventSource.Name).Infoln(\"started processing the event source...\")\n\n\tctx := eventStream.Context()\n\n\tdataCh := make(chan []byte)\n\terrorCh := make(chan error)\n\tdoneCh := make(chan struct{}, 1)\n\n\tgo listener.listenEvents(ctx, eventSource, dataCh, errorCh, doneCh)\n\treturn server.HandleEventsFromEventSource(eventSource.Name, eventStream, dataCh, errorCh, doneCh, listener.Logger)\n}", "func (listener *EventListener) StartEventSource(eventSource *gateways.EventSource, eventStream gateways.Eventing_StartEventSourceServer) error {\n\tdefer server.Recover(eventSource.Name)\n\n\tlog := listener.Logger.WithField(common.LabelEventSource, eventSource.Name)\n\tlog.Info(\"started processing the event source...\")\n\n\tdataCh := make(chan []byte)\n\terrorCh := make(chan error)\n\tdoneCh := make(chan struct{}, 1)\n\n\tgo listener.listenEvents(eventSource, dataCh, errorCh, doneCh)\n\treturn server.HandleEventsFromEventSource(eventSource.Name, eventStream, dataCh, errorCh, doneCh, listener.Logger)\n}", "func (listener *EventListener) StartEventSource(eventSource *gateways.EventSource, eventStream gateways.Eventing_StartEventSourceServer) error {\n\tlistener.Logger.WithField(common.LabelEventSource, eventSource.Name).Infoln(\"started processing the event source...\")\n\n\tchannels := server.NewChannels()\n\n\tgo server.HandleEventsFromEventSource(eventSource.Name, eventStream, channels, listener.Logger)\n\n\tdefer func() {\n\t\tchannels.Stop <- struct{}{}\n\t}()\n\n\tif err := listener.listenEvents(eventSource, channels); err != nil {\n\t\tlistener.Logger.WithField(common.LabelEventSource, eventSource.Name).WithError(err).Errorln(\"failed to listen to events\")\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (listener *EventListener) StartEventSource(eventSource *gateways.EventSource, eventStream gateways.Eventing_StartEventSourceServer) error {\n\tlistener.Logger.WithField(common.LabelEventSource, eventSource.Name).Infoln(\"started processing the event source...\")\n\n\tchannels := server.NewChannels()\n\n\tgo server.HandleEventsFromEventSource(eventSource.Name, eventStream, channels, listener.Logger)\n\n\tdefer func() {\n\t\tchannels.Stop <- struct{}{}\n\t}()\n\n\tif err := listener.listenEvents(eventSource, channels); err != nil 
{\n\t\tlistener.Logger.WithField(common.LabelEventSource, eventSource.Name).WithError(err).Errorln(\"failed to listen to events\")\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (listener *EventListener) StartEventSource(eventSource *gateways.EventSource, eventStream gateways.Eventing_StartEventSourceServer) error {\n\tlistener.Logger.WithField(common.LabelEventSource, eventSource.Name).Infoln(\"started processing the event source...\")\n\n\tchannels := server.NewChannels()\n\n\tgo server.HandleEventsFromEventSource(eventSource.Name, eventStream, channels, listener.Logger)\n\n\tdefer func() {\n\t\tchannels.Stop <- struct{}{}\n\t}()\n\n\tif err := listener.listenEvents(eventSource, channels); err != nil {\n\t\tlistener.Logger.WithField(common.LabelEventSource, eventSource.Name).WithError(err).Errorln(\"failed to listen to events\")\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (listener *EventListener) StartEventSource(eventSource *gateways.EventSource, eventStream gateways.Eventing_StartEventSourceServer) error {\n\tdefer server.Recover(eventSource.Name)\n\n\tlogger := listener.Logger.WithField(common.LabelEventSource, eventSource.Name)\n\n\tlogger.Infoln(\"started processing the event source...\")\n\n\tlogger.Infoln(\"parsing slack event source...\")\n\n\tvar slackEventSource *v1alpha1.SlackEventSource\n\tif err := yaml.Unmarshal(eventSource.Value, &slackEventSource); err != nil {\n\t\tlogger.WithError(err).Errorln(\"failed to parse the event source\")\n\t\treturn err\n\t}\n\n\tif slackEventSource.Namespace == \"\" {\n\t\tslackEventSource.Namespace = listener.Namespace\n\t}\n\n\tlogger.Infoln(\"retrieving the slack token...\")\n\ttoken, err := store.GetSecrets(listener.K8sClient, slackEventSource.Namespace, slackEventSource.Token.Name, slackEventSource.Token.Key)\n\tif err != nil {\n\t\tlogger.WithError(err).Error(\"failed to retrieve the token\")\n\t\treturn err\n\t}\n\n\tlogger.Infoln(\"retrieving the signing secret...\")\n\tsigningSecret, err := store.GetSecrets(listener.K8sClient, slackEventSource.Namespace, slackEventSource.SigningSecret.Name, slackEventSource.SigningSecret.Key)\n\tif err != nil {\n\t\tlogger.WithError(err).Warn(\"failed to retrieve the signing secret\")\n\t\treturn err\n\t}\n\n\troute := webhook.NewRoute(slackEventSource.Webhook, listener.Logger, eventSource)\n\n\treturn webhook.ManageRoute(&Router{\n\t\troute: route,\n\t\ttoken: token,\n\t\tsigningSecret: signingSecret,\n\t\tk8sClient: listener.K8sClient,\n\t\tslackEventSource: slackEventSource,\n\t}, controller, eventStream)\n}", "func (s *SourceControl) Start(sourceName *string, reply *bool) error {\n\t*reply = false\n\tif s.isSourceActive {\n\t\treturn fmt.Errorf(\"already have active source, do not start\")\n\t}\n\tname := strings.ToUpper(*sourceName)\n\tswitch name {\n\tcase \"SIMPULSESOURCE\":\n\t\ts.ActiveSource = DataSource(s.simPulses)\n\t\ts.status.SourceName = \"SimPulses\"\n\n\tcase \"TRIANGLESOURCE\":\n\t\ts.ActiveSource = DataSource(s.triangle)\n\t\ts.status.SourceName = \"Triangles\"\n\n\tcase \"LANCEROSOURCE\":\n\t\ts.ActiveSource = DataSource(s.lancero)\n\t\ts.status.SourceName = \"Lancero\"\n\n\tcase \"ROACHSOURCE\":\n\t\ts.ActiveSource = DataSource(s.roach)\n\t\ts.status.SourceName = \"Roach\"\n\n\tcase \"ABACOSOURCE\":\n\t\ts.ActiveSource = DataSource(s.abaco)\n\t\ts.status.SourceName = \"Abaco\"\n\n\tcase \"ERRORINGSOURCE\":\n\t\ts.ActiveSource = DataSource(s.erroring)\n\t\ts.status.SourceName = \"Erroring\"\n\n\tdefault:\n\t\treturn fmt.Errorf(\"data Source \\\"%s\\\" is not recognized\", 
*sourceName)\n\t}\n\n\tlog.Printf(\"Starting data source named %s\\n\", *sourceName)\n\ts.status.Running = true\n\tif err := Start(s.ActiveSource, s.queuedRequests, s.status.Npresamp, s.status.Nsamples); err != nil {\n\t\ts.status.Running = false\n\t\ts.isSourceActive = false\n\t\treturn err\n\t}\n\ts.isSourceActive = true\n\ts.status.SamplePeriod = s.ActiveSource.SamplePeriod()\n\ts.status.Nchannels = s.ActiveSource.Nchan()\n\ts.status.ChanGroups = s.ActiveSource.ChanGroups()\n\ts.broadcastStatus()\n\ts.broadcastTriggerState()\n\ts.broadcastGroupTriggerState()\n\ts.broadcastChannelNames()\n\ts.storeChannelGroups()\n\t*reply = true\n\treturn nil\n}", "func (es *EventStream) Start() {\n\tif es.Events == nil {\n\t\tes.Events = make(chan []Event)\n\t}\n\n\t// register eventstream in the local registry for later lookup\n\t// in C callback\n\tcbInfo := registry.Add(es)\n\tes.registryID = cbInfo\n\tes.uuid = GetDeviceUUID(es.Device)\n\tes.start(es.Paths, cbInfo)\n}", "func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byte, ...eventsourcecommon.Option) error) error {\n\tlog := logging.FromContext(ctx).\n\t\tWith(logging.LabelEventSourceType, el.GetEventSourceType(), logging.LabelEventName, el.GetEventName())\n\n\tlog.Info(\"started processing the AMQP event source...\")\n\tdefer sources.Recover(el.GetEventName())\n\n\tamqpEventSource := &el.AMQPEventSource\n\tvar conn *amqplib.Connection\n\tif err := common.DoWithRetry(amqpEventSource.ConnectionBackoff, func() error {\n\t\tc := amqplib.Config{\n\t\t\tHeartbeat: 10 * time.Second,\n\t\t\tLocale: \"en_US\",\n\t\t}\n\t\tif amqpEventSource.TLS != nil {\n\t\t\ttlsConfig, err := common.GetTLSConfig(amqpEventSource.TLS)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"failed to get the tls configuration, %w\", err)\n\t\t\t}\n\t\t\tc.TLSClientConfig = tlsConfig\n\t\t}\n\t\tif amqpEventSource.Auth != nil {\n\t\t\tusername, err := common.GetSecretFromVolume(amqpEventSource.Auth.Username)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"username not found, %w\", err)\n\t\t\t}\n\t\t\tpassword, err := common.GetSecretFromVolume(amqpEventSource.Auth.Password)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"password not found, %w\", err)\n\t\t\t}\n\t\t\tc.SASL = []amqplib.Authentication{&amqplib.PlainAuth{\n\t\t\t\tUsername: username,\n\t\t\t\tPassword: password,\n\t\t\t}}\n\t\t}\n\t\tvar err error\n\t\tvar url string\n\t\tif amqpEventSource.URLSecret != nil {\n\t\t\turl, err = common.GetSecretFromVolume(amqpEventSource.URLSecret)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"urlSecret not found, %w\", err)\n\t\t\t}\n\t\t} else {\n\t\t\turl = amqpEventSource.URL\n\t\t}\n\t\tconn, err = amqplib.DialConfig(url, c)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t}); err != nil {\n\t\treturn fmt.Errorf(\"failed to connect to amqp broker for the event source %s, %w\", el.GetEventName(), err)\n\t}\n\n\tlog.Info(\"opening the server channel...\")\n\tch, err := conn.Channel()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to open the channel for the event source %s, %w\", el.GetEventName(), err)\n\t}\n\n\tlog.Info(\"checking parameters and set defaults...\")\n\tsetDefaults(amqpEventSource)\n\n\tlog.Info(\"setting up the delivery channel...\")\n\tdelivery, err := getDelivery(ch, amqpEventSource)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to get the delivery for the event source %s, %w\", el.GetEventName(), err)\n\t}\n\n\tif amqpEventSource.JSONBody {\n\t\tlog.Info(\"assuming all events have a json 
body...\")\n\t}\n\n\tlog.Info(\"listening to messages on channel...\")\n\tfor {\n\t\tselect {\n\t\tcase msg, ok := <-delivery:\n\t\t\tif !ok {\n\t\t\t\tlog.Error(\"failed to read a message, channel might have been closed\")\n\t\t\t\treturn fmt.Errorf(\"channel might have been closed\")\n\t\t\t}\n\t\t\tif err := el.handleOne(amqpEventSource, msg, dispatch, log); err != nil {\n\t\t\t\tlog.Errorw(\"failed to process an AMQP message\", zap.Error(err))\n\t\t\t\tel.Metrics.EventProcessingFailed(el.GetEventSourceName(), el.GetEventName())\n\t\t\t}\n\t\tcase <-ctx.Done():\n\t\t\terr = conn.Close()\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorw(\"failed to close connection\", zap.Error(err))\n\t\t\t}\n\t\t\treturn nil\n\t\t}\n\t}\n}", "func newEventSource(opts ...sourceOption) *v1alpha1.AWSSNSSource {\n\tsrc := &v1alpha1.AWSSNSSource{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tNamespace: tNs,\n\t\t\tName: tName,\n\t\t},\n\t\tStatus: v1alpha1.AWSSNSSourceStatus{\n\t\t\tStatus: commonv1alpha1.Status{\n\t\t\t\tSourceStatus: duckv1.SourceStatus{\n\t\t\t\t\tSinkURI: tSinkURI,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\t// *reconcilerImpl.Reconcile calls this method before any reconciliation loop. Calling it here ensures that the\n\t// object is initialized in the same manner, and prevents tests from wrongly reporting unexpected status updates.\n\treconciler.PreProcessReconcile(context.Background(), src)\n\n\tfor _, opt := range opts {\n\t\topt(src)\n\t}\n\n\treturn src\n}", "func NewStreamStartEvent(stream_id string) *Event {\n\ts := (*C.gchar)(C.CString(stream_id))\n\tdefer C.free(unsafe.Pointer(s))\n\tr := new(Event)\n\tr.SetPtr(glib.Pointer(C.gst_event_new_stream_start(s)))\n\treturn r\n}", "func (s *Basegff3Listener) EnterSource(ctx *SourceContext) {}", "func (s *EventStore) StartTracing() {\n\ts.traceMu.Lock()\n\tdefer s.traceMu.Unlock()\n\n\ts.tracing = true\n}", "func startLogEmitter(bus eventbus.EventDispatcher, src source, sinks []sink, done func()) {\n\tif done != nil {\n\t\tdefer done()\n\t}\n\tscanner := bufio.NewScanner(src.in)\n\tfor scanner.Scan() {\n\t\tdata := scanner.Bytes()\n\t\tsrc.q.Add(string(data))\n\n\t\tpayload := LogEvent{\n\t\t\tTimestamp: time.Now().UTC(),\n\t\t\tLine: data,\n\t\t}\n\t\tevt, err := eventbus.NewEvent(LogLine, payload)\n\t\tif err != nil {\n\t\t\tnewError(bus, EventError{fmt.Errorf(\"unable to construct log event: %v\", err)})\n\t\t}\n\t\tbus.Dispatch(evt, LogTopic)\n\n\t\tfor _, s := range sinks {\n\t\t\tif _, err := s.out.Write(append(data, '\\n')); err != nil {\n\t\t\t\tnewError(bus, SinkError{fmt.Errorf(\"error writing to sink %s: %v\", src.name, err)})\n\t\t\t}\n\t\t}\n\t}\n}", "func FromEventSource(ch chan interface{}, opts ...options.Option) Observable {\n\treturn newHotObservableFromChannel(ch, opts...)\n}", "func (s *Streamer) Start() error {\n\ts.mu.Lock()\n\tif s.state != stateStopped {\n\t\ts.mu.Unlock()\n\t\treturn ErrRunning\n\t}\n\ts.state = stateStarting\n\ts.mu.Unlock()\n\n\terr := s.init()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Attach to fs notification system\n\ts.threads.Add(2)\n\tgo s.sendChangeEvents()\n\tgo s.logNotifyErrors()\n\n\t// Start streaming service\n\ts.threads.Add(1)\n\tgo s.eventsRouter()\n\n\ts.mu.Lock()\n\ts.state = stateRunning\n\ts.mu.Unlock()\n\n\treturn nil\n}", "func StartEventSubscibe(conf *config.Config) (cancel context.CancelFunc, err error) {\n\n\tvar subEventFrom string\n\tes := make(chan error, 1024) //TODO 1024根据节点数需要修改\n\n\t// for _, qsconfig := range config.DefaultQscConfig() {\n\tfor _, qsconfig := range 
conf.Qscs {\n\t\tfor _, nodeAddr := range strings.Split(qsconfig.NodeAddress, \",\") {\n\t\t\tgo EventsSubscribe(conf, \"tcp://\"+nodeAddr, es)\n\t\t\tsubEventFrom += fmt.Sprintf(\"[%s] \", nodeAddr)\n\n\t\t}\n\t}\n\n\tif len(es) > 0 {\n\t\treturn nil, errors.New(\"subscibe events failed\")\n\t}\n\n\tlog.Infof(\"subscibed events from %s\", subEventFrom)\n\n\treturn\n}", "func (s *TestSource) Start(ctx context.Context) error {\n\tgo s.closer(ctx)\n\n\treturn nil\n}", "func newEventSource() *v1alpha1.AWSSNSSource {\n\tsrc := &v1alpha1.AWSSNSSource{\n\t\tSpec: v1alpha1.AWSSNSSourceSpec{\n\t\t\tARN: tTopicARN,\n\t\t\tSubscriptionAttributes: map[string]*string{\n\t\t\t\t\"DeliveryPolicy\": aws.String(`{\"healthyRetryPolicy\":{\"numRetries\":5}}`),\n\t\t\t},\n\t\t},\n\t}\n\n\t// assume finalizer is already set to prevent the generated reconciler\n\t// from generating an extra Patch action\n\tsrc.Finalizers = []string{sources.AWSSNSSourceResource.String()}\n\n\tPopulate(src)\n\n\treturn src\n}", "func (w *wsEvents) Start(ctx context.Context) error {\n\tif err := w.ws.Start(ctx); err != nil {\n\t\treturn err\n\t}\n\tgo w.eventListener(ctx)\n\treturn nil\n}", "func (es Streamer) Start(logs chan<- types.EventData, errs chan<- error) {\n\tapps := LoadApplications(es.RegistryPath)\n\n\tes.logs = logs\n\tes.errs = errs\n\n\tclient, err := ethclient.Dial(es.WebsocketURL)\n\tif err != nil {\n\t\tes.errs <- err\n\t}\n\n\tchainID, err := client.NetworkID(context.Background())\n\tif err != nil {\n\t\tes.errs <- err\n\t}\n\tlog.Info(fmt.Sprintf(\"Connected to Ethereum chain ID %s\\n\", chainID))\n\n\t// Start application subscriptions\n\tappEvents := make(chan ctypes.Log)\n\tfor _, app := range apps {\n\t\tquery := es.buildSubscriptionFilter(app)\n\n\t\t// Start the contract subscription\n\t\t_, err := client.SubscribeFilterLogs(context.Background(), query, appEvents)\n\t\tif err != nil {\n\t\t\tlog.Info(fmt.Sprintf(\"Failed to subscribe to app %s\\n\", app.ID))\n\t\t} else {\n\t\t\tlog.Info(fmt.Sprintf(\"Subscribed to app %s\\n\", app.ID))\n\t\t}\n\t}\n\n\tfor {\n\t\tselect {\n\t\t// case err := <-sub.Err(): // TODO: capture subscription errors\n\t\t// \tes.errs <- err\n\t\tcase vLog := <-appEvents:\n\t\t\tlog.Info(fmt.Sprintf(\"Witnessed tx %s on app %s\\n\", vLog.TxHash.Hex(), vLog.Address.Hex()))\n\t\t\teventData := types.NewEventData(vLog.Address, vLog)\n\t\t\tes.logs <- eventData\n\t\t}\n\t}\n}", "func (s *EventService) Start(producer EventProducer) {\n\ts.dispatcher.Start()\n\tproducer.Register(s.dispatcher.EventCh())\n}", "func (_LvRecordableStream *LvRecordableStreamTransactor) StartStream(opts *bind.TransactOpts, _handle string) (*types.Transaction, error) {\n\treturn _LvRecordableStream.contract.Transact(opts, \"startStream\", _handle)\n}", "func (ds *SingleTargetDataSink) Start() {\n\taddr := config.GetGlobalConfig().TopSQL.ReceiverAddress\n\tif addr != \"\" {\n\t\tds.curRPCAddr = addr\n\t\terr := ds.registerer.Register(ds)\n\t\tif err == nil {\n\t\t\tds.registered.Store(true)\n\t\t} else {\n\t\t\tlogutil.BgLogger().Warn(\"failed to register single target datasink\", zap.Error(err))\n\t\t}\n\t}\n\n\tgo ds.recoverRun()\n}", "func (s *Source) Start() {\n\t// spin off a goroutine to send on our channel so Start return immediately.\n\tgo func() {\n\t\tinterval := time.Duration(s.Sleep) * time.Millisecond\n\t\tfor {\n\t\t\ttime.Sleep(interval)\n\t\t\tmsg := Msg{Value: \"blah\"}\n\t\t\ts.SendChan <- msg\n\t\t}\n\t}()\n}", "func (_LvRecordableStream *LvRecordableStreamFilterer) FilterStartStream(opts 
*bind.FilterOpts) (*LvRecordableStreamStartStreamIterator, error) {\n\n\tlogs, sub, err := _LvRecordableStream.contract.FilterLogs(opts, \"StartStream\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &LvRecordableStreamStartStreamIterator{contract: _LvRecordableStream.contract, event: \"StartStream\", logs: logs, sub: sub}, nil\n}", "func (s *Service) CreateSource(ctx context.Context, src *influxdb.Source) error {\n\terr := s.kv.Update(ctx, func(tx Tx) error {\n\t\tsrc.ID = s.IDGenerator.ID()\n\n\t\t// Generating an organization id if it missing or invalid\n\t\tif !src.OrganizationID.Valid() {\n\t\t\tsrc.OrganizationID = s.IDGenerator.ID()\n\t\t}\n\n\t\treturn s.putSource(ctx, tx, src)\n\t})\n\tif err != nil {\n\t\treturn &influxdb.Error{\n\t\t\tErr: err,\n\t\t}\n\t}\n\treturn nil\n}", "func (sm *SinkManager) Start(newAppServiceChan, deletedAppServiceChan <-chan store.AppService) {\n\tgo sm.listenForNewAppServices(newAppServiceChan)\n\tgo sm.listenForDeletedAppServices(deletedAppServiceChan)\n\n\tsm.listenForErrorMessages()\n}", "func (t *Tracker) Start() {\n\tt.started.Store(true)\n\tlogrus.Info(\"Starting event tracker\")\n\tvar e *event.Event\n\tvar f QueryFunc\n\tfor {\n\t\tselect {\n\t\tcase e = <-t.inChan:\n\t\t\tt.trackEvent(e)\n\t\tcase f = <-t.queryChan:\n\t\t\tt.query(f)\n\t\t}\n\t}\n}", "func (s *Basegff3Listener) EnterStart(ctx *StartContext) {}", "func NewOnStreamStart() *OnStreamStart {\n\treturn &OnStreamStart{\n\t\tCommand: OnStreamStartEvent,\n\t}\n}", "func (s *Stream) StartStream(optionalQueryParams string) error {\n\tres, err := s.httpClient.GetSearchStream(optionalQueryParams)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ts.reader.setStreamResponseBody(res.Body)\n\n\tgo s.streamMessages(res)\n\n\treturn nil\n}", "func StartTracer(cb func(*probe.HostFlow)) error {\n\tt := &tcpTracer{}\n\tt.evChan = make(chan interface{})\n\ttr, err := tracer.NewTracer(t)\n\tif err != nil {\n\t\treturn xerrors.Errorf(\"failed to create an instance of tcp-tracer: %w\", err)\n\t}\n\n\ttr.Start()\n\n\t// TODO: scan /proc\n\t// Should tr.AddFdInstallWatcher be executed each listening process here?\n\n\tfor ev := range t.evChan {\n\t\tswitch v := ev.(type) {\n\t\tcase tracer.TcpV4:\n\t\t\tvar pgid int\n\t\t\tif v.Type == tracer.EventConnect || v.Type == tracer.EventAccept {\n\t\t\t\tvar err error\n\t\t\t\tpgid, err = syscall.Getpgid(int(v.Pid))\n\t\t\t\tif err != nil {\n\t\t\t\t\tpgid = int(v.Pid)\n\t\t\t\t}\n\t\t\t}\n\t\t\tproc := &probe.Process{Name: v.Comm, Pgid: pgid}\n\n\t\t\tif v.Type == tracer.EventConnect {\n\t\t\t\tcb(&probe.HostFlow{\n\t\t\t\t\tDirection: probe.FlowActive,\n\t\t\t\t\tLocal: &probe.AddrPort{Addr: v.SAddr.String(), Port: \"many\"},\n\t\t\t\t\tPeer: &probe.AddrPort{Addr: v.DAddr.String(), Port: fmt.Sprintf(\"%d\", v.DPort)},\n\t\t\t\t\tProcess: proc,\n\t\t\t\t})\n\t\t\t} else if v.Type == tracer.EventAccept {\n\t\t\t\tcb(&probe.HostFlow{\n\t\t\t\t\tDirection: probe.FlowPassive,\n\t\t\t\t\tLocal: &probe.AddrPort{Addr: v.SAddr.String(), Port: fmt.Sprintf(\"%d\", v.SPort)},\n\t\t\t\t\tPeer: &probe.AddrPort{Addr: v.DAddr.String(), Port: \"many\"},\n\t\t\t\t\tProcess: proc,\n\t\t\t\t})\n\t\t\t}\n\t\t\t// TODO: handling close\n\t\t}\n\t}\n\n\treturn nil\n}", "func New() *EventSource {\n\tinput := make(chan Event)\n\tcontrol := make(chan interface{})\n\n\tes := &EventSource{\n\t\tEvents: input,\n\t\tcontrol: control,\n\t}\n\n\tgo es.run(input, control)\n\treturn es\n}", "func (gatewayContext *GatewayContext) newEventSourceWatch(eventSourceRef 
*v1alpha1.EventSourceRef) *cache.ListWatch {\n\tclient := gatewayContext.eventSourceClient.ArgoprojV1alpha1().RESTClient()\n\tresource := \"eventsources\"\n\n\tif eventSourceRef.Namespace == \"\" {\n\t\teventSourceRef.Namespace = gatewayContext.namespace\n\t}\n\n\tfieldSelector := fields.ParseSelectorOrDie(fmt.Sprintf(\"metadata.name=%s\", eventSourceRef.Name))\n\n\tlistFunc := func(options metav1.ListOptions) (runtime.Object, error) {\n\t\toptions.FieldSelector = fieldSelector.String()\n\t\treq := client.Get().\n\t\t\tNamespace(eventSourceRef.Namespace).\n\t\t\tResource(resource).\n\t\t\tVersionedParams(&options, metav1.ParameterCodec)\n\t\treturn req.Do().Get()\n\t}\n\twatchFunc := func(options metav1.ListOptions) (watch.Interface, error) {\n\t\toptions.Watch = true\n\t\toptions.FieldSelector = fieldSelector.String()\n\t\treq := client.Get().\n\t\t\tNamespace(eventSourceRef.Namespace).\n\t\t\tResource(resource).\n\t\t\tVersionedParams(&options, metav1.ParameterCodec)\n\t\treturn req.Watch()\n\t}\n\treturn &cache.ListWatch{ListFunc: listFunc, WatchFunc: watchFunc}\n}", "func (client ServicesClient) StartSender(req *http.Request) (future ServicesStartFuture, err error) {\n\tvar resp *http.Response\n\tfuture.FutureAPI = &azure.Future{}\n\tresp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))\n\tif err != nil {\n\t\treturn\n\t}\n\tvar azf azure.Future\n\tazf, err = azure.NewFutureFromResponse(resp)\n\tfuture.FutureAPI = &azf\n\tfuture.Result = future.result\n\treturn\n}", "func AddSource(parameters *api.PublishEventParameters) (resp *api.SendEventParameters, err error) {\n\tif err := checkConf(); err != nil {\n\t\treturn nil, err\n\t}\n\n\tsendRequest := api.SendEventParameters{\n\t\tSourceID: conf.sourceID, // enrich the event with the sourceID\n\t\tEventType: parameters.Publishrequest.EventType,\n\t\tEventTypeVersion: parameters.Publishrequest.EventTypeVersion,\n\t\tEventID: parameters.Publishrequest.EventID,\n\t\tEventTime: parameters.Publishrequest.EventTime,\n\t\tData: parameters.Publishrequest.Data,\n\t}\n\n\treturn &sendRequest, nil\n}", "func (n *nats) Start(events chan job.Event) error {\n\tn.Log.Info(\"starting\", zap.String(\"url\", n.Stream.URL), zap.String(\"subject\", n.subject))\n\tnatsConn, err := natsio.Connect(n.Stream.URL)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to connect to nats cluster url %s. Cause: %+v\", n.Stream.URL, err.Error())\n\t}\n\tn.natsSubscription, err = natsConn.ChanSubscribe(n.subject, n.msgCh)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to subscribe to nats subject %s. Cause: %+v\", n.subject, err.Error())\n\t}\n\n\tgo n.listen(events)\n\treturn nil\n}", "func (s *ServerlessTraceAgent) Start(enabled bool, loadConfig Load, lambdaSpanChan chan<- *pb.Span, coldStartSpanId uint64) {\n\tif enabled {\n\t\t// Set the serverless config option which will be used to determine if\n\t\t// hostname should be resolved. 
Skipping hostname resolution saves >1s\n\t\t// in load time between gRPC calls and agent commands.\n\t\tddConfig.Datadog.Set(\"serverless.enabled\", true)\n\n\t\ttc, confErr := loadConfig.Load()\n\t\tif confErr != nil {\n\t\t\tlog.Errorf(\"Unable to load trace agent config: %s\", confErr)\n\t\t} else {\n\t\t\tcontext, cancel := context.WithCancel(context.Background())\n\t\t\ttc.Hostname = \"\"\n\t\t\ttc.SynchronousFlushing = true\n\t\t\ts.ta = agent.NewAgent(context, tc, telemetry.NewNoopCollector())\n\t\t\ts.spanModifier = &spanModifier{\n\t\t\t\tcoldStartSpanId: coldStartSpanId,\n\t\t\t\tlambdaSpanChan: lambdaSpanChan,\n\t\t\t}\n\n\t\t\ts.ta.ModifySpan = s.spanModifier.ModifySpan\n\t\t\ts.ta.DiscardSpan = filterSpanFromLambdaLibraryOrRuntime\n\t\t\ts.cancel = cancel\n\t\t\tgo s.ta.Run()\n\t\t}\n\t}\n}", "func AzsbEventSource(namespace, azsbSourceName, topic string) string {\n\treturn fmt.Sprintf(\"/apis/v1/namespaces/%s/azsbsources/%s/%s\", namespace, azsbSourceName, topic)\n}", "func (s *BaseGShellListener) EnterStart(ctx *StartContext) {}", "func (_LvRecordableStream *LvRecordableStreamFilterer) WatchStartStream(opts *bind.WatchOpts, sink chan<- *LvRecordableStreamStartStream) (event.Subscription, error) {\n\n\tlogs, sub, err := _LvRecordableStream.contract.WatchLogs(opts, \"StartStream\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn event.NewSubscription(func(quit <-chan struct{}) error {\n\t\tdefer sub.Unsubscribe()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase log := <-logs:\n\t\t\t\t// New log arrived, parse the event and forward to the user\n\t\t\t\tevent := new(LvRecordableStreamStartStream)\n\t\t\t\tif err := _LvRecordableStream.contract.UnpackLog(event, \"StartStream\", log); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tevent.Raw = log\n\n\t\t\t\tselect {\n\t\t\t\tcase sink <- event:\n\t\t\t\tcase err := <-sub.Err():\n\t\t\t\t\treturn err\n\t\t\t\tcase <-quit:\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\tcase err := <-sub.Err():\n\t\t\t\treturn err\n\t\t\tcase <-quit:\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}), nil\n}", "func (source *ipv6IpamSource) start(sink addressConfigSink) error {\n\tsource.sink = sink\n\treturn nil\n}", "func (c *Collector) Start() {\n\tgo c.Source.Start()\n\tc.collect()\n}", "func (e *syncEvent) EventStart(uint) {}", "func AcceptEventSource(l net.Listener) {\n\tconn, e := l.Accept()\n\n\tdefer signal()\n\tdefer l.Close()\n\tdefer conn.Close()\n\n\tif e != nil {\n\t\tlog.Printf(\"Failed to accept an event source connection with error: %s\\n\", e.Error())\n\t\treturn\n\t}\n\thandler := InitEventHandler(conn)\n\thandler.read()\n}", "func (sub EthereumSub) startContractEventSub(logs chan ctypes.Log, client *ethclient.Client,\n\tcontractName txs.ContractRegistry) (common.Address, ethereum.Subscription) {\n\t// Get the contract address for this subscription\n\tsubContractAddress, err := txs.GetAddressFromBridgeRegistry(client, sub.RegistryContractAddress, contractName)\n\tif err != nil {\n\t\tsub.Logger.Error(err.Error())\n\t}\n\n\t// We need the address in []bytes for the query\n\tsubQuery := ethereum.FilterQuery{\n\t\tAddresses: []common.Address{subContractAddress},\n\t}\n\n\t// Start the contract subscription\n\tcontractSub, err := client.SubscribeFilterLogs(context.Background(), subQuery, logs)\n\tif err != nil {\n\t\tsub.Logger.Error(err.Error())\n\t}\n\tsub.Logger.Info(fmt.Sprintf(\"Subscribed to %v contract at address: %s\", contractName, subContractAddress.Hex()))\n\treturn subContractAddress, contractSub\n}", "func (m *Manager) Start(isOffer 
bool, remoteUfrag, remotePwd string) error {\n\tm.isOffer = isOffer\n\n\t// Start the sctpAssociation\n\tm.sctpAssociation.Start(isOffer)\n\n\tif err := m.IceAgent.Start(isOffer, remoteUfrag, remotePwd); err != nil {\n\t\treturn err\n\t}\n\t// Start DTLS\n\tm.dtlsState.Start(isOffer)\n\n\treturn nil\n}", "func (g *Given) EventSource(text string) *Given {\n\tg.t.Helper()\n\tg.eventSource = &eventsourcev1alpha1.EventSource{}\n\tg.readResource(text, g.eventSource)\n\tl := g.eventSource.GetLabels()\n\tif l == nil {\n\t\tl = map[string]string{}\n\t}\n\tl[Label] = LabelValue\n\tg.eventSource.SetLabels(l)\n\tg.eventSource.Spec.EventBusName = EventBusName\n\treturn g\n}", "func (proc *schedulerProcess) start() {\t\n\tproc.server.Addr = fmt.Sprintf(\"%s:%d\", localIP4String(), nextTcpPort())\n\tproc.processId = newSchedProcID(proc.server.Addr)\n\tproc.registerEventHandlers()\n\tgo proc.server.ListenAndServe()\n}", "func (s *BaseDiceListener) EnterStart(ctx *StartContext) {}", "func (c *StreamerController) Start(tctx *tcontext.Context, location binlog.Location) error {\n\tc.Lock()\n\tdefer c.Unlock()\n\n\tc.meetError = false\n\tc.closed = false\n\tc.currentBinlogType = c.initBinlogType\n\n\tvar err error\n\tif c.serverIDUpdated {\n\t\terr = c.resetReplicationSyncer(tctx, location)\n\t} else {\n\t\terr = c.updateServerIDAndResetReplication(tctx, location)\n\t}\n\tif err != nil {\n\t\tc.close(tctx)\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (er *EventRelay) Start() {\n\t// Start in the background since we don't want to hold up the caller\n\tgo er.connectEventHub()\n}", "func (bep *buildEventPublisher) Start(ctx context.Context) {\n\tgo bep.run(ctx)\n}", "func (cs *ClientSocket) Start() {\n\tgo cs.RunEvents()\n\tgo cs.RunSocketSender()\n\tgo cs.RunSocketListener()\n}", "func (s *Scheduler) start() error {\n\ts.state.mu.Lock()\n\tdefer s.state.mu.Unlock()\n\tswitch s.state.value {\n\tcase srvStateActive:\n\t\treturn fmt.Errorf(\"asynq: the scheduler is already running\")\n\tcase srvStateClosed:\n\t\treturn fmt.Errorf(\"asynq: the scheduler has already been stopped\")\n\t}\n\ts.state.value = srvStateActive\n\treturn nil\n}", "func OnStart(ecb EventCallbackFunc, priority ...int) {\n\tAppEventStore().Subscribe(EventOnStart, EventCallback{\n\t\tCallback: ecb,\n\t\tCallOnce: true,\n\t\tpriority: parsePriority(priority...),\n\t})\n}", "func (ss *SNSServer) PrepareAndStart() {\n\n\tss.Subscribe()\n}", "func StartDate(v time.Time) predicate.FlowInstance {\n\treturn predicate.FlowInstance(func(s *sql.Selector) {\n\t\ts.Where(sql.EQ(s.C(FieldStartDate), v))\n\t})\n}", "func (EventReceiver) SpanStart(ctx context.Context, eventName, query string) context.Context {\n\tspan, ctx := ot.StartSpanFromContext(ctx, eventName)\n\totext.DBStatement.Set(span, query)\n\totext.DBType.Set(span, \"sql\")\n\treturn ctx\n}", "func NewSourceService(\n\tsrc music.Source,\n\topts ...basic.Option,\n) music.SourceService {\n\tcfg := basic.BuildOptions(opts...)\n\treturn sourceService{\n\t\tsrc: src,\n\t\tlog: logutil.WithComponent(cfg.Logger, (*sourceService)(nil)),\n\t\ttracer: cfg.Tracer,\n\t}\n}", "func (em EventManager[A]) StartCollector(ctx context.Context) {\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\t// if context is done, return\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn\n\t\t\tcase evMsg, ok := <-em.evCh:\n\t\t\t\t// if the channel was closed, return\n\t\t\t\tif !ok {\n\t\t\t\t\tem.evDone = true\n\t\t\t\t\tdefer 
evMsg.eventProcessed()\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tem.storeEvent(evMsg.getEvent())\n\t\t\t\tevMsg.eventProcessed()\n\t\t\t}\n\t\t}\n\t}()\n}", "func ExampleStreamingEndpointsClient_BeginStart() {\n\tcred, err := azidentity.NewDefaultAzureCredential(nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to obtain a credential: %v\", err)\n\t}\n\tctx := context.Background()\n\tclient, err := armmediaservices.NewStreamingEndpointsClient(\"0a6ec948-5a62-437d-b9df-934dc7c1b722\", cred, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to create client: %v\", err)\n\t}\n\tpoller, err := client.BeginStart(ctx,\n\t\t\"mediaresources\",\n\t\t\"slitestmedia10\",\n\t\t\"myStreamingEndpoint1\",\n\t\tnil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to finish the request: %v\", err)\n\t}\n\t_, err = poller.PollUntilDone(ctx, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to pull the result: %v\", err)\n\t}\n}", "func (sb *StreamBuilder) Source(name string, source Source) *Stream {\n\tn := sb.tp.AddSource(name, source)\n\n\treturn newStream(sb.tp, []Node{n})\n}", "func (l *Launcher) Start(sourceProvider launchers.SourceProvider, pipelineProvider pipeline.Provider, registry auditor.Registry, tracker *tailers.TailerTracker) {\n\tl.sources = sourceProvider.GetAddedForType(config.JournaldType)\n\tl.pipelineProvider = pipelineProvider\n\tl.registry = registry\n\tgo l.run()\n}", "func (p *EventProducer) Start(ctx context.Context) error {\n\tevents, err := DumpCassandra(p.cassandraHost, p.cassandraPort, p.cassandraTimeout)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err = events.Sort(); err != nil {\n\t\tp.log.WithError(err).Warn(\"Sorting events failed - ignoring\")\n\t}\n\n\tfor _, e := range events.Events {\n\t\tif _, err = p.Processor.Process(ctx, e); err != nil {\n\t\t\tp.log.WithError(err).WithFields(logrus.Fields{\n\t\t\t\t\"context\": ctx,\n\t\t\t\t\"event\": e,\n\t\t\t}).Warn(\"Processing event failed - ignoring\")\n\t\t}\n\t}\n\treturn p.WatchAMQP(ctx)\n}", "func (server *Server) Start(laddr *net.TCPAddr) error {\n\n\tlistener, err := server.dataStreamer.CreateListener(laddr)\n\tif err != nil {\n\t\tlog.Print(err)\n\t\treturn err\n\t}\n\tserver.dataStreamer = listener\n\n\tgo server.listen()\n\treturn nil\n\n}", "func (ec *EventsCache) start() {\n\tsafego.RunWithRestart(func() {\n\t\tfor {\n\t\t\tif ec.closed {\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tcf := <-ec.originalCh\n\t\t\tec.put(cf.destinationId, cf.eventId, cf.eventFact)\n\t\t}\n\t})\n\n\tsafego.RunWithRestart(func() {\n\t\tfor {\n\t\t\tif ec.closed {\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tcf := <-ec.succeedCh\n\t\t\tec.succeed(cf.destinationId, cf.eventId, cf.processed, cf.table, cf.types)\n\t\t}\n\t})\n\n\tsafego.RunWithRestart(func() {\n\t\tfor {\n\t\t\tif ec.closed {\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tcf := <-ec.failedCh\n\t\t\tec.error(cf.destinationId, cf.eventId, cf.error)\n\t\t}\n\t})\n}", "func (b *GroupsEditBuilder) EventStartDate(v int) *GroupsEditBuilder {\n\tb.Params[\"event_start_date\"] = v\n\treturn b\n}", "func (q *eventQ) Start() error {\n\tif err := q.GoStart(); err != nil {\n\t\treturn err\n\t}\n\n\tselect {\n\tcase err := <-q.shutdownC:\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func (s *RuleSummary) SetEventSourceName(v string) *RuleSummary {\n\ts.EventSourceName = &v\n\treturn s\n}", "func WithSource(ctx context.Context, s EventSource) context.Context {\n\treturn context.WithValue(ctx, sourceKey{}, s)\n}", "func (r *CheckpointStore) Start() error {\n\tif err := utils.LifecycleStart(r.log(), 
r.pubsub); err != nil {\n\t\treturn err\n\t}\n\n\tif err := r.pubsub.Subscribe(r.config.Topic); err != nil {\n\t\treturn err\n\t}\n\n\tstartupDone := make(chan struct{})\n\tstartTime := time.Now()\n\n\tgo r.consumerLoop(startupDone)\n\n\t<-startupDone\n\tcheckpointStartTimeTimer.UpdateSince(startTime)\n\n\treturn nil\n}", "func (s *ListRulesInput) SetEventSourceName(v string) *ListRulesInput {\n\ts.EventSourceName = &v\n\treturn s\n}", "func NewSource(sleep int, ch chan Msg) *Source {\n\tvar src Source\n\tsrc.Sleep = sleep\n\tsrc.SendChan = ch\n\treturn &src\n}", "func (eventNotifications *EventNotificationsV1) CreateSourcesWithContext(ctx context.Context, createSourcesOptions *CreateSourcesOptions) (result *SourceResponse, response *core.DetailedResponse, err error) {\n\terr = core.ValidateNotNil(createSourcesOptions, \"createSourcesOptions cannot be nil\")\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.ValidateStruct(createSourcesOptions, \"createSourcesOptions\")\n\tif err != nil {\n\t\treturn\n\t}\n\n\tpathParamsMap := map[string]string{\n\t\t\"instance_id\": *createSourcesOptions.InstanceID,\n\t}\n\n\tbuilder := core.NewRequestBuilder(core.POST)\n\tbuilder = builder.WithContext(ctx)\n\tbuilder.EnableGzipCompression = eventNotifications.GetEnableGzipCompression()\n\t_, err = builder.ResolveRequestURL(eventNotifications.Service.Options.URL, `/v1/instances/{instance_id}/sources`, pathParamsMap)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tfor headerName, headerValue := range createSourcesOptions.Headers {\n\t\tbuilder.AddHeader(headerName, headerValue)\n\t}\n\n\tsdkHeaders := common.GetSdkHeaders(\"event_notifications\", \"V1\", \"CreateSources\")\n\tfor headerName, headerValue := range sdkHeaders {\n\t\tbuilder.AddHeader(headerName, headerValue)\n\t}\n\tbuilder.AddHeader(\"Accept\", \"application/json\")\n\tbuilder.AddHeader(\"Content-Type\", \"application/json\")\n\n\tbody := make(map[string]interface{})\n\tif createSourcesOptions.Name != nil {\n\t\tbody[\"name\"] = createSourcesOptions.Name\n\t}\n\tif createSourcesOptions.Description != nil {\n\t\tbody[\"description\"] = createSourcesOptions.Description\n\t}\n\tif createSourcesOptions.Enabled != nil {\n\t\tbody[\"enabled\"] = createSourcesOptions.Enabled\n\t}\n\t_, err = builder.SetBodyContentJSON(body)\n\tif err != nil {\n\t\treturn\n\t}\n\n\trequest, err := builder.Build()\n\tif err != nil {\n\t\treturn\n\t}\n\n\tvar rawResponse map[string]json.RawMessage\n\tresponse, err = eventNotifications.Service.Request(request, &rawResponse)\n\tif err != nil {\n\t\treturn\n\t}\n\tif rawResponse != nil {\n\t\terr = core.UnmarshalModel(rawResponse, \"\", &result, UnmarshalSourceResponse)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\tresponse.Result = result\n\t}\n\n\treturn\n}", "func (em *EventMgr) DNDStartEvent(e *mouse.DragEvent) {\n\tde := dnd.Event{EventBase: e.EventBase, Where: e.Where, Modifiers: e.Modifiers}\n\tde.Processed = false\n\tde.Action = dnd.Start\n\tde.DefaultMod() // based on current key modifiers\n\tem.SendEventSignal(&de, false) // popup = false: ignore any popups\n\t// now up to receiver to call StartDragNDrop if they want to..\n}", "func (swp *SourceWorkerPool) Start(ctx CnvContext) error {\n\tif swp.Mode == WorkerModeTransaction {\n\t\treturn swp.startTransactionMode(ctx)\n\t} else if swp.Mode == WorkerModeLoop {\n\t\treturn swp.startLoopMode(ctx)\n\t} else {\n\t\treturn ErrInvalidWorkerMode\n\t}\n}", "func (txn TxnProbe) SetStartTS(ts uint64) {\n\ttxn.startTS = ts\n}", "func (fes *FrontEndService) Start(ctx 
context.Context) <-chan error {\n\tlogrus.Infof(\"FrontEndService: Start\")\n\terrCh := make(chan error, 1)\n\tgo fes.start(ctx, errCh)\n\treturn errCh\n}", "func (s *OutputSendToDeviceEventConsumer) Start() error {\n\treturn jetstream.JetStreamConsumer(\n\t\ts.ctx, s.jetstream, s.topic, s.durable, s.onMessage,\n\t\tnats.DeliverAll(), nats.ManualAck(),\n\t)\n}", "func (r *RecordStream) Start() {\n\tif r.state == idle {\n\t\tr.err = nil\n\t\tr.c.c.Request(&proto.FlushRecordStream{StreamIndex: r.index}, nil)\n\t\tr.c.c.Request(&proto.CorkRecordStream{StreamIndex: r.index, Corked: false}, nil)\n\t\tr.state = running\n\t}\n}", "func (c *stcprClient) Start() error {\n\tif c.connListener != nil {\n\t\treturn ErrAlreadyListening\n\t}\n\tgo c.serve()\n\treturn nil\n}", "func (s *ListEventActionsInput) SetEventSourceId(v string) *ListEventActionsInput {\n\ts.EventSourceId = &v\n\treturn s\n}", "func (d *DataPointCollector) Start(output chan<- []pipeline.DataPoint) error {\n\n\tlistener, err := server.OpenListener(d.addr)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tgo func() {\n\t\tfor {\n\t\t\tconn, err := listener.Accept()\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\td.clientServer = publisherClientServer{\n\t\t\t\toutput: output,\n\t\t\t\tlistener: listener,\n\t\t\t}\n\n\t\t\tserver := rpc.NewServer()\n\t\t\tserver.RegisterName(\"PublisherClient\", &d.clientServer)\n\n\t\t\tcodec := jsonrpc.NewServerCodec(conn)\n\n\t\t\tgo server.ServeCodec(codec)\n\t\t}\n\t}()\n\n\treturn nil\n}", "func (s *streamStrategy) Start() {\n\tgo func() {\n\t\tfor msg := range s.inputChan {\n\t\t\tif msg.Origin != nil {\n\t\t\t\tmsg.Origin.LogSource.LatencyStats.Add(msg.GetLatency())\n\t\t\t}\n\t\t\ts.outputChan <- &message.Payload{Messages: []*message.Message{msg}, Encoded: msg.Content, UnencodedSize: len(msg.Content)}\n\t\t}\n\t\ts.done <- struct{}{}\n\t}()\n}", "func KeyboardEventSource() (*EventSource, error) {\n\tsource := C.al_get_keyboard_event_source()\n\tif source == nil {\n\t\treturn nil, errors.New(\"failed to get keyboard event source; did you call InstallKeyboard() first?\")\n\t}\n\treturn (*EventSource)(source), nil\n}", "func (t *Tracer) ConnectStart(network, addr string) {\n\t// If using dual-stack dialing, it's possible to get this\n\t// multiple times, so the atomic compareAndSwap ensures\n\t// that only the first call's time is recorded\n\tatomic.CompareAndSwapInt64(&t.connectStart, 0, now())\n}", "func newSource(opts ...SourceOption) *sourcesv1alpha1.HTTPSource {\n\tsrc := &sourcesv1alpha1.HTTPSource{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tNamespace: tNs,\n\t\t\tName: tName,\n\t\t\tUID: tUID,\n\t\t},\n\t\tSpec: sourcesv1alpha1.HTTPSourceSpec{\n\t\t\tSource: tSource,\n\t\t},\n\t}\n\n\tsrc.Status.InitializeConditions()\n\n\tfor _, opt := range opts {\n\t\topt(src)\n\t}\n\n\treturn src\n}", "func SmokeCloudSchedulerSourceSetup(t *testing.T, authConfig lib.AuthConfig) {\n\tclient := lib.Setup(t, true, authConfig.WorkloadIdentity)\n\tdefer lib.TearDown(client)\n\n\tsName := \"scheduler-test\"\n\n\tscheduler := kngcptesting.NewCloudSchedulerSource(sName, client.Namespace,\n\t\tkngcptesting.WithCloudSchedulerSourceLocation(\"us-central1\"),\n\t\tkngcptesting.WithCloudSchedulerSourceData(\"my test data\"),\n\t\tkngcptesting.WithCloudSchedulerSourceSchedule(\"* * * * *\"),\n\t\tkngcptesting.WithCloudSchedulerSourceSink(lib.ServiceGVK, 
\"event-display\"),\n\t\tkngcptesting.WithCloudSchedulerSourceGCPServiceAccount(authConfig.PubsubServiceAccount),\n\t)\n\n\tclient.CreateSchedulerOrFail(scheduler)\n\tclient.Core.WaitForResourceReadyOrFail(sName, lib.CloudSchedulerSourceTypeMeta)\n}", "func (p *Pingee) Start() (err error) {\n\terr = p.rc.Subscribe(p.pingChannel)\n\tif err != nil {\n\t\tlog.Error(\"Pingee \", p.name, \" failed to subscribe to \", p.pingChannel)\n\t\tlog.Error(err)\n\t\treturn err\n\t}\n\t// Listen for subscribed pings. Provide event handler method.\n\tgo func() {\n\t\t_ = p.rc.Listen(p.pingHandler)\n\t}()\n\tp.isStarted = true\n\treturn nil\n}", "func main() {\n\tsig := make(chan os.Signal, 1)\n\tsignal.Notify(sig, os.Interrupt, os.Kill)\n\n\te := events.NewStream(1000, 10)\n\tSource.Load(e)\n\n\tSource.Start()\n\n\tdefer Source.Stop()\n\n\t<-sig\n}", "func (p *proxyImpl) Start() error {\n\tif p.sourceListener != nil {\n\t\treturn errors.New(\"proxy is already started\")\n\t}\n\tvar err error\n\tp.sourceListener, err = net.ListenUnix(p.network, p.source)\n\tif err != nil {\n\t\tlogrus.Errorf(\"can't listen unix %v\", err)\n\t\treturn err\n\t}\n\tlogrus.Info(\"Listening source socket...\")\n\n\tp.stopCh = make(chan struct{}, 1)\n\tp.errCh = make(chan error, 1)\n\n\tgo func() {\n\t\tp.errCh <- p.proxy()\n\t\tif p.listener != nil {\n\t\t\tp.listener.OnStopped()\n\t\t}\n\t}()\n\treturn nil\n}", "func (r *RollDPoS) Start(ctx context.Context) error {\n\tif err := r.ctx.Start(ctx); err != nil {\n\t\treturn errors.Wrap(err, \"error when starting the roll dpos context\")\n\t}\n\tif err := r.cfsm.Start(ctx); err != nil {\n\t\treturn errors.Wrap(err, \"error when starting the consensus FSM\")\n\t}\n\tif _, err := r.cfsm.BackToPrepare(r.startDelay); err != nil {\n\t\treturn err\n\t}\n\tclose(r.ready)\n\treturn nil\n}", "func (s *BasevhdlListener) EnterSource_aspect(ctx *Source_aspectContext) {}", "func (c *Client) CreatePartnerEventSource(ctx context.Context, params *CreatePartnerEventSourceInput, optFns ...func(*Options)) (*CreatePartnerEventSourceOutput, error) {\n\tif params == nil {\n\t\tparams = &CreatePartnerEventSourceInput{}\n\t}\n\n\tresult, metadata, err := c.invokeOperation(ctx, \"CreatePartnerEventSource\", params, optFns, c.addOperationCreatePartnerEventSourceMiddlewares)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tout := result.(*CreatePartnerEventSourceOutput)\n\tout.ResultMetadata = metadata\n\treturn out, nil\n}", "func StartTrace(traceDir, identifier string) error {\n\t// Lock the trace lock so that only one profiler is running at a\n\t// time.\n\ttraceLock.Lock()\n\tif traceActive {\n\t\ttraceLock.Unlock()\n\t\treturn errors.New(\"cannot start trace, it is already running\")\n\t}\n\ttraceActive = true\n\ttraceLock.Unlock()\n\n\t// Start trace into the trace dir, using the identifer. 
The timestamp\n\t// of the start time of the trace will be included in the filename.\n\ttraceFile, err := os.Create(filepath.Join(traceDir, \"trace-\"+identifier+\"-\"+time.Now().Format(time.RFC3339Nano)+\".trace\"))\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn trace.Start(traceFile)\n}", "func (s *TrackerSuite) TestStartNewEvent() {\n\n\tevent := s.service.StartNew()\n\tassert.NotEqual(s.T(), nil, event)\n}", "func (e *ElkTimeseriesForwarder) start() {\n\n\tlog.L.Infof(\"Starting event forwarder for %v\", e.index())\n\tticker := time.NewTicker(e.interval)\n\n\tfor {\n\t\tselect {\n\t\tcase <-ticker.C:\n\t\t\t//send it off\n\t\t\tlog.L.Debugf(\"Sending bulk ELK update for %v\", e.index())\n\n\t\t\tgo forward(e.index(), e.url, e.buffer)\n\t\t\te.buffer = []ElkBulkUpdateItem{}\n\n\t\tcase event := <-e.incomingChannel:\n\t\t\te.bufferevent(event)\n\t\t}\n\t}\n}" ]
[ "0.7883973", "0.77757585", "0.77590185", "0.7744084", "0.7744084", "0.7744084", "0.71178484", "0.610572", "0.5880223", "0.5854096", "0.58206666", "0.57460594", "0.5692862", "0.5670011", "0.55274093", "0.5522732", "0.54867756", "0.54613596", "0.5384841", "0.53692126", "0.5346081", "0.5336875", "0.5327365", "0.5316179", "0.530865", "0.5276327", "0.52706397", "0.5217062", "0.51732355", "0.5162359", "0.5150749", "0.5149472", "0.51490694", "0.5142013", "0.514092", "0.5136625", "0.51304084", "0.51094127", "0.5093762", "0.5089427", "0.5082299", "0.5077931", "0.50732464", "0.50725365", "0.50639653", "0.5027127", "0.5022543", "0.50199723", "0.5016363", "0.50064665", "0.5003707", "0.50020605", "0.49958318", "0.49808818", "0.4976204", "0.49661028", "0.4965875", "0.4933577", "0.4932289", "0.492855", "0.49188522", "0.4911714", "0.4905449", "0.4895845", "0.48891324", "0.48846146", "0.4876951", "0.48721933", "0.48694736", "0.4867075", "0.48630044", "0.48559538", "0.4852456", "0.48516095", "0.48513958", "0.48480618", "0.4835243", "0.48328424", "0.4830066", "0.4825726", "0.4808744", "0.48079765", "0.48004013", "0.47939107", "0.47925714", "0.47894186", "0.47778043", "0.47746432", "0.4774128", "0.47704652", "0.47655305", "0.4758588", "0.47565132", "0.47553617", "0.47539526", "0.47434467", "0.4735282", "0.47317415", "0.47305173", "0.47301573" ]
0.7849933
1
FetchOrganization provides a mock function with given fields: filters
func (_m *OrganizationFetcher) FetchOrganization(filters []services.QueryFilter) (models.Organization, error) { ret := _m.Called(filters) var r0 models.Organization if rf, ok := ret.Get(0).(func([]services.QueryFilter) models.Organization); ok { r0 = rf(filters) } else { r0 = ret.Get(0).(models.Organization) } var r1 error if rf, ok := ret.Get(1).(func([]services.QueryFilter) error); ok { r1 = rf(filters) } else { r1 = ret.Error(1) } return r0, r1 }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func Test_OrganizationsRepo_Filter(t *testing.T) {\n\tq := query.NewQuery()\n\tresult, _, err := tc.organizationsRepo.Filter(context.Background(), q)\n\trequire.Nil(t, err, \"unexpected error\")\n\tassert.Len(t, result, len(f.Organizations), \"unexpected number of rows returned\")\n}", "func (m *MockOrganizationServiceClient) FetchOrganizationList(arg0 context.Context, arg1 *organization.Empty, arg2 ...grpc.CallOption) (*organization.OrganizationListResponse, error) {\n\tm.ctrl.T.Helper()\n\tvarargs := []interface{}{arg0, arg1}\n\tfor _, a := range arg2 {\n\t\tvarargs = append(varargs, a)\n\t}\n\tret := m.ctrl.Call(m, \"FetchOrganizationList\", varargs...)\n\tret0, _ := ret[0].(*organization.OrganizationListResponse)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (m *MockOrganizationServiceClient) FetchUserListByOrganization(arg0 context.Context, arg1 *organization.ByOrganizationRequest, arg2 ...grpc.CallOption) (*organization.UserListResponse, error) {\n\tm.ctrl.T.Helper()\n\tvarargs := []interface{}{arg0, arg1}\n\tfor _, a := range arg2 {\n\t\tvarargs = append(varargs, a)\n\t}\n\tret := m.ctrl.Call(m, \"FetchUserListByOrganization\", varargs...)\n\tret0, _ := ret[0].(*organization.UserListResponse)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (_m *ArticleRepository) Fetch(ctx context.Context, cursor string, num int64) ([]domain.domain, string, error) {\n\tret := _m.Called(ctx, cursor, num)\n\n\tvar r0 []domain.Change\n\tif rf, ok := ret.Get(0).(func(context.Context, string, int64) []domain.Change); ok {\n\t\tr0 = rf(ctx, cursor, num)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).([]domain.Change)\n\t\t}\n\t}\n\n\tvar r1 string\n\tif rf, ok := ret.Get(1).(func(context.Context, string, int64) string); ok {\n\t\tr1 = rf(ctx, cursor, num)\n\t} else {\n\t\tr1 = ret.Get(1).(string)\n\t}\n\n\tvar r2 error\n\tif rf, ok := ret.Get(2).(func(context.Context, string, int64) error); ok {\n\t\tr2 = rf(ctx, cursor, num)\n\t} else {\n\t\tr2 = ret.Error(2)\n\t}\n\n\treturn r0, r1, r2\n}", "func (_m *Usecase) Fetch(ctx context.Context, page *int, size *int,search string,trans bool,exp bool,merchantIds []string,sortBy string,promoId string) ([]*models.PromoDto, error) {\n\tret := _m.Called(ctx,page,size,search,trans,exp,merchantIds,sortBy,promoId)\n\n\tvar r0 []*models.PromoDto\n\tif rf, ok := ret.Get(0).(func(context.Context,*int,*int,string,bool,bool,[]string,string,string) []*models.PromoDto); ok {\n\t\tr0 = rf(ctx,page,size,search,trans,exp,merchantIds,sortBy,promoId)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).([]*models.PromoDto)\n\t\t}\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(context.Context,*int,*int,string,bool,bool,[]string,string,string) error); ok {\n\t\tr1 = rf(ctx,page,size,search,trans,exp,merchantIds,sortBy,promoId)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}", "func (m *MockDatabase) GetOrganizations() (*[]model.Organization, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"GetOrganizations\")\n\tret0, _ := ret[0].(*[]model.Organization)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (m *Client) GetOrganizationFields(arg0 context.Context) ([]zendesk.OrganizationField, zendesk.Page, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"GetOrganizationFields\", arg0)\n\tret0, _ := ret[0].([]zendesk.OrganizationField)\n\tret1, _ := ret[1].(zendesk.Page)\n\tret2, _ := ret[2].(error)\n\treturn ret0, ret1, ret2\n}", "func (m *MockDatabase) 
GetOrganizationByAgolaRef(organizationName string) (*model.Organization, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"GetOrganizationByAgolaRef\", organizationName)\n\tret0, _ := ret[0].(*model.Organization)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (_m *Forge) Org(ctx context.Context, u *model.User, org string) (*model.Org, error) {\n\tret := _m.Called(ctx, u, org)\n\n\tvar r0 *model.Org\n\tvar r1 error\n\tif rf, ok := ret.Get(0).(func(context.Context, *model.User, string) (*model.Org, error)); ok {\n\t\treturn rf(ctx, u, org)\n\t}\n\tif rf, ok := ret.Get(0).(func(context.Context, *model.User, string) *model.Org); ok {\n\t\tr0 = rf(ctx, u, org)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*model.Org)\n\t\t}\n\t}\n\n\tif rf, ok := ret.Get(1).(func(context.Context, *model.User, string) error); ok {\n\t\tr1 = rf(ctx, u, org)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}", "func (_m *Repository) Fetch(ctx context.Context, limit int, offset int) ([]*models.Facilities, error) {\n\tret := _m.Called(ctx, limit, offset)\n\n\tvar r0 []*models.Facilities\n\tif rf, ok := ret.Get(0).(func(context.Context, int, int) []*models.Facilities); ok {\n\t\tr0 = rf(ctx, limit, offset)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).([]*models.Facilities)\n\t\t}\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(context.Context, int, int) error); ok {\n\t\tr1 = rf(ctx, limit, offset)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}", "func (_m *OfficialCompanyRepository) GetCompaniesList(_a0 uint, _a1 uint) ([]models.OfficialCompany, error) {\n\tret := _m.Called(_a0, _a1)\n\n\tvar r0 []models.OfficialCompany\n\tif rf, ok := ret.Get(0).(func(uint, uint) []models.OfficialCompany); ok {\n\t\tr0 = rf(_a0, _a1)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).([]models.OfficialCompany)\n\t\t}\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(uint, uint) error); ok {\n\t\tr1 = rf(_a0, _a1)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}", "func (_m *Repository) Fetch(ctx context.Context, cursor string, num int64) (res []*models.User, nextCursor string, err error) {\n\tret := _m.Called(ctx, cursor,num)\n\n\tvar r0 []*models.User\n\tif rf, ok := ret.Get(0).(func(context.Context, string,int64) []*models.User); ok {\n\t\tr0 = rf(ctx, cursor,num)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).([]*models.User)\n\t\t}\n\t}\n\n\tvar r1 string\n\tif rf, ok := ret.Get(1).(func(context.Context, string,int64) string); ok {\n\t\tr1 = rf(ctx, cursor,num)\n\t} else {\n\t\tr1 = ret.Get(1).(string)\n\t}\n\n\tvar r2 error\n\tif rf, ok := ret.Get(2).(func(context.Context, string,int64) error); ok {\n\t\tr2 = rf(ctx, cursor,num)\n\t} else {\n\t\tr2 = ret.Error(2)\n\t}\n\n\treturn r0, r1,r2\n}", "func (_m *IUserService) List(tenantId int, page common.Pagination, filters model.UserFilterList) (model.UserDtos, *common.PageResult, error) {\n\tret := _m.Called(tenantId, page, filters)\n\n\tvar r0 model.UserDtos\n\tif rf, ok := ret.Get(0).(func(int, common.Pagination, model.UserFilterList) model.UserDtos); ok {\n\t\tr0 = rf(tenantId, page, filters)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(model.UserDtos)\n\t\t}\n\t}\n\n\tvar r1 *common.PageResult\n\tif rf, ok := ret.Get(1).(func(int, common.Pagination, model.UserFilterList) *common.PageResult); ok {\n\t\tr1 = rf(tenantId, page, filters)\n\t} else {\n\t\tif ret.Get(1) != nil {\n\t\t\tr1 = 
ret.Get(1).(*common.PageResult)\n\t\t}\n\t}\n\n\tvar r2 error\n\tif rf, ok := ret.Get(2).(func(int, common.Pagination, model.UserFilterList) error); ok {\n\t\tr2 = rf(tenantId, page, filters)\n\t} else {\n\t\tr2 = ret.Error(2)\n\t}\n\n\treturn r0, r1, r2\n}", "func (_m *API) SearchPeople(query string, page int) ([]model.PeopleSearch, int, error) {\n\tret := _m.Called(query, page)\n\n\tvar r0 []model.PeopleSearch\n\tif rf, ok := ret.Get(0).(func(string, int) []model.PeopleSearch); ok {\n\t\tr0 = rf(query, page)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).([]model.PeopleSearch)\n\t\t}\n\t}\n\n\tvar r1 int\n\tif rf, ok := ret.Get(1).(func(string, int) int); ok {\n\t\tr1 = rf(query, page)\n\t} else {\n\t\tr1 = ret.Get(1).(int)\n\t}\n\n\tvar r2 error\n\tif rf, ok := ret.Get(2).(func(string, int) error); ok {\n\t\tr2 = rf(query, page)\n\t} else {\n\t\tr2 = ret.Error(2)\n\t}\n\n\treturn r0, r1, r2\n}", "func (m *MockDatabase) GetOrganizationsByGitSource(gitSource string) (*[]model.Organization, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"GetOrganizationsByGitSource\", gitSource)\n\tret0, _ := ret[0].(*[]model.Organization)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (m *MockOrganizationLister) Organizations(arg0 *mongodbatlas.ListOptions) (*mongodbatlas.Organizations, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Organizations\", arg0)\n\tret0, _ := ret[0].(*mongodbatlas.Organizations)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (_m *UserRepositoryI) FetchAll(tx database.TransactionI, params apidatabase.UsersSelectParams) ([]*models.UserPublicInfo, error) {\n\tret := _m.Called(tx, params)\n\n\tvar r0 []*models.UserPublicInfo\n\tif rf, ok := ret.Get(0).(func(database.TransactionI, apidatabase.UsersSelectParams) []*models.UserPublicInfo); ok {\n\t\tr0 = rf(tx, params)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).([]*models.UserPublicInfo)\n\t\t}\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(database.TransactionI, apidatabase.UsersSelectParams) error); ok {\n\t\tr1 = rf(tx, params)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}", "func (m *MockOrg) Index(filters ...OrgFilter) (api.Orgs, error) {\n\tvarargs := []interface{}{}\n\tfor _, a := range filters {\n\t\tvarargs = append(varargs, a)\n\t}\n\tret := m.ctrl.Call(m, \"Index\", varargs...)\n\tret0, _ := ret[0].(api.Orgs)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (_m *Usecase) Fetch(c context.Context, page int) (*[]models.Item, error) {\n\tret := _m.Called(c, page)\n\n\tvar r0 *[]models.Item\n\tif rf, ok := ret.Get(0).(func(context.Context, int) *[]models.Item); ok {\n\t\tr0 = rf(c, page)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*[]models.Item)\n\t\t}\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(context.Context, int) error); ok {\n\t\tr1 = rf(c, page)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}", "func (c *Campaigner) OrganizationFind(n string) (ResponseOrganizationList, error) {\n\t// Setup.\n\tvar (\n\t\tqs = fmt.Sprintf(\"%s=%s\", url.QueryEscape(\"filters[name]\"), url.QueryEscape(n))\n\t\tu = fmt.Sprintf(\"/api/3/organizations/?%s\", qs)\n\t\tresponse ResponseOrganizationList\n\t)\n\n\t// Error check.\n\tif len(strings.TrimSpace(n)) == 0 {\n\t\treturn response, fmt.Errorf(\"organization find failed, name is empty\")\n\t}\n\n\t// Send GET request.\n\tr, body, err := c.get(u)\n\tif err != nil {\n\t\treturn response, fmt.Errorf(\"organization find failed. 
HTTP failure: %s\", err)\n\t}\n\n\t// Response check.\n\tswitch r.StatusCode {\n\tcase http.StatusOK:\n\t\terr = json.Unmarshal(body, &response)\n\t\tif err != nil {\n\t\t\treturn response, fmt.Errorf(\"organization list failed, JSON failure: %s\", err)\n\t\t}\n\n\t\treturn response, nil\n\t}\n\n\treturn response, fmt.Errorf(\"organization find failed, unspecified error (%d); %s\", r.StatusCode, string(body))\n}", "func orgQueryFunc(opts *github.RepositoryListByOrgOptions) func(client *github.Client, name string, page int) ([]*github.Repository, *github.Response, error) {\n\treturn func(client *github.Client, name string, page int) ([]*github.Repository, *github.Response, error) {\n\t\topts.Page = page\n\n\t\treturn client.Repositories.ListByOrg(context.Background(), name, opts)\n\t}\n}", "func (osc *Client) SearchOrganization(ctx context.Context, orgName string, websiteName string, filter string) ([]*models.Organization, error) {\n\tf := logrus.Fields{\n\t\t\"functionName\": \"organization_service.SearchOrganization\",\n\t\tutils.XREQUESTID: ctx.Value(utils.XREQUESTID),\n\t\t\"orgName\": orgName,\n\t\t\"websiteName\": websiteName,\n\t\t\"filter\": filter,\n\t}\n\ttok, err := token.GetToken()\n\tif err != nil {\n\t\tlog.WithFields(f).WithError(err).Warn(\"unable to fetch token\")\n\t\treturn nil, err\n\t}\n\tvar offset int64\n\tvar pageSize int64 = 1000\n\tclientAuth := runtimeClient.BearerToken(tok)\n\tvar orgs []*models.Organization\n\tfor {\n\t\tparams := &organizations.SearchOrgParams{\n\t\t\tName: aws.String(orgName),\n\t\t\tWebsite: aws.StringValueSlice([]*string{&websiteName}),\n\t\t\tDollarFilter: aws.String(filter),\n\t\t\tOffset: aws.String(strconv.FormatInt(offset, 10)),\n\t\t\tPageSize: aws.String(strconv.FormatInt(pageSize, 10)),\n\t\t\tContext: context.TODO(),\n\t\t}\n\t\tresult, err := osc.cl.Organizations.SearchOrg(params, clientAuth)\n\t\tif err != nil {\n\t\t\tlog.WithFields(f).WithError(err).Warnf(\"unable to search organization with params: %+v\", params)\n\t\t\treturn nil, err\n\t\t}\n\t\torgs = append(orgs, result.Payload.Data...)\n\t\tif result.Payload.Metadata.TotalSize > offset+pageSize {\n\t\t\toffset += pageSize\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn orgs, nil\n}", "func (m *MockCompany) Get(arg0 ...repository.Filter) ([]domain.Company, error) {\n\tm.ctrl.T.Helper()\n\tvarargs := []interface{}{}\n\tfor _, a := range arg0 {\n\t\tvarargs = append(varargs, a)\n\t}\n\tret := m.ctrl.Call(m, \"Get\", varargs...)\n\tret0, _ := ret[0].([]domain.Company)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (_m *OfficialCompanyRepository) SearchCompanies(_a0 models.CompanySearchParams) ([]models.OfficialCompany, error) {\n\tret := _m.Called(_a0)\n\n\tvar r0 []models.OfficialCompany\n\tif rf, ok := ret.Get(0).(func(models.CompanySearchParams) []models.OfficialCompany); ok {\n\t\tr0 = rf(_a0)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).([]models.OfficialCompany)\n\t\t}\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(models.CompanySearchParams) error); ok {\n\t\tr1 = rf(_a0)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}", "func (_m *Usecase) List(ctx context.Context,page, limit, offset int, search string,token string,trans bool,exp bool,merchantIds []string)(*models.PromoWithPagination,error) {\n\tret := _m.Called(ctx, page, limit, offset,search,token,trans,exp,merchantIds)\n\n\tvar r0 *models.PromoWithPagination\n\tif rf, ok := ret.Get(0).(func(context.Context, int, int, int,string,string,bool,bool,[]string) 
*models.PromoWithPagination); ok {\n\t\tr0 = rf(ctx, page, limit, offset,search,token,trans,exp,merchantIds)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*models.PromoWithPagination)\n\t\t}\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(context.Context, int, int, int,string,string,bool,bool,[]string) error); ok {\n\t\tr1 = rf(ctx, page, limit, offset,search,token,trans,exp,merchantIds)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}", "func GetOrganization(clients *common.ClientContainer, handler common.HandlerInterface) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\torglist, err := handler.GetOrganizations(clients)\n\t\tif err != nil {\n\t\t\tlog.Logger.Error(err)\n\t\t\tcommon.WriteErrorToResponse(w, http.StatusInternalServerError,\n\t\t\t\thttp.StatusText(http.StatusInternalServerError),\n\t\t\t\t\"Internal server error occured\")\n\t\t\treturn\n\t\t}\n\t\tw.Write(orglist)\n\t}\n}", "func (m *MockOrganizationDescriber) Organization(arg0 string) (*mongodbatlas.Organization, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Organization\", arg0)\n\tret0, _ := ret[0].(*mongodbatlas.Organization)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (_m *Client) Search(ctx context.Context, tenantId string, searchParams model.SearchParams) ([]model.InvDevice, int, error) {\n\tret := _m.Called(ctx, tenantId, searchParams)\n\n\tvar r0 []model.InvDevice\n\tif rf, ok := ret.Get(0).(func(context.Context, string, model.SearchParams) []model.InvDevice); ok {\n\t\tr0 = rf(ctx, tenantId, searchParams)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).([]model.InvDevice)\n\t\t}\n\t}\n\n\tvar r1 int\n\tif rf, ok := ret.Get(1).(func(context.Context, string, model.SearchParams) int); ok {\n\t\tr1 = rf(ctx, tenantId, searchParams)\n\t} else {\n\t\tr1 = ret.Get(1).(int)\n\t}\n\n\tvar r2 error\n\tif rf, ok := ret.Get(2).(func(context.Context, string, model.SearchParams) error); ok {\n\t\tr2 = rf(ctx, tenantId, searchParams)\n\t} else {\n\t\tr2 = ret.Error(2)\n\t}\n\n\treturn r0, r1, r2\n}", "func TestHandleGet(t *testing.T) {\n\tc, err := neuron.NewController(config.Default())\n\trequire.NoError(t, err)\n\n\terr = c.RegisterRepository(\"mock\", &config.Repository{DriverName: mocks.DriverName})\n\trequire.NoError(t, err)\n\n\terr = c.RegisterModels(Human{}, House{}, Car{}, HookChecker{})\n\trequire.NoError(t, err)\n\n\tt.Run(\"Valid\", func(t *testing.T) {\n\t\tt.Run(\"Fieldset\", func(t *testing.T) {\n\t\t\th := NewC(c)\n\n\t\t\treq, err := http.NewRequest(\"GET\", \"/houses/1?fields[houses]=address\", nil)\n\t\t\trequire.NoError(t, err)\n\n\t\t\treq.Header.Add(\"Accept\", jsonapi.MediaType)\n\t\t\treq.Header.Add(\"Accept-Encoding\", \"identity\")\n\t\t\treq = req.WithContext(context.WithValue(context.Background(), IDKey, \"1\"))\n\n\t\t\trepo, err := c.GetRepository(House{})\n\t\t\trequire.NoError(t, err)\n\n\t\t\thousesRepo, ok := repo.(*mocks.Repository)\n\t\t\trequire.True(t, ok)\n\n\t\t\thousesRepo.On(\"Get\", mock.Anything, mock.Anything).Once().Run(func(args mock.Arguments) {\n\t\t\t\ts, ok := args[1].(*query.Scope)\n\t\t\t\trequire.True(t, ok)\n\n\t\t\t\tprimaryFilters := s.PrimaryFilters\n\t\t\t\tif assert.Len(t, primaryFilters, 1) {\n\t\t\t\t\tfilter := primaryFilters[0]\n\t\t\t\t\tif assert.Len(t, filter.Values, 1) {\n\t\t\t\t\t\tv := filter.Values[0]\n\t\t\t\t\t\tassert.Equal(t, query.OpEqual, v.Operator)\n\t\t\t\t\t\tif assert.Len(t, v.Values, 1) {\n\t\t\t\t\t\t\tassert.Equal(t, 1, 
v.Values[0])\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tassert.Len(t, s.Fieldset, 2)\n\t\t\t\tassert.Contains(t, s.Fieldset, s.Struct().Primary().NeuronName())\n\t\t\t\taddressField, ok := s.Struct().Attribute(\"address\")\n\t\t\t\trequire.True(t, ok)\n\t\t\t\tassert.Contains(t, s.Fieldset, addressField.NeuronName())\n\n\t\t\t\tv, ok := s.Value.(*House)\n\t\t\t\trequire.True(t, ok)\n\n\t\t\t\tv.ID = 1\n\t\t\t\tv.Address = \"Main Rd 52\"\n\t\t\t}).Return(nil)\n\n\t\t\tresp := httptest.NewRecorder()\n\t\t\th.Get(House{}).ServeHTTP(resp, req)\n\n\t\t\t// the status should be 200.\n\t\t\trequire.Equal(t, http.StatusOK, resp.Code)\n\n\t\t\tif assert.Equal(t, jsonapi.MediaType, resp.Header().Get(\"Content-Type\")) {\n\t\t\t\thouse := &House{}\n\t\t\t\terr = jsonapi.UnmarshalC(c, resp.Body, house)\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\tassert.Equal(t, 1, house.ID)\n\t\t\t\tassert.Equal(t, \"Main Rd 52\", house.Address)\n\t\t\t\tassert.Nil(t, house.Owner)\n\t\t\t}\n\t\t})\n\n\t\tt.Run(\"Include\", func(t *testing.T) {\n\t\t\th := NewC(c)\n\n\t\t\treq, err := http.NewRequest(\"GET\", \"/houses/1?include=owner\", nil)\n\t\t\trequire.NoError(t, err)\n\n\t\t\treq.Header.Add(\"Accept\", jsonapi.MediaType)\n\t\t\treq.Header.Add(\"Accept-Encoding\", \"identity\")\n\t\t\treq = req.WithContext(context.WithValue(context.Background(), IDKey, \"1\"))\n\n\t\t\trepo, err := c.GetRepository(House{})\n\t\t\trequire.NoError(t, err)\n\n\t\t\thousesRepo, ok := repo.(*mocks.Repository)\n\t\t\trequire.True(t, ok)\n\n\t\t\thousesRepo.On(\"Get\", mock.Anything, mock.Anything).Once().Run(func(args mock.Arguments) {\n\t\t\t\ts, ok := args[1].(*query.Scope)\n\t\t\t\trequire.True(t, ok)\n\n\t\t\t\tprimaryFilters := s.PrimaryFilters\n\t\t\t\tif assert.Len(t, primaryFilters, 1) {\n\t\t\t\t\tfilter := primaryFilters[0]\n\t\t\t\t\tif assert.Len(t, filter.Values, 1) {\n\t\t\t\t\t\tv := filter.Values[0]\n\t\t\t\t\t\tassert.Equal(t, query.OpEqual, v.Operator)\n\t\t\t\t\t\tif assert.Len(t, v.Values, 1) {\n\t\t\t\t\t\t\tassert.Equal(t, 1, v.Values[0])\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tassert.Len(t, s.Fieldset, 4)\n\n\t\t\t\tassert.Contains(t, s.Fieldset, s.Struct().Primary().NeuronName())\n\t\t\t\taddressField, ok := s.Struct().Attribute(\"address\")\n\t\t\t\trequire.True(t, ok)\n\t\t\t\tassert.Contains(t, s.Fieldset, addressField.NeuronName())\n\n\t\t\t\tv, ok := s.Value.(*House)\n\t\t\t\trequire.True(t, ok)\n\n\t\t\t\tv.ID = 1\n\t\t\t\tv.Address = \"Main Rd 52\"\n\t\t\t\tv.OwnerID = 4\n\t\t\t}).Return(nil)\n\n\t\t\trepo, err = c.GetRepository(Human{})\n\t\t\trequire.NoError(t, err)\n\n\t\t\thumanRepo, ok := repo.(*mocks.Repository)\n\t\t\trequire.True(t, ok)\n\n\t\t\tfor i := 0; i < 2; i++ {\n\t\t\t\thumanRepo.On(\"List\", mock.Anything, mock.Anything).Once().Run(func(args mock.Arguments) {\n\t\t\t\t\ts, ok := args[1].(*query.Scope)\n\t\t\t\t\trequire.True(t, ok)\n\n\t\t\t\t\tv := s.Value.(*[]*Human)\n\n\t\t\t\t\t*v = append(*v, &Human{ID: 4, Name: \"Elisabeth\", Age: 88})\n\t\t\t\t}).Return(nil)\n\t\t\t}\n\n\t\t\t// list elisabeth houses\n\t\t\thousesRepo.On(\"List\", mock.Anything, mock.Anything).Once().Run(func(args mock.Arguments) {\n\t\t\t\ts, ok := args[1].(*query.Scope)\n\t\t\t\trequire.True(t, ok)\n\n\t\t\t\tv, ok := s.Value.(*[]*House)\n\t\t\t\trequire.True(t, ok)\n\n\t\t\t\t*v = append(*v, &House{ID: 1}, &House{ID: 5})\n\t\t\t}).Return(nil)\n\n\t\t\tresp := httptest.NewRecorder()\n\t\t\th.Get(House{}).ServeHTTP(resp, req)\n\n\t\t\t// the status should be 200.\n\t\t\trequire.Equal(t, 
http.StatusOK, resp.Code)\n\n\t\t\tif assert.Equal(t, jsonapi.MediaType, resp.Header().Get(\"Content-Type\")) {\n\t\t\t\tbuf := &bytes.Buffer{}\n\n\t\t\t\ttee := io.TeeReader(resp.Body, buf)\n\t\t\t\thouse := &House{}\n\t\t\t\ts, err := jsonapi.UnmarshalSingleScopeC(c, tee, house)\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\tassert.Equal(t, 1, house.ID)\n\t\t\t\tassert.Equal(t, \"Main Rd 52\", house.Address)\n\t\t\t\tif assert.NotNil(t, house.Owner) {\n\t\t\t\t\tassert.Equal(t, 4, house.Owner.ID)\n\t\t\t\t}\n\n\t\t\t\tinput := buf.String()\n\t\t\t\tassert.True(t, strings.Contains(input, \"include\"), input)\n\n\t\t\t\t// Unmarshal includes should be fixed in neuron-core#22\n\t\t\t\tt.Skipf(\"Waiting for NeuronCore#22\")\n\n\t\t\t\thumanValues, err := s.IncludedModelValues(&Human{})\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\tif assert.Len(t, humanValues, 1) {\n\t\t\t\t\tv, ok := humanValues[house.Owner.ID]\n\t\t\t\t\trequire.True(t, ok)\n\n\t\t\t\t\thuman, ok := v.(*Human)\n\t\t\t\t\trequire.True(t, ok)\n\n\t\t\t\t\tassert.Equal(t, \"Elisabeth\", human.Name)\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\n\t\th := NewC(c)\n\n\t\treq, err := http.NewRequest(\"GET\", \"/houses/1\", nil)\n\t\trequire.NoError(t, err)\n\n\t\treq.Header.Add(\"Accept\", jsonapi.MediaType)\n\t\treq.Header.Add(\"Accept-Encoding\", \"identity\")\n\t\treq = req.WithContext(context.WithValue(context.Background(), IDKey, \"1\"))\n\n\t\trepo, err := c.GetRepository(House{})\n\t\trequire.NoError(t, err)\n\n\t\thousesRepo, ok := repo.(*mocks.Repository)\n\t\trequire.True(t, ok)\n\n\t\thousesRepo.On(\"Get\", mock.Anything, mock.Anything).Once().Run(func(args mock.Arguments) {\n\t\t\ts, ok := args[1].(*query.Scope)\n\t\t\trequire.True(t, ok)\n\n\t\t\tprimaryFilters := s.PrimaryFilters\n\t\t\tif assert.Len(t, primaryFilters, 1) {\n\t\t\t\tfilter := primaryFilters[0]\n\t\t\t\tif assert.Len(t, filter.Values, 1) {\n\t\t\t\t\tv := filter.Values[0]\n\t\t\t\t\tassert.Equal(t, query.OpEqual, v.Operator)\n\t\t\t\t\tif assert.Len(t, v.Values, 1) {\n\t\t\t\t\t\tassert.Equal(t, 1, v.Values[0])\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tassert.Len(t, s.Fieldset, 4)\n\n\t\t\tassert.Contains(t, s.Fieldset, s.Struct().Primary().NeuronName())\n\t\t\taddressField, ok := s.Struct().Attribute(\"address\")\n\t\t\trequire.True(t, ok)\n\t\t\tassert.Contains(t, s.Fieldset, addressField.NeuronName())\n\n\t\t\tv, ok := s.Value.(*House)\n\t\t\trequire.True(t, ok)\n\n\t\t\tv.ID = 1\n\t\t\tv.Address = \"Main Rd 52\"\n\t\t\tv.OwnerID = 4\n\t\t}).Return(nil)\n\n\t\tresp := httptest.NewRecorder()\n\t\th.Get(House{}).ServeHTTP(resp, req)\n\n\t\t// the status should be 200.\n\t\trequire.Equal(t, http.StatusOK, resp.Code)\n\n\t\tif assert.Equal(t, jsonapi.MediaType, resp.Header().Get(\"Content-Type\")) {\n\t\t\thouse := &House{}\n\t\t\terr = jsonapi.UnmarshalC(c, resp.Body, house)\n\t\t\trequire.NoError(t, err)\n\n\t\t\tassert.Equal(t, 1, house.ID)\n\t\t\tassert.Equal(t, \"Main Rd 52\", house.Address)\n\t\t\tif assert.NotNil(t, house.Owner) {\n\t\t\t\tassert.Equal(t, 4, house.Owner.ID)\n\t\t\t}\n\t\t}\n\t})\n\n\tt.Run(\"NotFound\", func(t *testing.T) {\n\t\th := NewC(c)\n\n\t\treq, err := http.NewRequest(\"GET\", \"/houses/1\", nil)\n\t\trequire.NoError(t, err)\n\n\t\treq.Header.Add(\"Accept\", jsonapi.MediaType)\n\t\treq.Header.Add(\"Accept-Encoding\", \"identity\")\n\t\treq = req.WithContext(context.WithValue(context.Background(), IDKey, \"1\"))\n\n\t\trepo, err := c.GetRepository(House{})\n\t\trequire.NoError(t, err)\n\n\t\thousesRepo, ok := 
repo.(*mocks.Repository)\n\t\trequire.True(t, ok)\n\n\t\thousesRepo.On(\"Get\", mock.Anything, mock.Anything).Once().Run(func(args mock.Arguments) {\n\t\t\ts, ok := args[1].(*query.Scope)\n\t\t\trequire.True(t, ok)\n\n\t\t\tprimaryFilters := s.PrimaryFilters\n\t\t\tif assert.Len(t, primaryFilters, 1) {\n\t\t\t\tfilter := primaryFilters[0]\n\t\t\t\tif assert.Len(t, filter.Values, 1) {\n\t\t\t\t\tv := filter.Values[0]\n\t\t\t\t\tassert.Equal(t, query.OpEqual, v.Operator)\n\t\t\t\t\tif assert.Len(t, v.Values, 1) {\n\t\t\t\t\t\tassert.Equal(t, 1, v.Values[0])\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}).Return(errors.New(class.QueryValueNoResult, \"not found\"))\n\n\t\tresp := httptest.NewRecorder()\n\t\th.Get(House{}).ServeHTTP(resp, req)\n\n\t\t// the status should be 200.\n\t\trequire.Equal(t, http.StatusNotFound, resp.Code)\n\n\t\tif assert.Equal(t, jsonapi.MediaType, resp.Header().Get(\"Content-Type\")) {\n\t\t\tpayload, err := jsonapi.UnmarshalErrors(resp.Body)\n\t\t\trequire.NoError(t, err)\n\n\t\t\tif assert.Len(t, payload.Errors, 1) {\n\t\t\t\tpayloadErr := payload.Errors[0]\n\t\t\t\tcode, err := strconv.ParseInt(payloadErr.Code, 16, 32)\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\tassert.Equal(t, class.QueryValueNoResult, errors.Class(code))\n\t\t\t}\n\t\t}\n\t})\n\n\tt.Run(\"NoID\", func(t *testing.T) {\n\t\th := NewC(c)\n\n\t\treq, err := http.NewRequest(\"GET\", \"/houses/ \", nil)\n\t\trequire.NoError(t, err)\n\n\t\treq.Header.Add(\"Accept\", jsonapi.MediaType)\n\t\treq.Header.Add(\"Accept-Encoding\", \"identity\")\n\t\treq = req.WithContext(context.WithValue(context.Background(), IDKey, \"\"))\n\n\t\tresp := httptest.NewRecorder()\n\t\th.Get(House{}).ServeHTTP(resp, req)\n\n\t\trequire.Equal(t, http.StatusBadRequest, resp.Code)\n\t})\n\n\tt.Run(\"Links\", func(t *testing.T) {\n\t\tt.Run(\"Invalid\", func(t *testing.T) {\n\t\t\th := NewC(c)\n\n\t\t\treq, err := http.NewRequest(\"GET\", \"/houses/1?links=invalid\", nil)\n\t\t\trequire.NoError(t, err)\n\n\t\t\treq.Header.Add(\"Accept\", jsonapi.MediaType)\n\t\t\treq.Header.Add(\"Accept-Encoding\", \"identity\")\n\t\t\treq = req.WithContext(context.WithValue(context.Background(), IDKey, \"1\"))\n\n\t\t\tresp := httptest.NewRecorder()\n\t\t\th.Get(House{}).ServeHTTP(resp, req)\n\n\t\t\trequire.Equal(t, http.StatusBadRequest, resp.Code)\n\t\t})\n\n\t\tt.Run(\"Valid\", func(t *testing.T) {\n\t\t\th := NewC(c)\n\n\t\t\treq, err := http.NewRequest(\"GET\", \"/houses/1?links=false\", nil)\n\t\t\trequire.NoError(t, err)\n\n\t\t\treq.Header.Add(\"Accept\", jsonapi.MediaType)\n\t\t\treq.Header.Add(\"Accept-Encoding\", \"identity\")\n\t\t\treq = req.WithContext(context.WithValue(context.Background(), IDKey, \"1\"))\n\n\t\t\trepo, err := c.GetRepository(House{})\n\t\t\trequire.NoError(t, err)\n\n\t\t\thousesRepo, ok := repo.(*mocks.Repository)\n\t\t\trequire.True(t, ok)\n\n\t\t\thousesRepo.On(\"Get\", mock.Anything, mock.Anything).Once().Run(func(args mock.Arguments) {\n\t\t\t\ts, ok := args[1].(*query.Scope)\n\t\t\t\trequire.True(t, ok)\n\n\t\t\t\tv, ok := s.Value.(*House)\n\t\t\t\trequire.True(t, ok)\n\n\t\t\t\tv.ID = 1\n\t\t\t\tv.Address = \"Main Rd 52\"\n\t\t\t\tv.OwnerID = 4\n\t\t\t}).Return(nil)\n\n\t\t\tresp := httptest.NewRecorder()\n\t\t\th.Get(House{}).ServeHTTP(resp, req)\n\n\t\t\trequire.Equal(t, http.StatusOK, resp.Code)\n\n\t\t\tif assert.Equal(t, jsonapi.MediaType, resp.Header().Get(\"Content-Type\")) {\n\t\t\t\thouse := &House{}\n\t\t\t\terr = jsonapi.UnmarshalC(c, resp.Body, house)\n\t\t\t\trequire.NoError(t, 
err)\n\n\t\t\t\tassert.Equal(t, 1, house.ID)\n\t\t\t\tassert.Equal(t, \"Main Rd 52\", house.Address)\n\t\t\t\tif assert.NotNil(t, house.Owner) {\n\t\t\t\t\tassert.Equal(t, 4, house.Owner.ID)\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t})\n\n\tt.Run(\"InvalidQueryParameter\", func(t *testing.T) {\n\t\th := NewC(c)\n\t\th.StrictQueriesMode = true\n\n\t\treq, err := http.NewRequest(\"GET\", \"/houses/1?invalid=query\", nil)\n\t\trequire.NoError(t, err)\n\n\t\treq.Header.Add(\"Accept\", jsonapi.MediaType)\n\t\treq.Header.Add(\"Accept-Encoding\", \"identity\")\n\t\treq = req.WithContext(context.WithValue(context.Background(), IDKey, \"1\"))\n\n\t\tresp := httptest.NewRecorder()\n\t\th.Get(House{}).ServeHTTP(resp, req)\n\n\t\trequire.Equal(t, http.StatusBadRequest, resp.Code)\n\t})\n\n\tt.Run(\"MultipleQueryValues\", func(t *testing.T) {\n\t\th := NewC(c)\n\t\th.StrictQueriesMode = true\n\n\t\treq, err := http.NewRequest(\"GET\", \"/houses/1?invalid=query&invalid=parameter\", nil)\n\t\trequire.NoError(t, err)\n\n\t\treq.Header.Add(\"Accept\", jsonapi.MediaType)\n\t\treq.Header.Add(\"Accept-Encoding\", \"identity\")\n\t\treq = req.WithContext(context.WithValue(context.Background(), IDKey, \"1\"))\n\n\t\tresp := httptest.NewRecorder()\n\t\th.Get(House{}).ServeHTTP(resp, req)\n\n\t\trequire.Equal(t, http.StatusBadRequest, resp.Code)\n\t})\n\n\tt.Run(\"ErrorLimit\", func(t *testing.T) {\n\n\t\th := NewC(c)\n\t\th.StrictQueriesMode = true\n\t\th.QueryErrorsLimit = 1\n\n\t\treq, err := http.NewRequest(\"GET\", \"/houses/1?invalid=query&filter[houses][invalid][$eq]=4\", nil)\n\t\trequire.NoError(t, err)\n\n\t\treq.Header.Add(\"Accept\", jsonapi.MediaType)\n\t\treq.Header.Add(\"Accept-Encoding\", \"identity\")\n\t\treq = req.WithContext(context.WithValue(context.Background(), IDKey, \"1\"))\n\n\t\tresp := httptest.NewRecorder()\n\t\th.Get(House{}).ServeHTTP(resp, req)\n\n\t\trequire.Equal(t, http.StatusBadRequest, resp.Code)\n\t})\n\n\tt.Run(\"Hooks\", func(t *testing.T) {\n\t\th := NewC(c)\n\t\tRegisterHookC(c, HookChecker{}, BeforeGet, hookCheckerBeforeGet)\n\t\tRegisterHookC(c, HookChecker{}, AfterGet, hookCheckerAfterGet)\n\n\t\treq, err := http.NewRequest(\"GET\", \"/hook_checkers/1\", nil)\n\t\trequire.NoError(t, err)\n\n\t\treq.Header.Add(\"Accept\", jsonapi.MediaType)\n\t\treq.Header.Add(\"Accept-Encoding\", \"identity\")\n\t\treq = req.WithContext(context.WithValue(context.Background(), IDKey, \"1\"))\n\n\t\trepo, err := c.GetRepository(HookChecker{})\n\t\trequire.NoError(t, err)\n\n\t\thookCheckersRepo, ok := repo.(*mocks.Repository)\n\t\trequire.True(t, ok)\n\n\t\thookCheckersRepo.On(\"Get\", mock.Anything, mock.Anything).Once().Run(func(args mock.Arguments) {\n\t\t\ts, ok := args[1].(*query.Scope)\n\t\t\trequire.True(t, ok)\n\n\t\t\tprimaryFilters := s.PrimaryFilters\n\t\t\tif assert.Len(t, primaryFilters, 1) {\n\t\t\t\tfilter := primaryFilters[0]\n\t\t\t\tif assert.Len(t, filter.Values, 1) {\n\t\t\t\t\tv := filter.Values[0]\n\t\t\t\t\tassert.Equal(t, query.OpEqual, v.Operator)\n\t\t\t\t\tif assert.Len(t, v.Values, 1) {\n\t\t\t\t\t\tassert.Equal(t, 1, v.Values[0])\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tassert.Len(t, s.Fieldset, 4)\n\t\t\tassert.Contains(t, s.Fieldset, s.Struct().Primary().NeuronName())\n\n\t\t\tv, ok := s.Value.(*HookChecker)\n\t\t\trequire.True(t, ok)\n\n\t\t\tv.ID = 1\n\t\t}).Return(nil)\n\n\t\tresp := httptest.NewRecorder()\n\t\th.Get(HookChecker{}).ServeHTTP(resp, req)\n\n\t\t// the status should be 200.\n\t\trequire.Equal(t, http.StatusOK, resp.Code)\n\n\t\tif assert.Equal(t, 
jsonapi.MediaType, resp.Header().Get(\"Content-Type\")) {\n\t\t\thc := &HookChecker{}\n\t\t\terr = jsonapi.UnmarshalC(c, resp.Body, hc)\n\t\t\trequire.NoError(t, err)\n\n\t\t\tassert.Equal(t, 1, hc.ID)\n\t\t\tassert.True(t, hc.After)\n\t\t}\n\t})\n}", "func (c *Campaigner) OrganizationList(limit int, offset int) (response ResponseOrganizationList, err error) {\n\t// Setup.\n\tqs := url.Values{}\n\tqs.Set(\"limit\", strconv.Itoa(limit))\n\tqs.Set(\"offset\", strconv.Itoa(offset))\n\tu := url.URL{Path: \"/api/3/organizations\", RawQuery: qs.Encode()}\n\n\t// GET request.\n\tr, body, err := c.get(u.String())\n\tif err != nil {\n\t\treturn response, fmt.Errorf(\"organization list failed, HTTP failure: %s\", err)\n\t}\n\n\t// Success.\n\t// TODO(doc-mismatch): 200 != 201\n\tif r.StatusCode == http.StatusOK {\n\t\terr = json.Unmarshal(body, &response)\n\t\tif err != nil {\n\t\t\treturn response, fmt.Errorf(\"organization list failed, JSON failure: %s\", err)\n\t\t}\n\n\t\treturn response, nil\n\t}\n\n\t// Failure (API docs are not clear about errors here).\n\treturn response, fmt.Errorf(\"organization list failed, unspecified error (%d): %s\", r.StatusCode, string(body))\n}", "func (_m *API) GetPeopleArticle(id int) ([]model.ArticleItem, int, error) {\n\tret := _m.Called(id)\n\n\tvar r0 []model.ArticleItem\n\tif rf, ok := ret.Get(0).(func(int) []model.ArticleItem); ok {\n\t\tr0 = rf(id)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).([]model.ArticleItem)\n\t\t}\n\t}\n\n\tvar r1 int\n\tif rf, ok := ret.Get(1).(func(int) int); ok {\n\t\tr1 = rf(id)\n\t} else {\n\t\tr1 = ret.Get(1).(int)\n\t}\n\n\tvar r2 error\n\tif rf, ok := ret.Get(2).(func(int) error); ok {\n\t\tr2 = rf(id)\n\t} else {\n\t\tr2 = ret.Error(2)\n\t}\n\n\treturn r0, r1, r2\n}", "func (_m *Repository) Fetch(ctx context.Context, limit int, offset int) ([]*models.Host, error) {\n\tret := _m.Called(ctx, limit, offset)\n\n\tvar r0 []*models.Host\n\tif rf, ok := ret.Get(0).(func(context.Context, int, int) []*models.Host); ok {\n\t\tr0 = rf(ctx, limit, offset)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).([]*models.Host)\n\t\t}\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(context.Context, int, int) error); ok {\n\t\tr1 = rf(ctx, limit, offset)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}", "func MockListOrganizationProjectsResponse() MockResponse {\n\treturn MockResponse{\n\t\tStatusCode: http.StatusOK,\n\t\tResponse: listOrganizationProjectsResponse,\n\t}\n}", "func (_m *MockSegmentManager) GetBy(filters ...SegmentFilter) []Segment {\n\t_va := make([]interface{}, len(filters))\n\tfor _i := range filters {\n\t\t_va[_i] = filters[_i]\n\t}\n\tvar _ca []interface{}\n\t_ca = append(_ca, _va...)\n\tret := _m.Called(_ca...)\n\n\tvar r0 []Segment\n\tif rf, ok := ret.Get(0).(func(...SegmentFilter) []Segment); ok {\n\t\tr0 = rf(filters...)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).([]Segment)\n\t\t}\n\t}\n\n\treturn r0\n}", "func (mr *MockOrganizationServiceClientMockRecorder) FetchOrganizationList(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\tvarargs := append([]interface{}{arg0, arg1}, arg2...)\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"FetchOrganizationList\", reflect.TypeOf((*MockOrganizationServiceClient)(nil).FetchOrganizationList), varargs...)\n}", "func (m *MockOrganizationServiceClient) FetchUserList(arg0 context.Context, arg1 *organization.Empty, arg2 ...grpc.CallOption) 
(*organization.UserListResponse, error) {\n\tm.ctrl.T.Helper()\n\tvarargs := []interface{}{arg0, arg1}\n\tfor _, a := range arg2 {\n\t\tvarargs = append(varargs, a)\n\t}\n\tret := m.ctrl.Call(m, \"FetchUserList\", varargs...)\n\tret0, _ := ret[0].(*organization.UserListResponse)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (_m *Repository) List(ctx context.Context, limit, offset int, search string) ([]*models.User, error) {\n\tret := _m.Called(ctx, limit,offset,search)\n\n\tvar r0 []*models.User\n\tif rf, ok := ret.Get(0).(func(context.Context,int,int,string) []*models.User); ok {\n\t\tr0 = rf(ctx,limit,offset,search)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).([]*models.User)\n\t\t}\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(context.Context,int,int,string) error); ok {\n\t\tr1 = rf(ctx,limit,offset,search)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}", "func (_m *Fetcher) FetchAll(_a0 []interface{}) error {\n\tret := _m.Called(_a0)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func([]interface{}) error); ok {\n\t\tr0 = rf(_a0)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func (a *api) h_GET_orgs_orgId_fields(c *gin.Context) {\n\torgId, err := parseInt64Param(c, \"orgId\")\n\ta.logger.Debug(\"GET /orgs/\", orgId, \"/fields\")\n\tif a.errorResponse(c, err) {\n\t\treturn\n\t}\n\n\taCtx := a.getAuthContext(c)\n\tif a.errorResponse(c, aCtx.AuthZHasOrgLevel(orgId, auth.AUTHZ_LEVEL_OU)) {\n\t\treturn\n\t}\n\n\tfis, err := a.Dc.GetFieldInfos(orgId)\n\tif a.errorResponse(c, err) {\n\t\treturn\n\t}\n\n\tc.JSON(http.StatusOK, a.fieldInfos2MetaInfos(fis))\n}", "func (m *MockHandler) GetArticles(username string, limit, offset int, filters []domain.ArticleFilter) (*domain.User, domain.ArticleCollection, int, error) {\n\tret := m.ctrl.Call(m, \"GetArticles\", username, limit, offset, filters)\n\tret0, _ := ret[0].(*domain.User)\n\tret1, _ := ret[1].(domain.ArticleCollection)\n\tret2, _ := ret[2].(int)\n\tret3, _ := ret[3].(error)\n\treturn ret0, ret1, ret2, ret3\n}", "func (_m *PaymentRequestListFetcher) FetchPaymentRequestList(officeUserID uuid.UUID, params *services.FetchPaymentRequestListParams) (*models.PaymentRequests, int, error) {\n\tret := _m.Called(officeUserID, params)\n\n\tvar r0 *models.PaymentRequests\n\tif rf, ok := ret.Get(0).(func(uuid.UUID, *services.FetchPaymentRequestListParams) *models.PaymentRequests); ok {\n\t\tr0 = rf(officeUserID, params)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*models.PaymentRequests)\n\t\t}\n\t}\n\n\tvar r1 int\n\tif rf, ok := ret.Get(1).(func(uuid.UUID, *services.FetchPaymentRequestListParams) int); ok {\n\t\tr1 = rf(officeUserID, params)\n\t} else {\n\t\tr1 = ret.Get(1).(int)\n\t}\n\n\tvar r2 error\n\tif rf, ok := ret.Get(2).(func(uuid.UUID, *services.FetchPaymentRequestListParams) error); ok {\n\t\tr2 = rf(officeUserID, params)\n\t} else {\n\t\tr2 = ret.Error(2)\n\t}\n\n\treturn r0, r1, r2\n}", "func (_m *Usecase) Search(ctx context.Context, query *string, offset int64, limit int64) ([]*models.Category, int64, error) {\n\tret := _m.Called(ctx, query, offset, limit)\n\n\tvar r0 []*models.Category\n\tif rf, ok := ret.Get(0).(func(context.Context, *string, int64, int64) []*models.Category); ok {\n\t\tr0 = rf(ctx, query, offset, limit)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).([]*models.Category)\n\t\t}\n\t}\n\n\tvar r1 int64\n\tif rf, ok := ret.Get(1).(func(context.Context, *string, int64, int64) int64); ok {\n\t\tr1 = rf(ctx, query, 
offset, limit)\n\t} else {\n\t\tr1 = ret.Get(1).(int64)\n\t}\n\n\tvar r2 error\n\tif rf, ok := ret.Get(2).(func(context.Context, *string, int64, int64) error); ok {\n\t\tr2 = rf(ctx, query, offset, limit)\n\t} else {\n\t\tr2 = ret.Error(2)\n\t}\n\n\treturn r0, r1, r2\n}", "func (_m *MockCommentRepo) ListAll(ctx context.Context, org string) ([]Comment, error) {\n\tret := _m.Called(ctx, org)\n\n\tvar r0 []Comment\n\tif rf, ok := ret.Get(0).(func(context.Context, string) []Comment); ok {\n\t\tr0 = rf(ctx, org)\n\t} else {\n\t\tr0 = ret.Get(0).([]Comment)\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(context.Context, string) error); ok {\n\t\tr1 = rf(ctx, org)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}", "func (_m *Repository) Fetch(_a0 *git.FetchOptions) error {\n\tret := _m.Called(_a0)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(*git.FetchOptions) error); ok {\n\t\tr0 = rf(_a0)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func (m *MockDatabase) GetOrganizationsRef() ([]string, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"GetOrganizationsRef\")\n\tret0, _ := ret[0].([]string)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (_m *API) GetArticles(page int, tag string) ([]model.ArticleItem, int, error) {\n\tret := _m.Called(page, tag)\n\n\tvar r0 []model.ArticleItem\n\tif rf, ok := ret.Get(0).(func(int, string) []model.ArticleItem); ok {\n\t\tr0 = rf(page, tag)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).([]model.ArticleItem)\n\t\t}\n\t}\n\n\tvar r1 int\n\tif rf, ok := ret.Get(1).(func(int, string) int); ok {\n\t\tr1 = rf(page, tag)\n\t} else {\n\t\tr1 = ret.Get(1).(int)\n\t}\n\n\tvar r2 error\n\tif rf, ok := ret.Get(2).(func(int, string) error); ok {\n\t\tr2 = rf(page, tag)\n\t} else {\n\t\tr2 = ret.Error(2)\n\t}\n\n\treturn r0, r1, r2\n}", "func (_m *Delivery) FetchByIDs(c echo.Context) error {\n\tret := _m.Called(c)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(echo.Context) error); ok {\n\t\tr0 = rf(c)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func NewOrganizationGetter(db *gorm.DB) OrganizationGetter {\n\treturn OrganizationGetter{\n\t\tdb: db,\n\t}\n}", "func (t *IdentityData) queryInfoByOrg(stub shim.ChaincodeStubInterface, args []string) pb.Response {\n\n\t// 0\n\t// \"handle\"\n\tif len(args) < 1 {\n\t\treturn shim.Error(\"Incorrect number of arguments. 
Expecting 1\")\n\t}\n\n\torg_name := strings.ToLower(args[0])\n\n\tqueryString := fmt.Sprintf(\"{\\\"selector\\\":{\\\"docType\\\":\\\"org\\\",\\\"org_name\\\":\\\"%s\\\"}}\", org_name)\n\n\tqueryResults, err := getQueryResultForQueryString(stub, queryString)\n\tif err != nil {\n\t\treturn shim.Error(err.Error())\n\t}\n\treturn shim.Success(queryResults)\n}", "func (_m *OfficialCompanyRepository) GetOfficialCompany(_a0 uuid.UUID) (*models.OfficialCompany, error) {\n\tret := _m.Called(_a0)\n\n\tvar r0 *models.OfficialCompany\n\tif rf, ok := ret.Get(0).(func(uuid.UUID) *models.OfficialCompany); ok {\n\t\tr0 = rf(_a0)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*models.OfficialCompany)\n\t\t}\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(uuid.UUID) error); ok {\n\t\tr1 = rf(_a0)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}", "func (_m *MockORM) Omit(columns ...string) ORM {\n\tret := _m.Called(columns)\n\n\tvar r0 ORM\n\tif rf, ok := ret.Get(0).(func(...string) ORM); ok {\n\t\tr0 = rf(columns...)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(ORM)\n\t\t}\n\t}\n\n\treturn r0\n}", "func (_m *MockStore) SearchUsersPermissions(ctx context.Context, orgID int64, options accesscontrol.SearchOptions) (map[int64][]accesscontrol.Permission, error) {\n\tret := _m.Called(ctx, orgID, options)\n\n\tvar r0 map[int64][]accesscontrol.Permission\n\tvar r1 error\n\tif rf, ok := ret.Get(0).(func(context.Context, int64, accesscontrol.SearchOptions) (map[int64][]accesscontrol.Permission, error)); ok {\n\t\treturn rf(ctx, orgID, options)\n\t}\n\tif rf, ok := ret.Get(0).(func(context.Context, int64, accesscontrol.SearchOptions) map[int64][]accesscontrol.Permission); ok {\n\t\tr0 = rf(ctx, orgID, options)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(map[int64][]accesscontrol.Permission)\n\t\t}\n\t}\n\n\tif rf, ok := ret.Get(1).(func(context.Context, int64, accesscontrol.SearchOptions) error); ok {\n\t\tr1 = rf(ctx, orgID, options)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}", "func (m *MockRepository) Fetch(ctx context.Context, page, perpage int) ([]transaction_header.Domain, int, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Fetch\", ctx, page, perpage)\n\tret0, _ := ret[0].([]transaction_header.Domain)\n\tret1, _ := ret[1].(int)\n\tret2, _ := ret[2].(error)\n\treturn ret0, ret1, ret2\n}", "func (_m *MockUserProvider) Find(criteria *UserCriteria) ([]*UserEntity, error) {\n\tret := _m.Called(criteria)\n\n\tvar r0 []*UserEntity\n\tif rf, ok := ret.Get(0).(func(*UserCriteria) []*UserEntity); ok {\n\t\tr0 = rf(criteria)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).([]*UserEntity)\n\t\t}\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(*UserCriteria) error); ok {\n\t\tr1 = rf(criteria)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}", "func (m *MockUsecase) Fetch(ctx context.Context, page, perpage int) ([]transaction_header.Domain, int, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Fetch\", ctx, page, perpage)\n\tret0, _ := ret[0].([]transaction_header.Domain)\n\tret1, _ := ret[1].(int)\n\tret2, _ := ret[2].(error)\n\treturn ret0, ret1, ret2\n}", "func (_m *AuthorController) Fetch(c *fiber.Ctx) error {\n\tret := _m.Called(c)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(*fiber.Ctx) error); ok {\n\t\tr0 = rf(c)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func TestFetch(t *testing.T) {\n\tvar mockOrder models.Order\n\tuid := 
uuid.New()\n\tmockOrder.ID = int64(10)\n\tmockOrder.UUID = uid\n\tmockOrder.Status = 0\n\n\tmockUCase := new(mocks.Usecase)\n\tmockListOrder := make([]*models.Order, 0)\n\tmockListOrder = append(mockListOrder, &mockOrder)\n\tlimit := 10\n\toffset := 0\n\tmockUCase.On(\"Fetch\", mock.Anything, limit, offset).Return(mockListOrder, nil)\n\n\te := echo.New()\n\treq, err := http.NewRequest(echo.GET, \"/orders\", strings.NewReader(\"\"))\n\tassert.NoError(t, err)\n\n\trec := httptest.NewRecorder()\n\tc := e.NewContext(req, rec)\n\thandler := orderHttp.OrderHandler{\n\t\tOUsecase: mockUCase,\n\t}\n\terr = handler.FetchOrder(c)\n\trequire.NoError(t, err)\n\n\tassert.Equal(t, http.StatusOK, rec.Code)\n\tassert.JSONEq(t, `[{\"distance\":0, \"id\":\"`+uid.String()+`\", \"status\":\"UNASSIGNED\"}]`, rec.Body.String())\n\t//assert.\n\tmockUCase.AssertExpectations(t)\n}", "func (_m *Repository) FindAll() ([]*entity.Person, error) {\n\tret := _m.Called()\n\n\tvar r0 []*entity.Person\n\tif rf, ok := ret.Get(0).(func() []*entity.Person); ok {\n\t\tr0 = rf()\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).([]*entity.Person)\n\t\t}\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func() error); ok {\n\t\tr1 = rf()\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}", "func (_m *MockORM) Or(query interface{}, args ...interface{}) ORM {\n\tret := _m.Called(query, args)\n\n\tvar r0 ORM\n\tif rf, ok := ret.Get(0).(func(interface{}, ...interface{}) ORM); ok {\n\t\tr0 = rf(query, args...)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(ORM)\n\t\t}\n\t}\n\n\treturn r0\n}", "func (_m *ArticleUsecase) GetArticles(ctx context.Context, author string, query string) ([]domain.Article, error) {\n\tret := _m.Called(ctx, author, query)\n\n\tvar r0 []domain.Article\n\tif rf, ok := ret.Get(0).(func(context.Context, string, string) []domain.Article); ok {\n\t\tr0 = rf(ctx, author, query)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).([]domain.Article)\n\t\t}\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(context.Context, string, string) error); ok {\n\t\tr1 = rf(ctx, author, query)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}", "func (mr *MockOrganizationServiceClientMockRecorder) FetchUserListByOrganization(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\tvarargs := append([]interface{}{arg0, arg1}, arg2...)\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"FetchUserListByOrganization\", reflect.TypeOf((*MockOrganizationServiceClient)(nil).FetchUserListByOrganization), varargs...)\n}", "func (_m *Fetcher) Fetch(_a0 interface{}) (bool, error) {\n\tret := _m.Called(_a0)\n\n\tvar r0 bool\n\tif rf, ok := ret.Get(0).(func(interface{}) bool); ok {\n\t\tr0 = rf(_a0)\n\t} else {\n\t\tr0 = ret.Get(0).(bool)\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(interface{}) error); ok {\n\t\tr1 = rf(_a0)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}", "func (a *OrganizationsApiService) OrganizationsOrganizationsRead(ctx _context.Context, id string, localVarOptionals *OrganizationsOrganizationsReadOpts) (*_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodGet\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/api/v2/organizations/{id}/\"\n\tlocalVarPath = strings.Replace(localVarPath, 
\"{\"+\"id\"+\"}\", _neturl.QueryEscape(parameterToString(id, \"\")), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\tif localVarOptionals != nil && localVarOptionals.Search.IsSet() {\n\t\tlocalVarQueryParams.Add(\"search\", parameterToString(localVarOptionals.Search.Value(), \"\"))\n\t}\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\t\tlocalVarHTTPResponse.Body.Close()\n\t\tif err != nil {\n\t\t\treturn localVarHTTPResponse, err\n\t\t}\n\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\treturn localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarHTTPResponse, nil\n}", "func (m *Client) GetOrganizations(arg0 context.Context, arg1 *zendesk.OrganizationListOptions) ([]zendesk.Organization, zendesk.Page, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"GetOrganizations\", arg0, arg1)\n\tret0, _ := ret[0].([]zendesk.Organization)\n\tret1, _ := ret[1].(zendesk.Page)\n\tret2, _ := ret[2].(error)\n\treturn ret0, ret1, ret2\n}", "func (_m *Repository) GetAllCompanies(ctx context.Context, f models.CompanyFilters) ([]*models.Company, error) {\n\tret := _m.Called(ctx, f)\n\n\tvar r0 []*models.Company\n\tif rf, ok := ret.Get(0).(func(context.Context, models.CompanyFilters) []*models.Company); ok {\n\t\tr0 = rf(ctx, f)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).([]*models.Company)\n\t\t}\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(context.Context, models.CompanyFilters) error); ok {\n\t\tr1 = rf(ctx, f)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}", "func (_m *MockUserGroupsProvider) Find(criteria *UserGroupsCriteria) ([]*UserGroupsEntity, error) {\n\tret := _m.Called(criteria)\n\n\tvar r0 []*UserGroupsEntity\n\tif rf, ok := ret.Get(0).(func(*UserGroupsCriteria) []*UserGroupsEntity); ok {\n\t\tr0 = rf(criteria)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).([]*UserGroupsEntity)\n\t\t}\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(*UserGroupsCriteria) error); ok {\n\t\tr1 = rf(criteria)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}", "func (m *MockRecout) Fetch(ctx context.Context, accountID string, offset, limit int) ([]entity.Recout, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Fetch\", ctx, accountID, offset, limit)\n\tret0, _ := 
ret[0].([]entity.Recout)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func getOrganizationsFromResponse(list []*OrganizationResponse) []*Organization {\n\tvar results []*Organization\n\tfor _, val := range list {\n\t\tif strings.ToLower(val.SubRequestStatus) == \"success\" {\n\t\t\tresults = append(results, &val.Organization)\n\t\t}\n\t}\n\treturn results\n}", "func (m *Client) GetOrganizationByExternalID(arg0 context.Context, arg1 string) ([]zendesk.Organization, zendesk.Page, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"GetOrganizationByExternalID\", arg0, arg1)\n\tret0, _ := ret[0].([]zendesk.Organization)\n\tret1, _ := ret[1].(zendesk.Page)\n\tret2, _ := ret[2].(error)\n\treturn ret0, ret1, ret2\n}", "func (m *MockArticleRepository) Fetch(cursor string, num int64) ([]*article.Article, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Fetch\", cursor, num)\n\tret0, _ := ret[0].([]*article.Article)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (_m *MockGroupProvider) Find(c *GroupCriteria) ([]*GroupEntity, error) {\n\tret := _m.Called(c)\n\n\tvar r0 []*GroupEntity\n\tif rf, ok := ret.Get(0).(func(*GroupCriteria) []*GroupEntity); ok {\n\t\tr0 = rf(c)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).([]*GroupEntity)\n\t\t}\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(*GroupCriteria) error); ok {\n\t\tr1 = rf(c)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}", "func (_m *MockStore) GetUsersBasicRoles(ctx context.Context, userFilter []int64, orgID int64) (map[int64][]string, error) {\n\tret := _m.Called(ctx, userFilter, orgID)\n\n\tvar r0 map[int64][]string\n\tvar r1 error\n\tif rf, ok := ret.Get(0).(func(context.Context, []int64, int64) (map[int64][]string, error)); ok {\n\t\treturn rf(ctx, userFilter, orgID)\n\t}\n\tif rf, ok := ret.Get(0).(func(context.Context, []int64, int64) map[int64][]string); ok {\n\t\tr0 = rf(ctx, userFilter, orgID)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(map[int64][]string)\n\t\t}\n\t}\n\n\tif rf, ok := ret.Get(1).(func(context.Context, []int64, int64) error); ok {\n\t\tr1 = rf(ctx, userFilter, orgID)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}", "func (c *organizationsRESTClient) SearchOrganizations(ctx context.Context, req *resourcemanagerpb.SearchOrganizationsRequest, opts ...gax.CallOption) *OrganizationIterator {\n\tit := &OrganizationIterator{}\n\treq = proto.Clone(req).(*resourcemanagerpb.SearchOrganizationsRequest)\n\tunm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}\n\tit.InternalFetch = func(pageSize int, pageToken string) ([]*resourcemanagerpb.Organization, string, error) {\n\t\tresp := &resourcemanagerpb.SearchOrganizationsResponse{}\n\t\tif pageToken != \"\" {\n\t\t\treq.PageToken = pageToken\n\t\t}\n\t\tif pageSize > math.MaxInt32 {\n\t\t\treq.PageSize = math.MaxInt32\n\t\t} else if pageSize != 0 {\n\t\t\treq.PageSize = int32(pageSize)\n\t\t}\n\t\tbaseUrl, err := url.Parse(c.endpoint)\n\t\tif err != nil {\n\t\t\treturn nil, \"\", err\n\t\t}\n\t\tbaseUrl.Path += fmt.Sprintf(\"/v3/organizations:search\")\n\n\t\tparams := url.Values{}\n\t\tparams.Add(\"$alt\", \"json;enum-encoding=int\")\n\t\tif req.GetPageSize() != 0 {\n\t\t\tparams.Add(\"pageSize\", fmt.Sprintf(\"%v\", req.GetPageSize()))\n\t\t}\n\t\tif req.GetPageToken() != \"\" {\n\t\t\tparams.Add(\"pageToken\", fmt.Sprintf(\"%v\", req.GetPageToken()))\n\t\t}\n\t\tif req.GetQuery() != \"\" {\n\t\t\tparams.Add(\"query\", fmt.Sprintf(\"%v\", 
req.GetQuery()))\n\t\t}\n\n\t\tbaseUrl.RawQuery = params.Encode()\n\n\t\t// Build HTTP headers from client and context metadata.\n\t\thds := append(c.xGoogHeaders, \"Content-Type\", \"application/json\")\n\t\theaders := gax.BuildHeaders(ctx, hds...)\n\t\te := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {\n\t\t\tif settings.Path != \"\" {\n\t\t\t\tbaseUrl.Path = settings.Path\n\t\t\t}\n\t\t\thttpReq, err := http.NewRequest(\"GET\", baseUrl.String(), nil)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\thttpReq.Header = headers\n\n\t\t\thttpRsp, err := c.httpClient.Do(httpReq)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tdefer httpRsp.Body.Close()\n\n\t\t\tif err = googleapi.CheckResponse(httpRsp); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tbuf, err := io.ReadAll(httpRsp.Body)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tif err := unm.Unmarshal(buf, resp); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\treturn nil\n\t\t}, opts...)\n\t\tif e != nil {\n\t\t\treturn nil, \"\", e\n\t\t}\n\t\tit.Response = resp\n\t\treturn resp.GetOrganizations(), resp.GetNextPageToken(), nil\n\t}\n\n\tfetch := func(pageSize int, pageToken string) (string, error) {\n\t\titems, nextPageToken, err := it.InternalFetch(pageSize, pageToken)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tit.items = append(it.items, items...)\n\t\treturn nextPageToken, nil\n\t}\n\n\tit.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)\n\tit.pageInfo.MaxSize = int(req.GetPageSize())\n\tit.pageInfo.Token = req.GetPageToken()\n\n\treturn it\n}", "func (_m *Usecase) FetchByIDs(c context.Context, id []uuid.UUID) (*[]models.Item, error) {\n\tret := _m.Called(c, id)\n\n\tvar r0 *[]models.Item\n\tif rf, ok := ret.Get(0).(func(context.Context, []uuid.UUID) *[]models.Item); ok {\n\t\tr0 = rf(c, id)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*[]models.Item)\n\t\t}\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(context.Context, []uuid.UUID) error); ok {\n\t\tr1 = rf(c, id)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}", "func (c *organizationsRESTClient) GetOrganization(ctx context.Context, req *resourcemanagerpb.GetOrganizationRequest, opts ...gax.CallOption) (*resourcemanagerpb.Organization, error) {\n\tbaseUrl, err := url.Parse(c.endpoint)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tbaseUrl.Path += fmt.Sprintf(\"/v3/%v\", req.GetName())\n\n\tparams := url.Values{}\n\tparams.Add(\"$alt\", \"json;enum-encoding=int\")\n\n\tbaseUrl.RawQuery = params.Encode()\n\n\t// Build HTTP headers from client and context metadata.\n\thds := []string{\"x-goog-request-params\", fmt.Sprintf(\"%s=%v\", \"name\", url.QueryEscape(req.GetName()))}\n\n\thds = append(c.xGoogHeaders, hds...)\n\thds = append(hds, \"Content-Type\", \"application/json\")\n\theaders := gax.BuildHeaders(ctx, hds...)\n\topts = append((*c.CallOptions).GetOrganization[0:len((*c.CallOptions).GetOrganization):len((*c.CallOptions).GetOrganization)], opts...)\n\tunm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}\n\tresp := &resourcemanagerpb.Organization{}\n\te := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {\n\t\tif settings.Path != \"\" {\n\t\t\tbaseUrl.Path = settings.Path\n\t\t}\n\t\thttpReq, err := http.NewRequest(\"GET\", baseUrl.String(), nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\thttpReq = httpReq.WithContext(ctx)\n\t\thttpReq.Header = headers\n\n\t\thttpRsp, 
err := c.httpClient.Do(httpReq)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer httpRsp.Body.Close()\n\n\t\tif err = googleapi.CheckResponse(httpRsp); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tbuf, err := io.ReadAll(httpRsp.Body)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := unm.Unmarshal(buf, resp); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t}, opts...)\n\tif e != nil {\n\t\treturn nil, e\n\t}\n\treturn resp, nil\n}", "func (m *MockArticlesLogic) GetArticles(username string, limit, offset int, filters []domain.ArticleFilter) (*domain.User, domain.ArticleCollection, int, error) {\n\tret := m.ctrl.Call(m, \"GetArticles\", username, limit, offset, filters)\n\tret0, _ := ret[0].(*domain.User)\n\tret1, _ := ret[1].(domain.ArticleCollection)\n\tret2, _ := ret[2].(int)\n\tret3, _ := ret[3].(error)\n\treturn ret0, ret1, ret2, ret3\n}", "func NewMock(middleware []Middleware) OrganizationService {\n\tvar svc OrganizationService = NewBasicOrganizationServiceServiceMock()\n\tfor _, m := range middleware {\n\t\tsvc = m(svc)\n\t}\n\treturn svc\n}", "func (_m *API) GetClubs(page int) ([]model.ClubSearch, int, error) {\n\tret := _m.Called(page)\n\n\tvar r0 []model.ClubSearch\n\tif rf, ok := ret.Get(0).(func(int) []model.ClubSearch); ok {\n\t\tr0 = rf(page)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).([]model.ClubSearch)\n\t\t}\n\t}\n\n\tvar r1 int\n\tif rf, ok := ret.Get(1).(func(int) int); ok {\n\t\tr1 = rf(page)\n\t} else {\n\t\tr1 = ret.Get(1).(int)\n\t}\n\n\tvar r2 error\n\tif rf, ok := ret.Get(2).(func(int) error); ok {\n\t\tr2 = rf(page)\n\t} else {\n\t\tr2 = ret.Error(2)\n\t}\n\n\treturn r0, r1, r2\n}", "func (_e *MockSegmentManager_Expecter) GetBy(filters ...interface{}) *MockSegmentManager_GetBy_Call {\n\treturn &MockSegmentManager_GetBy_Call{Call: _e.mock.On(\"GetBy\",\n\t\tappend([]interface{}{}, filters...)...)}\n}", "func (m *MockLoggingClient) Fetch(arg0 context.Context, arg1 *logging.QueryRequest, arg2 ...grpc.CallOption) (*logging.QueryResponse, error) {\n\tvarargs := []interface{}{arg0, arg1}\n\tfor _, a := range arg2 {\n\t\tvarargs = append(varargs, a)\n\t}\n\tret := m.ctrl.Call(m, \"Fetch\", varargs...)\n\tret0, _ := ret[0].(*logging.QueryResponse)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (_m *MockORM) Find(out interface{}, where ...interface{}) ORM {\n\tret := _m.Called(out, where)\n\n\tvar r0 ORM\n\tif rf, ok := ret.Get(0).(func(interface{}, ...interface{}) ORM); ok {\n\t\tr0 = rf(out, where...)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(ORM)\n\t\t}\n\t}\n\n\treturn r0\n}", "func (_m *OfficialCompanyRepository) UpdateOfficialCompany(_a0 models.OfficialCompany, _a1 uuid.UUID) (*models.OfficialCompany, error) {\n\tret := _m.Called(_a0, _a1)\n\n\tvar r0 *models.OfficialCompany\n\tif rf, ok := ret.Get(0).(func(models.OfficialCompany, uuid.UUID) *models.OfficialCompany); ok {\n\t\tr0 = rf(_a0, _a1)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*models.OfficialCompany)\n\t\t}\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(models.OfficialCompany, uuid.UUID) error); ok {\n\t\tr1 = rf(_a0, _a1)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}", "func PopulateQueryParamForOrganizationID(queryParamMap map[string]string) map[string]string {\n\n\tif queryParamMap == nil {\n\t\tqueryParamMap = make(map[string]string)\n\t}\n\tconf, _ := config.ReadConfigs()\n\tif conf.GlobalAdapter.Enabled {\n\t\tqueryParamMap[organizationID] = 
commonOrganizationIDValue\n\t}\n\treturn queryParamMap\n}", "func (a *api) h_GET_orgs(c *gin.Context) {\n\ta.logger.Debug(\"GET /orgs\")\n\n\taCtx := a.getAuthContext(c)\n\tif aCtx.UserLogin() == \"\" {\n\t\tc.Status(http.StatusUnauthorized)\n\t\treturn\n\t}\n\n\tods, err := a.Dc.GetOrgDescs(aCtx)\n\tif a.errorResponse(c, err) {\n\t\treturn\n\t}\n\tc.JSON(http.StatusOK, a.morgs2orgs(ods))\n}", "func (_m *ChannelStore) AutocompleteInTeamForSearch(teamID string, userID string, term string, includeDeleted bool) (model.ChannelList, error) {\n\tret := _m.Called(teamID, userID, term, includeDeleted)\n\n\tvar r0 model.ChannelList\n\tif rf, ok := ret.Get(0).(func(string, string, string, bool) model.ChannelList); ok {\n\t\tr0 = rf(teamID, userID, term, includeDeleted)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(model.ChannelList)\n\t\t}\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(string, string, string, bool) error); ok {\n\t\tr1 = rf(teamID, userID, term, includeDeleted)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}", "func (_m *ArticleUsecase) GetArticles(ctx context.Context, payload domain.ArticleSearchPayload) ([]domain.Article, error) {\n\tret := _m.Called(ctx, payload)\n\n\tvar r0 []domain.Article\n\tif rf, ok := ret.Get(0).(func(context.Context, domain.ArticleSearchPayload) []domain.Article); ok {\n\t\tr0 = rf(ctx, payload)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).([]domain.Article)\n\t\t}\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(context.Context, domain.ArticleSearchPayload) error); ok {\n\t\tr1 = rf(ctx, payload)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}", "func GetOrgByOrgnameEndpoint(w http.ResponseWriter, r *http.Request) {\n\tparams := mux.Vars(r)\n\tname := params[\"name\"]\n\n\tvar org models.Organization\n\t_ = json.NewDecoder(r.Body).Decode(&org)\n\n\torg = db.GetOrgByName(name)\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tjson.NewEncoder(w).Encode(org)\n\n}", "func (_m *MockORM) Having(query string, values ...interface{}) ORM {\n\tret := _m.Called(query, values)\n\n\tvar r0 ORM\n\tif rf, ok := ret.Get(0).(func(string, ...interface{}) ORM); ok {\n\t\tr0 = rf(query, values...)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(ORM)\n\t\t}\n\t}\n\n\treturn r0\n}", "func (a *OrganizationsApiService) GetOrganization(ctx _context.Context, organizationGuid string) ApiGetOrganizationRequest {\n\treturn ApiGetOrganizationRequest{\n\t\tApiService: a,\n\t\tctx: ctx,\n\t\torganizationGuid: organizationGuid,\n\t}\n}", "func (_m *Repository) GetData(appid string) (steamapis.Domain, error) {\n\tret := _m.Called(appid)\n\n\tvar r0 steamapis.Domain\n\tif rf, ok := ret.Get(0).(func(string) steamapis.Domain); ok {\n\t\tr0 = rf(appid)\n\t} else {\n\t\tr0 = ret.Get(0).(steamapis.Domain)\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(string) error); ok {\n\t\tr1 = rf(appid)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}", "func (m *MockRepository) Fetch(bucketName, name string, model db.Model) (bool, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Fetch\", bucketName, name, model)\n\tret0, _ := ret[0].(bool)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (c *OrganizationsGetCall) Fields(s ...googleapi.Field) *OrganizationsGetCall {\n\tc.urlParams_.Set(\"fields\", googleapi.CombineFields(s))\n\treturn c\n}", "func (c *OrganizationsGetCall) Fields(s ...googleapi.Field) *OrganizationsGetCall {\n\tc.urlParams_.Set(\"fields\", 
googleapi.CombineFields(s))\n\treturn c\n}", "func (m *MockFeiraStore) Search(ctx context.Context, distrito string) ([]*model.Feira, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Search\", ctx, distrito)\n\tret0, _ := ret[0].([]*model.Feira)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (_m *Service) Get(_a0 uint) apifarm.Query {\n\tret := _m.Called(_a0)\n\n\tvar r0 apifarm.Query\n\tif rf, ok := ret.Get(0).(func(uint) apifarm.Query); ok {\n\t\tr0 = rf(_a0)\n\t} else {\n\t\tr0 = ret.Get(0).(apifarm.Query)\n\t}\n\n\treturn r0\n}", "func (m *MockOrg) Get(orgKey api.OrgKey) (api.Org, error) {\n\tret := m.ctrl.Call(m, \"Get\", orgKey)\n\tret0, _ := ret[0].(api.Org)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (_m *API) GetTopPeople(page int) ([]model.TopPeople, int, error) {\n\tret := _m.Called(page)\n\n\tvar r0 []model.TopPeople\n\tif rf, ok := ret.Get(0).(func(int) []model.TopPeople); ok {\n\t\tr0 = rf(page)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).([]model.TopPeople)\n\t\t}\n\t}\n\n\tvar r1 int\n\tif rf, ok := ret.Get(1).(func(int) int); ok {\n\t\tr1 = rf(page)\n\t} else {\n\t\tr1 = ret.Get(1).(int)\n\t}\n\n\tvar r2 error\n\tif rf, ok := ret.Get(2).(func(int) error); ok {\n\t\tr2 = rf(page)\n\t} else {\n\t\tr2 = ret.Error(2)\n\t}\n\n\treturn r0, r1, r2\n}", "func FetchOrganizations() ([]*mono_models.Organization, error) {\n\tparams := clientOrgs.NewListOrganizationsParams()\n\tmemberOnly := true\n\tparams.SetMemberOnly(&memberOnly)\n\tres, err := authentication.Client().Organizations.ListOrganizations(params, authentication.ClientAuth())\n\n\tif err != nil {\n\t\treturn nil, processOrgErrorResponse(err)\n\t}\n\n\treturn res.Payload, nil\n}", "func (_m *API) GetPeopleNews(id int) ([]model.NewsItem, int, error) {\n\tret := _m.Called(id)\n\n\tvar r0 []model.NewsItem\n\tif rf, ok := ret.Get(0).(func(int) []model.NewsItem); ok {\n\t\tr0 = rf(id)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).([]model.NewsItem)\n\t\t}\n\t}\n\n\tvar r1 int\n\tif rf, ok := ret.Get(1).(func(int) int); ok {\n\t\tr1 = rf(id)\n\t} else {\n\t\tr1 = ret.Get(1).(int)\n\t}\n\n\tvar r2 error\n\tif rf, ok := ret.Get(2).(func(int) error); ok {\n\t\tr2 = rf(id)\n\t} else {\n\t\tr2 = ret.Error(2)\n\t}\n\n\treturn r0, r1, r2\n}", "func (_m *MockUserRepositoryProvider) FindUsers(sb *UserSearchBuilder) ([]model.User, int, int, error) {\n\tret := _m.Called(sb)\n\n\tvar r0 []model.User\n\tif rf, ok := ret.Get(0).(func(*UserSearchBuilder) []model.User); ok {\n\t\tr0 = rf(sb)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).([]model.User)\n\t\t}\n\t}\n\n\tvar r1 int\n\tif rf, ok := ret.Get(1).(func(*UserSearchBuilder) int); ok {\n\t\tr1 = rf(sb)\n\t} else {\n\t\tr1 = ret.Get(1).(int)\n\t}\n\n\tvar r2 int\n\tif rf, ok := ret.Get(2).(func(*UserSearchBuilder) int); ok {\n\t\tr2 = rf(sb)\n\t} else {\n\t\tr2 = ret.Get(2).(int)\n\t}\n\n\tvar r3 error\n\tif rf, ok := ret.Get(3).(func(*UserSearchBuilder) error); ok {\n\t\tr3 = rf(sb)\n\t} else {\n\t\tr3 = ret.Error(3)\n\t}\n\n\treturn r0, r1, r2, r3\n}" ]
[ "0.6240677", "0.6059997", "0.58044446", "0.5616137", "0.55219644", "0.5459781", "0.5447272", "0.54235154", "0.54216474", "0.53972083", "0.53289837", "0.5325005", "0.5287012", "0.5278028", "0.5263327", "0.5252575", "0.52465487", "0.5238369", "0.51930016", "0.5192341", "0.5154519", "0.513359", "0.51296675", "0.5107689", "0.5096002", "0.5087762", "0.50735074", "0.50632894", "0.50629115", "0.5027658", "0.501559", "0.50024855", "0.49883682", "0.49868852", "0.49812624", "0.4975104", "0.4970255", "0.49250886", "0.49078578", "0.4901846", "0.48821637", "0.48774844", "0.48702922", "0.4865235", "0.48591778", "0.48448488", "0.48116377", "0.48068458", "0.47978237", "0.47876808", "0.47749725", "0.4767784", "0.4761333", "0.47534588", "0.47517857", "0.47482824", "0.47427857", "0.47407386", "0.47391474", "0.47353387", "0.47333768", "0.47293046", "0.47271574", "0.47187036", "0.47159433", "0.47082978", "0.46989354", "0.4696331", "0.46956393", "0.46946546", "0.4690891", "0.46895713", "0.46803954", "0.46795797", "0.46756312", "0.46738192", "0.46702015", "0.4667102", "0.46603665", "0.46531698", "0.4652639", "0.4650653", "0.46278897", "0.4625271", "0.46226886", "0.46215835", "0.4620741", "0.462032", "0.4614456", "0.46121037", "0.46098825", "0.4605945", "0.4605945", "0.46051294", "0.46035704", "0.45967886", "0.45962638", "0.45943686", "0.45909017", "0.45876536" ]
0.738788
0
Returns a string with an HMAC signature.
func SignString(value, key string) string { mac := hmac.New(sha1.New, []byte(key)); mac.Write([]byte(value)); return fmt.Sprintf("%x----%s", mac.Sum(nil), value) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func getSignature(signingKey []byte, stringToSign string) string {\n\treturn hex.EncodeToString(sumHMAC(signingKey, []byte(stringToSign)))\n}", "func (k Key) HMAC() []byte {\n\treturn k[len(k)/2:]\n}", "func HMAC(algo HashAlgo, s, key string) string {\n\tvar mac hash.Hash\n\n\tswitch algo {\n\tcase AlgoMD5:\n\t\tmac = hmac.New(md5.New, []byte(key))\n\tcase AlgoSha1:\n\t\tmac = hmac.New(sha1.New, []byte(key))\n\tcase AlgoSha224:\n\t\tmac = hmac.New(sha256.New224, []byte(key))\n\tcase AlgoSha256:\n\t\tmac = hmac.New(sha256.New, []byte(key))\n\tcase AlgoSha384:\n\t\tmac = hmac.New(sha512.New384, []byte(key))\n\tcase AlgoSha512:\n\t\tmac = hmac.New(sha512.New, []byte(key))\n\tdefault:\n\t\treturn s\n\t}\n\n\tmac.Write([]byte(s))\n\n\treturn hex.EncodeToString(mac.Sum(nil))\n}", "func getHS256Signature(encHeader string, encPayload string, pubKeyHexa string) string {\n\topenssl := exec.Command(\"openssl\", \"dgst\", \"-sha256\", \"-mac\", \"HMAC\", \"-macopt\", \"hexkey:\"+pubKeyHexa)\n\n\topenssl.Stdin = bytes.NewReader([]byte(encHeader + \".\" + encPayload))\n\n\tcmdOutput := &bytes.Buffer{}\n\topenssl.Stdout = cmdOutput\n\topenssl.Start()\n\topenssl.Wait()\n\thmac := string(cmdOutput.Bytes())\n\treturn hex.EncodeToString([]byte(hmac))\n}", "func getHMAC(sessionID string) string {\n\th := hmac.New(sha512.New, key)\n\th.Write([]byte(sessionID))\n\tfmt.Printf(\"%x\\n\", h.Sum(nil))\n\t// return signature and sessionID with separator\n\treturn fmt.Sprintf(\"%x\", h.Sum(nil)) + \"|\" + sessionID\n}", "func GetSignature(key string, method string, req map[string]string, fcResource string) string {\n\theader := &headers{}\n\tlowerKeyHeaders := map[string]string{}\n\tfor k, v := range req {\n\t\tlowerKey := strings.ToLower(k)\n\t\tif strings.HasPrefix(lowerKey, HTTPHeaderPrefix) {\n\t\t\theader.Keys = append(header.Keys, lowerKey)\n\t\t\theader.Vals = append(header.Vals, v)\n\t\t}\n\t\tlowerKeyHeaders[lowerKey] = v\n\t}\n\tsort.Sort(header)\n\n\tfcHeaders := \"\"\n\tfor i := range header.Keys {\n\t\tfcHeaders += header.Keys[i] + \":\" + header.Vals[i] + \"\\n\"\n\t}\n\n\tdate := req[HTTPHeaderDate]\n\tif expires, ok := getExpiresFromURLQueries(fcResource); ok {\n\t\tdate = expires\n\t}\n\n\tsignStr := method + \"\\n\" + lowerKeyHeaders[strings.ToLower(HTTPHeaderContentMD5)] + \"\\n\" + lowerKeyHeaders[strings.ToLower(HTTPHeaderContentType)] + \"\\n\" + date + \"\\n\" + fcHeaders + fcResource\n\n\th := hmac.New(func() hash.Hash { return sha256.New() }, []byte(key))\n\tio.WriteString(h, signStr)\n\tsignedStr := base64.StdEncoding.EncodeToString(h.Sum(nil))\n\n\treturn signedStr\n}", "func HMAC(in string, key []byte) (string, error) {\n\th := hmac.New(sha1.New, key)\n\tn, err := h.Write([]byte(in))\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif got, want := n, len(in); got < want {\n\t\treturn \"\", fmt.Errorf(\"only hashed %d of %d bytes\", got, want)\n\t}\n\tdig := h.Sum(nil)\n\treturn fmt.Sprintf(\"%x\", dig), nil\n}", "func createSignature(c *Credentials, formattedShortTime, stringToSign string) string {\n\th1 := makeHmac([]byte(\"AWS4\"+c.SecretAccessKey), []byte(formattedShortTime))\n\th2 := makeHmac(h1, []byte(c.Region))\n\th3 := makeHmac(h2, []byte(\"s3\"))\n\th4 := makeHmac(h3, []byte(\"aws4_request\"))\n\tsignature := makeHmac(h4, []byte(stringToSign))\n\treturn hex.EncodeToString(signature)\n}", "func (c *Client) GetSignature(req *http.Request) string {\n\t// Sort fcHeaders.\n\theaders := &fcHeaders{}\n\tfor k := range req.Header {\n\t\tif strings.HasPrefix(strings.ToLower(k), 
\"x-fc-\") {\n\t\t\theaders.Keys = append(headers.Keys, strings.ToLower(k))\n\t\t\theaders.Values = append(headers.Values, req.Header.Get(k))\n\t\t}\n\t}\n\tsort.Sort(headers)\n\tfcHeaders := \"\"\n\tfor i := range headers.Keys {\n\t\tfcHeaders += headers.Keys[i] + \":\" + headers.Values[i] + \"\\n\"\n\t}\n\n\thttpMethod := req.Method\n\tcontentMd5 := req.Header.Get(\"Content-MD5\")\n\tcontentType := req.Header.Get(\"Content-Type\")\n\tdate := req.Header.Get(\"Date\")\n\tfcResource := req.URL.Path\n\n\tsignStr := httpMethod + \"\\n\" + contentMd5 + \"\\n\" + contentType + \"\\n\" + date + \"\\n\" + fcHeaders + fcResource\n\n\th := hmac.New(func() hash.Hash { return sha256.New() }, []byte(c.accessKeySecret))\n\t_, _ = io.WriteString(h, signStr)\n\treturn base64.StdEncoding.EncodeToString(h.Sum(nil))\n}", "func (s Sign) getSignature(signingKey []byte, stringToSign string) string {\n\treturn hex.EncodeToString(sumHMAC(signingKey, []byte(stringToSign)))\n}", "func createHMACKey() string {\n\tkey := make([]byte, 49)\n\trand.Reader.Read(key)\n\tvar cooked = base64.StdEncoding.EncodeToString(key)\n\treturn cooked\n}", "func (r *RPCRequest) GenerateSig(key, secret string) error {\n\tif len(key) == 0 || len(secret) == 0 {\n\t\treturn errors.New(\"You must supply an access key and an access secret\")\n\t}\n\tnonce := time.Now().UnixNano() / int64(time.Millisecond)\n\tsigString := fmt.Sprintf(\"_=%d&_ackey=%s&_acsec=%s&_action=%s\", nonce, key, secret, r.Action)\n\n\t// Append args if present\n\tif len(r.Arguments) != 0 {\n\t\tvar argsString string\n\n\t\t// We have to do this to sort by keys\n\t\tkeys := make([]string, 0, len(r.Arguments))\n\t\tfor key := range r.Arguments {\n\t\t\tkeys = append(keys, key)\n\t\t}\n\t\tsort.Strings(keys)\n\n\t\tfor _, k := range keys {\n\t\t\tv := r.Arguments[k]\n\t\t\tvar s string\n\n\t\t\tswitch t := v.(type) {\n\t\t\tcase []SubscriptionEvent:\n\t\t\t\tvar str = make([]string, len(t))\n\t\t\t\tfor _, j := range t {\n\t\t\t\t\tstr = append(str, string(j))\n\t\t\t\t}\n\t\t\t\ts = strings.Join(str, \"\")\n\t\t\tcase []string:\n\t\t\t\ts = strings.Join(t, \"\")\n\t\t\tcase bool:\n\t\t\t\ts = strconv.FormatBool(t)\n\t\t\tcase int:\n\t\t\t\ts = strconv.FormatInt(int64(t), 10)\n\t\t\tcase int64:\n\t\t\t\ts = strconv.FormatInt(t, 10)\n\t\t\tcase float64:\n\t\t\t\ts = strconv.FormatFloat(t, 'f', -1, 64)\n\t\t\tcase string:\n\t\t\t\ts = t\n\t\t\tdefault:\n\t\t\t\t// Absolutely panic here\n\t\t\t\tpanic(fmt.Sprintf(\"Cannot generate sig string: Unable to handle arg of type %T\", t))\n\t\t\t}\n\t\t\targsString += fmt.Sprintf(\"&%s=%s\", k, s)\n\t\t}\n\t\tsigString += argsString\n\t}\n\thasher := sha256.New()\n\thasher.Write([]byte(sigString))\n\tsigHash := base64.StdEncoding.EncodeToString(hasher.Sum(nil))\n\tr.Sig = fmt.Sprintf(\"%s.%d.%s\", key, nonce, sigHash)\n\treturn nil\n}", "func createHMAC(sharedKey,\n\tsecretKey,\n\tdate,\n\thttpMethod,\n\trequestURL,\n\tcontentType,\n\tcontentMD5,\n\tnepApplicationKey,\n\tnepCorrelationID,\n\tnepOrganization,\n\tnepServiceVersion string) (string, error) {\n\tparsedDate, err := http.ParseTime(date)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdate = parsedDate.Format(dateTimeFormat)\n\toneTimeSecret := secretKey + date\n\tu, err := url.Parse(requestURL)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\ttoSign := httpMethod + \"\\n\" + u.RequestURI()\n\tif contentType != \"\" {\n\t\ttoSign += \"\\n\" + contentType\n\t}\n\tif contentMD5 != \"\" {\n\t\ttoSign += \"\\n\" + contentMD5\n\t}\n\tif nepApplicationKey != \"\" {\n\t\ttoSign 
+= \"\\n\" + nepApplicationKey\n\t}\n\tif nepCorrelationID != \"\" {\n\t\ttoSign += \"\\n\" + nepCorrelationID\n\t}\n\tif nepOrganization != \"\" {\n\t\ttoSign += \"\\n\" + nepOrganization\n\t}\n\tif nepServiceVersion != \"\" {\n\t\ttoSign += \"\\n\" + nepServiceVersion\n\t}\n\n\tkey := hmac.New(sha512.New, []byte(oneTimeSecret))\n\tkey.Write([]byte(toSign))\n\ttoken := \"AccessKey \" + sharedKey + \":\" + base64.StdEncoding.EncodeToString(key.Sum(nil))\n\treturn token, nil\n}", "func signature(req *http.Request, awsSecretAccessKey string) string {\n\treturn signWithKey(stringToSign(req), awsSecretAccessKey)\n}", "func Signature(method string) []byte {\n\t// hash method\n\thasher := sha3.NewLegacyKeccak256()\n\thasher.Write([]byte(method))\n\tb := hasher.Sum(nil)\n\treturn b[:4]\n}", "func generateSignature(postData, secret []byte) string {\n\tmac := hmac.New(md5.New, secret)\n\tmac.Write(postData)\n\tsignature := fmt.Sprintf(\"%x\", mac.Sum(nil))\n\n\treturn signature\n}", "func sign(credentials Credentials, req Request, option *SignOption) string {\n\tsigningKey := getSigningKey(credentials, option)\n\treq.prepareHeaders(option)\n\tcanonicalRequest := req.canonical(option)\n\tsignature := util.HmacSha256Hex(signingKey, canonicalRequest)\n\n\treturn signature\n}", "func GenerateSignature( resourceURL, method, consumerKey, consumerSecret string, params map[string]string ) ( signature string ){\n\t\n\tEscapeParamValues( params )\n\n\tparams[ \"oauth_nonce\" ] = GenerateNonce( 10 )\n\tparams[ \"oauth_timestamp\" ] = GetTimestamp()\n\tparams[ \"oauth_signature_method\" ] = \"HMAC-SHA1\"\n\tparams[ \"oauth_consumer_key\" ] = consumerKey\n\n\tbaseString := method\n\tbaseString += \"&\"\n\tbaseString += UrlEncode( resourceURL )\n\tbaseString += \"&\"\n\tbaseString += UrlEncode( GetOrderedParamString( params ) )\n\t\n\tsignature = HashString( []byte( UrlEncode( consumerSecret ) + \"&\" ), baseString )\n\t// log.Println( \"Signature: \", signature )\n\treturn\n}", "func GenerateUserEmailHMAC(userEmail string) string { return api.GenerateUserEmailHMAC(userEmail) }", "func Sign(key string, qs string) string {\n\tmac := hmac.New(sha1.New, []byte(key))\n\tmac.Write([]byte(qs))\n\n\tbyteArray := mac.Sum(nil)\n\n\treturn hex.EncodeToString(byteArray)\n}", "func createHMAC(message, key []byte) []byte {\n\n\thash := hmac.New(sha256.New, key)\n\n\thash.Write(message)\n\treturn hash.Sum(nil)\n}", "func (signature Signature) String() string {\n\treturn base58.Encode(signature[:])\n}", "func signStringToSign(stringToSign string, signingKey []byte) (string, error) {\n\thm, err := hmacsha256(signingKey, stringToSign)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn hex.EncodeToString(hm), nil\n}", "func HMAC(key, message []byte) []byte {\n\tmac := hmac.New(sha256.New, key)\n\n\tif _, err := mac.Write(message); err != nil {\n\t\t// A write to sha256 hasher can never fail.\n\t\tpanic(err)\n\t}\n\n\treturn mac.Sum(nil)\n}", "func ComputeSignature(authToken, uri string, params map[string]string) string {\n\toriginalString := fmt.Sprintf(\"%s%s\", uri, headersWithSep(params, \"\", \"\", false))\n\tmac := hmac.New(sha1.New, []byte(authToken))\n\tmac.Write([]byte(originalString))\n\treturn base64.StdEncoding.EncodeToString(mac.Sum(nil))\n}", "func (s Signature) String() string {\n\treturn fmt.Sprintf(\"Signature(hash=%v): %v\", s.Hash, Bytes(s.Data))\n}", "func (t *Transaction) Signature() string {\n\treturn utils.EncodeToBase64(t.signature)\n}", "func (a *Authorization) Signature() ([]byte, error) {\n\treturn 
a.signature, nil\n}", "func CreateSignature(stringToSignature, accessKeySecret string) string {\n\t// Crypto by HMAC-SHA1\n\thmacSha1 := hmac.New(sha1.New, []byte(accessKeySecret))\n\thmacSha1.Write([]byte(stringToSignature))\n\tsign := hmacSha1.Sum(nil)\n\n\t// Encode to Base64\n\tbase64Sign := base64.StdEncoding.EncodeToString(sign)\n\n\treturn base64Sign\n}", "func createSignature(publicKey string, privateKey string) string {\n\t// Build payload\n\tpayload := fmt.Sprintf(\"%d.%s\", time.Now().Unix(), publicKey)\n\n\t// Generate the HMAC-sha256 signature\n\t// As per the docs, do not decode the key base64, but do encode the output\n\tmac := hmac.New(sha256.New, []byte(privateKey))\n\tmac.Write([]byte(payload))\n\tsignature := hex.EncodeToString(mac.Sum(nil))\n\n\t// Return the final payload\n\treturn fmt.Sprintf(\"%s.%s\", payload, signature)\n}", "func Sign(payload string, secretKey string) string {\n\tmac := hmac.New(sha256.New, []byte(secretKey))\n\tmac.Write([]byte(payload))\n\treturn hex.EncodeToString(mac.Sum(nil))\n}", "func GenerateSignature(url, body, key string) string {\n\tmac := hmac.New(sha1.New, []byte(key))\n\tmac.Write([]byte(url + body))\n\treturn base64.StdEncoding.EncodeToString(mac.Sum(nil))\n}", "func SignatureHash(signature string) string {\n\treturn fmt.Sprintf(\"%x\", sha512.Sum384([]byte(signature)))\n}", "func GetSignature(signedParams map[string]*string, secret *string) (_result *string) {\n\tstringToSign := buildStringToSign(signedParams)\n\tsignature := sign(stringToSign, tea.StringValue(secret))\n\treturn tea.String(signature)\n}", "func (sig *Signature) String() string {\n\tif sig == nil || len(sig.bytes) == 0 {\n\t\treturn \"[empty]\"\n\t}\n\tif sig.hasV {\n\t\treturn \"0x\" + hex.EncodeToString(sig.bytes)\n\t}\n\treturn \"0x\" + hex.EncodeToString(sig.bytes[:SignatureLenRaw]) + \"[no V]\"\n}", "func (m EncMessage) Signature() []byte {\n\treturn m.Sig\n}", "func HmacSha256Signature( params string, key string ) (string, error) {\n\t// formalize the param then output to stdin of openssl command\n\tformalizeParamsCmd := exec.Command(\"echo\", \"-n\", strings.TrimSpace(params))\n\t// openssl command: sha256 message digest algorithm, create hashed MAC with key\n\tsignatureMsgCmd := exec.Command(\"openssl\", \"dgst\", \"-sha256\", \"-hmac\", key)\n\n\t// create new pipe.\n\tpipeReader, pipeWriter := io.Pipe()\n\n\t// wire up the pipe b/w 2 commands as below:\n\n\t// assign stdout of first cmd to pipe Writer\n\tformalizeParamsCmd.Stdout = pipeWriter\n\n\t// assign stdin of second cmd to pipe reader.\n\tsignatureMsgCmd.Stdin = pipeReader\n\n\t// assign the os stdout to the second cmd.\n\tsignatureMsgCmd.Stdout = os.Stdout\n\n\t// run the first cmd.\n\terr := formalizeParamsCmd.Start()\n\tif err != nil {\n\t\tlogrus.Errorf(\"Failed to execute the echo command: param = %s, err = %s\", params, err)\n\t\treturn \"\", err\n\t}\n\n\t// run the second cmd.\n\tvar b bytes.Buffer\n\tsignatureMsgCmd.Stdout = &b\n\terr = signatureMsgCmd.Start()\n\tif err != nil {\n\t\tlogrus.Errorf(\"Failed to execute the openssl dgst command: param = %s, err = %s\", params, err)\n\t\treturn \"\", err\n\t}\n\n\t// make a new go routine to wait for the first command finished.\n\tgo func() {\n\t\t// defer util the go routine done.\n\t\tdefer pipeWriter.Close()\n\t\t// wait util finished.\n\t\terr = formalizeParamsCmd.Wait()\n\t\tif err != nil {\n\t\t\tlogrus.Errorf(\"Failed to run the echo finished with err %s %s\", formalizeParamsCmd.Stdout, err)\n\t\t}\n\t\t// done now can close the 
pipeWriter.\n\t}()\n\t// wait util the second done.\n\terr = signatureMsgCmd.Wait()\n\tif err != nil {\n\t\tlogrus.Errorf(\"Failed to run the openssl finished with %s err %s\", signatureMsgCmd.Stdout, err)\n\t\treturn \"\", err\n\t}\n\t// return the result from stdout.\n\treturn strings.Trim(b.String(), \"\\n\"), nil\n}", "func getHmacCode(s string) string {\n\th := hmac.New(sha256.New, []byte(Cfg.DingTalkSecret))\n\th.Write([]byte(s))\n\treturn base64.StdEncoding.EncodeToString(h.Sum(nil))\n}", "func (sig Signature) String() string {\n\tdata := \"notYetImplementedInStringMethod\"\n\tif sig.Algorithm == Ed25519 {\n\t\tif sig.Data == nil {\n\t\t\tdata = \"nil\"\n\t\t} else {\n\t\t\tdata = hex.EncodeToString(sig.Data.([]byte))\n\t\t}\n\t}\n\treturn fmt.Sprintf(\"{KS=%d AT=%d VS=%d VU=%d KP=%d data=%s}\",\n\t\tsig.KeySpace, sig.Algorithm, sig.ValidSince, sig.ValidUntil, sig.KeyPhase, data)\n}", "func (is *Signer) BuildSignature(request *http.Request) (string, error) {\n\tstringToSign, err := is.BuildStringToSign(request)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\th := hmac.New(sha256.New, []byte(is.SecretAccessKey))\n\th.Write([]byte(stringToSign))\n\n\tsignature := strings.TrimSpace(base64.StdEncoding.EncodeToString(h.Sum(nil)))\n\tsignature = strings.Replace(signature, \" \", \"+\", -1)\n\tsignature = url.QueryEscape(signature)\n\n\tlogger.Debug(fmt.Sprintf(\n\t\t\"QingCloud signature: [%d] %s\",\n\t\tutils.StringToUnixInt(request.Header.Get(\"Date\"), \"RFC 822\"),\n\t\tsignature))\n\tif request.Method == \"GET\" {\n\t\tis.BuiltURL += \"&signature=\" + signature\n\t} else if request.Method == \"POST\" {\n\t\tis.BuiltForm += \"&signature=\" + signature\n\t}\n\n\treturn signature, nil\n}", "func (pv *ParamsVerification) Sign(p []byte) string {\n\t// Generate hash code\n\tmac := hmac.New(sha256.New, []byte(pv.ClientSecret))\n\t_, _ = mac.Write(p)\n\texpectedMAC := mac.Sum(nil)\n\n\t// Generate base64\n\tbase64Sign := base64.StdEncoding.EncodeToString(expectedMAC)\n\tbase64Sign = strings.ReplaceAll(base64Sign, \"+\", \"-\")\n\tbase64Sign = strings.ReplaceAll(base64Sign, \"/\", \"_\")\n\tbase64Sign = strings.TrimRight(base64Sign, \"=\")\n\n\treturn base64Sign\n}", "func generateSignatureFromKey(requestBody []byte, privateKey *ecdsa.PrivateKey) (string, error) {\n\t// Follows the Sila example for Golang\n\t// Generate the message hash using the Keccak 256 algorithm.\n\tmsgHash := crypto.Keccak256(requestBody)\n\n\t// Create a signature using your private key and hashed message.\n\tsigBytes, err := crypto.Sign(msgHash, privateKey)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\t// The signature just created is off by -27 from what the API\n\t// will expect. Correct that by converting the signature bytes\n\t// to a big int and adding 27.\n\tvar offset int64 = 27\n\tvar bigSig = new(big.Int).SetBytes(sigBytes)\n\tsigBytes = bigSig.Add(bigSig, big.NewInt(offset)).Bytes()\n\n\t// The big library takes out any padding, but the resultant\n\t// signature must be 130 characters (65 bytes) long. 
In some\n\t// cases, you might find that sigBytes now has a length of 64 or\n\t// less, so you can fix that in this way (this prepends the hex\n\t// value with \"0\" until the requisite length is reached).\n\t// Example: if two digits were required but the value was 1, you'd\n\t// pass in 01.\n\tvar sigBytesLength = 65 // length of a valid signature byte array\n\tvar arr = make([]byte, sigBytesLength)\n\tcopy(arr[(sigBytesLength-len(sigBytes)):], sigBytes)\n\n\t// Encode the bytes to a hex string.\n\treturn hex.EncodeToString(arr), nil\n}", "func (a Asset) signature() string {\n\tif !a.Config.URL.SignURL {\n\t\treturn \"\"\n\t}\n\n\talgo, length := a.getSignatureAlgorithmAndLength()\n\n\ttoSign := joinUrl([]interface{}{a.Transformation, a.PublicID})\n\n\treturn signature.SignURL(toSign, a.Config.Cloud.APISecret, algo, length)\n}", "func _hmac(key []byte, a ...[]byte) []byte {\n\ts := sha256.New\n\th := hmac.New(s, key)\n\n\tfor _, w := range a {\n\t\th.Write(w)\n\t}\n\n\treturn h.Sum(nil)\n}", "func signSubscription(decoded SubscriptionConfirmation, fullURL string) []byte {\n\t// get timestamp+token hash first\n\tfirstMAC := hmac.New(sha256.New, []byte(decoded.Token))\n\tfirstMAC.Write([]byte(decoded.Timestamp))\n\tfirstHash := firstMAC.Sum(nil)\n\t// then combine that with TopicArn\n\tsecondMAC := hmac.New(sha256.New, firstHash)\n\tsecondMAC.Write([]byte(decoded.TopicArn))\n\tsecondHash := secondMAC.Sum(nil)\n\t// then combine that with full URL\n\tthirdMAC := hmac.New(sha256.New, secondHash)\n\tthirdMAC.Write([]byte(fullURL))\n\tthirdHash := thirdMAC.Sum(nil)\n\treturn thirdHash\n}", "func BuildRequestSignature(requestMethod, requestPath string, requestBody []byte) (string, string) {\n\tsecret, err := base64.StdEncoding.DecodeString(secrets.CoinbaseAPISecret())\n\tif err != nil {\n\t\tpanic(errors.Wrap(err, \"base64 decoding error\"))\n\t}\n\n\ttimestamp := time.Now().Unix()\n\ttimestampStr := fmt.Sprintf(\"%d\", timestamp)\n\n\tprehashStr := timestampStr + strings.ToUpper(requestMethod) + requestPath\n\tif requestBody != nil {\n\t\tprehashStr += string(requestBody)\n\t}\n\n\tprehashBytes := []byte(prehashStr)\n\n\tmac := hmac.New(sha256.New, secret)\n\t_, err = mac.Write(prehashBytes)\n\tif err != nil {\n\t\tpanic(errors.Wrap(err, \"error attempting to write prehash bytes to hmac\"))\n\t}\n\n\tsigBytes := mac.Sum(nil)\n\n\tsigEncoded := base64.StdEncoding.EncodeToString(sigBytes)\n\n\treturn sigEncoded, timestampStr\n}", "func Sign(params Params, key string) string {\n\tsort.Sort(params)\n\tpreSignWithKey := params.ToQueryString() + \"&key=\" + key\n\treturn fmt.Sprintf(\"%X\", md5.Sum([]byte(preSignWithKey)))\n}", "func Sign(params Params, key string) string {\n\tsort.Sort(params)\n\tpreSignWithKey := params.ToQueryString() + \"&key=\" + key\n\treturn fmt.Sprintf(\"%X\", md5.Sum([]byte(preSignWithKey)))\n}", "func (client *BaseClient) GetSignature(request *tea.Request, secret string) string {\n\tstringToSign := buildRpcStringToSign(request)\n\tsignature := client.Sign(stringToSign, secret, \"&\")\n\treturn signature\n}", "func getTokenSignature(header *Header, body *Body, secret *pbauth.Secret) (string, error) {\n\tif err := ValidateHeader(header); err != nil {\n\t\treturn \"\", err\n\t}\n\tif err := ValidateBody(body); err != nil {\n\t\treturn \"\", err\n\t}\n\tif err := ValidateSecret(secret); err != nil {\n\t\treturn \"\", err\n\t}\n\tif body.Permission == Admin && header.Alg != Hs512 {\n\t\treturn \"\", consts.ErrInvalidPermission\n\t}\n\tif header.TokenTyp != Jwt && 
header.TokenTyp != Jet {\n\t\treturn \"\", consts.ErrUnknownTokenType\n\t}\n\t// Token Signature = <encoded header>.<encoded body>.<hashed(<encoded header>.<encoded body>)>\n\t// 1. Encode the header\n\tencodedHeader, err := base64Encode(header)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\t// 2. Encode the body\n\tencodedBody, err := base64Encode(body)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\t// 3. Build <encoded header>.<encoded body>\n\t// 4. Build <hashed(<encoded header>.<encoded body>)>\n\t// 5. Build Token Signature = <encoded header>.<encoded body>.<hashed(<encoded header>.<encoded body>)>\n\treturn buildTokenSignature(encodedHeader, encodedBody, header.Alg, secret)\n}", "func SaltedHMAC(key, value, secret string) string {\n\thsh := hmac.New(sha1.New, []byte(key + secret))\n\thsh.Write([]byte(value))\n\treturn hex.EncodeToString(hsh.Sum(nil))\n}", "func (o *OAuth1) createSignature(urlParams, bodyParams, authParams map[string]string, url, method string, token *Token) string {\n\n\tsignatureBaseString := o.makeSignatureBaseString(urlParams, bodyParams, authParams, url, method)\n\tsigningKey := o.makeSigningKey(token)\n\n\thmacSha1 := hmac.New(sha1.New, []byte(signingKey))\n\tio.WriteString(hmacSha1, signatureBaseString)\n\n\treturn base64.StdEncoding.EncodeToString(hmacSha1.Sum(nil))\n}", "func signMac(w io.Writer, name string, data string) error {\n\t// name := \"projects/my-project/locations/us-east1/keyRings/my-key-ring/cryptoKeys/my-key/cryptoKeyVersions/123\"\n\t// data := \"my data to sign\"\n\n\t// Create the client.\n\tctx := context.Background()\n\tclient, err := kms.NewKeyManagementClient(ctx)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to create kms client: %w\", err)\n\t}\n\tdefer client.Close()\n\n\t// Build the request.\n\treq := &kmspb.MacSignRequest{\n\t\tName: name,\n\t\tData: []byte(data),\n\t}\n\n\t// Generate HMAC of data.\n\tresult, err := client.MacSign(ctx, req)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to hmac sign: %w\", err)\n\t}\n\n\t// The data comes back as raw bytes, which may include non-printable\n\t// characters. This base64-encodes the result so it can be printed below.\n\tencodedSignature := base64.StdEncoding.EncodeToString(result.Mac)\n\n\tfmt.Fprintf(w, \"Signature: %s\", encodedSignature)\n\treturn nil\n}", "func buildTokenSignature(encodedHeader string, encodedBody string, alg Algorithm, secret *pbauth.Secret) (string, error) {\n\tif strings.TrimSpace(encodedHeader) == \"\" {\n\t\treturn \"\", consts.ErrInvalidEncodedHeader\n\t}\n\tif strings.TrimSpace(encodedBody) == \"\" {\n\t\treturn \"\", consts.ErrInvalidEncodedBody\n\t}\n\tif err := ValidateSecret(secret); err != nil {\n\t\treturn \"\", err\n\t}\n\t// 3. Build <encoded header>.<encoded body>\n\tvar bufferHeaderBody bytes.Buffer\n\tbufferHeaderBody.WriteString(encodedHeader)\n\tbufferHeaderBody.WriteString(\".\")\n\tbufferHeaderBody.WriteString(encodedBody)\n\tencodedHeaderBody := bufferHeaderBody.String()\n\t// 4. Build <hashed(<encoded header>.<encoded body>)>\n\tencodedSignature, err := hashSignature(alg, encodedHeaderBody, secret)\n\tif err != nil {\n\t\treturn \"\", nil\n\t}\n\t// 5. 
Build Token Signature = <encoded header>.<encoded body>.<hashed(<encoded header>.<encoded body>)>\n\tvar bufferTokenSignature bytes.Buffer\n\tbufferTokenSignature.WriteString(encodedHeaderBody)\n\tbufferTokenSignature.WriteString(\".\")\n\tbufferTokenSignature.WriteString(encodedSignature)\n\treturn bufferTokenSignature.String(), nil\n}", "func (s CodeSigningSignature) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (v vector) signature() string {\n\tbuf := &bytes.Buffer{}\n\tfor _, v := range v {\n\t\tbinary.Write(buf, binary.LittleEndian, v)\n\t}\n\n\treturn base64.URLEncoding.EncodeToString(buf.Bytes())\n}", "func (cs CommitSig) String() string {\n\treturn fmt.Sprintf(\"CommitSig{%X by %X on %v @ %s}\",\n\t\ttmbytes.Fingerprint(cs.Signature),\n\t\ttmbytes.Fingerprint(cs.ValidatorAddress),\n\t\tcs.BlockIDFlag,\n\t\tCanonicalTime(cs.Timestamp))\n}", "func TestHMAC(t *testing.T) {\n\t// Generate 1MB of random bytes\n\trandomBytes := make([]byte, 1<<20)\n\tif _, err := rand.Read(randomBytes); err != nil {\n\t\tt.Fatalf(\"%v reading random bytes\", err)\n\t}\n\tinput := bytes.NewReader(randomBytes)\n\n\t// Compute the source data's key\n\tkey, err := ComputeKey(input, \"\")\n\tif err != nil {\n\t\tt.Fatalf(\"%v computing key\", err)\n\t}\n\n\t// Create a writer to encrypt the data with key\n\twriter, err := NewWriter(input, key)\n\tif err != nil {\n\t\tt.Fatalf(\"%v creating Writer\", err)\n\t}\n\n\t// Capture the encrypted output in a buffer.\n\tvar output bytes.Buffer\n\tmac, err := writer.Encrypt(&output)\n\tif err != nil {\n\t\tt.Fatalf(\"%v encrypting input\", err)\n\t}\n\n\t// Wrap the output bytes in a type that implements io.ReadSeeker\n\toutputReader := bytes.NewReader(output.Bytes())\n\n\t// Seek to and read the tail bytes of the output to get the embedded HMAC\n\tif _, err := outputReader.Seek(-int64(len(mac)), io.SeekEnd); err != nil {\n\t\tt.Fatalf(\"%v seeking to HMAC suffix\", err)\n\t}\n\n\toutputHMAC := make([]byte, len(mac))\n\tif l, err := outputReader.Read(outputHMAC); l != len(mac) || err != nil {\n\t\tt.Fatalf(\"%v reading HMAC suffix\", err)\n\t}\n\n\t// Fail if the returned and embedded signatures differ\n\tif !hmac.Equal(mac, outputHMAC) {\n\t\tt.Fatal(\"Returned hash differs from embedded hash\")\n\t}\n}", "func HMAC_SHA256(src, key string) string {\n\tm := hmac.New(sha256.New, []byte(key))\n\tm.Write([]byte(src))\n\treturn hex.EncodeToString(m.Sum(nil))\n}", "func (t *Token) Signature() []byte {\n\treturn t.signature\n}", "func (c *Client) Hmac(msg string) string {\n\tmac := hmac.New(sha512.New, []byte(c.apiKey))\n\tmac.Write([]byte(msg))\n\tout := mac.Sum(nil)\n\treturn hex.EncodeToString(out)\n}", "func OAuthSignature(ck, cs, method, reqURL string) (string, *url.Values) {\n\thash := \"HMAC-SHA256\"\n\n\tp := url.Values{} // URL params\n\tp.Add(\"ck\", ck)\n\tp.Add(\"nonce\", newNonce())\n\tp.Add(\"sig_method\", hash)\n\tp.Add(\"timestamp\", strconv.Itoa(int(time.Now().Unix())))\n\n\turl := strings.Join([]string{strings.ToUpper(method), url.QueryEscape(reqURL), url.QueryEscape(p.Encode())}, \"&\")\n\n\treturn newSignature(cs, url), &p\n}", "func (t *Twitter) generateSignatureBase(m *RestMethod) string {\n\tvar buffer bytes.Buffer\n\n\t// create OAuth params\n\tif m.Params == nil {\n\t\tm.Params = map[string]string{\n\t\t\t\"oauth_consumer_key\": t.ConsumerKey,\n\t\t\t\"oauth_nonce\": getNonce(),\n\t\t\t\"oauth_signature_method\": \"HMAC-SHA1\",\n\t\t\t\"oauth_timestamp\": fmt.Sprintf(\"%d\", time.Now().Unix()),\n\t\t\t\"oauth_token\": 
t.OAuthToken,\n\t\t\t\"oauth_version\": \"1.0\",\n\t\t}\n\t}\n\n\tsplitUrl := strings.Split(m.Url, \"?\")\n\turl := splitUrl[0]\n\n\tif len(splitUrl) == 2 {\n\t\t// parse parameters from query string\n\t\tqueryString := splitUrl[1]\n\t\tfor k, v := range mapFromQueryString(queryString) {\n\t\t\tm.Params[k] = v\n\t\t}\n\t}\n\n\t// write method and url to buffer\n\tbuffer.WriteString(m.Method + \"&\")\n\tbuffer.WriteString(encode(url) + \"&\")\n\n\t// sort map keys\n\tsortedKeys := sortMapKeys(m.Params)\n\n\t// write each parameter to buffer\n\tfor _, v := range sortedKeys {\n\t\tbuffer.WriteString(encode(fmt.Sprintf(\"%s=%s&\", v, m.Params[v])))\n\t}\n\n\tvar out string\n\tif m.Data != \"\" {\n\t\t// append Data to buffer\n\t\tbuffer.WriteString(encode(m.Data))\n\t\tout = buffer.String()\n\t} else {\n\t\t// remove trailing %26 (&)\n\t\tout = buffer.String()\n\t\tout = out[:len(out)-3]\n\t}\n\t// return signature base\n\treturn out\n}", "func SHA1HMAC(salt, message []byte) string {\n\t// GitHub creates a SHA1 HMAC, where the key is the GitHub secret and the\n\t// message is the JSON body.\n\tdigest := hmac.New(sha1.New, salt)\n\tdigest.Write(message)\n\tsum := digest.Sum(nil)\n\treturn fmt.Sprintf(\"sha1=%x\", sum)\n}", "func stringToSign(canonicalRequest, credentialScope string, t time.Time) string {\n\thash := sha256.New()\n\thash.Write([]byte(canonicalRequest))\n\tresult := bytes.Buffer{}\n\tresult.WriteString(Algorithm)\n\tresult.WriteString(LINE_SEPARATOR)\n\tresult.WriteString(t.UTC().Format(BasicDateFormat))\n\tresult.WriteString(LINE_SEPARATOR)\n\tresult.WriteString(credentialScope)\n\tresult.WriteString(LINE_SEPARATOR)\n\tresult.WriteString(hex.EncodeToString(hash.Sum(nil)))\n\treturn result.String()\n}", "func GetAuthStr(accessKeyID string, accessKeySecret string, method string, header map[string]string, resource string) string {\n\treturn \"FC \" + accessKeyID + \":\" + GetSignature(accessKeySecret, method, header, resource)\n}", "func (s *HmacSigner) Sign(payload []byte) string {\n\tmac := hmac.New(s.h, s.key)\n\tmac.Write(payload)\n\treturn s.e(mac.Sum(nil))\n}", "func Sign(plainText string, secretKey string) (sign string) {\n\thmacObj := hmac.New(sha1.New, []byte(secretKey))\n\thmacObj.Write([]byte(plainText))\n\tsignObj := string(hmacObj.Sum(nil)) + plainText\n\tsign = base64.StdEncoding.EncodeToString([]byte(signObj))\n\treturn\n}", "func GenerateSign(requestData []byte, requestTime int64, secret string) (string, error) {\n\tvar rdata map[string]interface{}\n\terr := json.Unmarshal(requestData, &rdata)\n\tif err != nil {\n\t\treturn constant.EmptyStr, err\n\t}\n\n\tstr := serialize(rdata)\n\tserial := ext.StringSplice(secret, str.(string), secret, strconv.FormatInt(int64(requestTime), 10))\n\turlencodeSerial := url.QueryEscape(serial)\n\turlencodeBase64Serial := base64.StdEncoding.EncodeToString([]byte(urlencodeSerial))\n\tsign, err := crypto.Sha1(urlencodeBase64Serial)\n\tif err != nil {\n\t\treturn constant.EmptyStr, err\n\t}\n\tsign, err = crypto.MD5(sign)\n\tif err != nil {\n\t\treturn constant.EmptyStr, err\n\t}\n\n\treturn strings.ToUpper(sign), nil\n}", "func GetSign(srcdata interface{}, bizkey string) string {\n\tmd5ctx := md5.New()\n\n\tswitch v := reflect.ValueOf(srcdata); v.Kind() {\n\tcase reflect.String:\n\t\tmd5ctx.Write([]byte(v.String() + bizkey))\n\t\treturn hex.EncodeToString(md5ctx.Sum(nil))\n\tcase reflect.Map:\n\t\torderStr := orderParam(v.Interface(), bizkey)\n\t\tmd5ctx.Write([]byte(orderStr))\n\t\treturn hex.EncodeToString(md5ctx.Sum(nil))\n\tcase 
reflect.Struct:\n\t\torderStr := Struct2map(v.Interface(), bizkey)\n\t\tmd5ctx.Write([]byte(orderStr))\n\t\treturn hex.EncodeToString(md5ctx.Sum(nil))\n\tdefault:\n\t\treturn \"\"\n\t}\n}", "func Sign(message string) (string, error) {\n\n\t// TODO check length on string\n\t// Sign\n\tvar h hash.Hash\n\th = sha256.New()\n\n\tio.WriteString(h, message)\n\tsignhash := h.Sum(nil)\n\n\trsaKey, err := loadPrivateKeyFromFile()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\trsaSignature, err := rsa.SignPKCS1v15(rand.Reader, rsaKey, crypto.SHA256, signhash)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t\treturn \"\", err\n\t}\n\n\tsEnc := base64.StdEncoding.EncodeToString(rsaSignature)\n\treturn sEnc, nil\n}", "func (m *SigningMethodHMAC) Sign(signingString string, key interface{}) ([]byte, error) {\n\tif keyBytes, ok := key.([]byte); ok {\n\t\tif !m.Hash.Available() {\n\t\t\treturn nil, ErrHashUnavailable\n\t\t}\n\n\t\thasher := hmac.New(m.Hash.New, keyBytes)\n\t\thasher.Write([]byte(signingString))\n\n\t\treturn hasher.Sum(nil), nil\n\t}\n\n\treturn nil, ErrInvalidKeyType\n}", "func parseHMAC(ss string) (string, error) {\n\txS := strings.SplitN(ss, \"|\", 2)\n\tif len(xS) < 2 {\n\t\terr := errors.New(\"Error in parseHMAC while splitting\")\n\t\treturn \"\", err\n\t}\n\tsignature := xS[0]\n\tsID := xS[1]\n\t// check if this is a sessionID\n\tif _, ok := sessions[sID]; !ok {\n\t\terr := errors.New(\"Error in parseHMAC while verifying session\")\n\t\treturn \"\", err\n\t}\n\t// Create HMAC from sessionID to verify against\n\th := hmac.New(sha512.New, key) // create hasher\n\th.Write([]byte(sID)) // sign the sessionid\n\tnewSig := h.Sum(nil) // store the hash as byte slice\n\t// decode signature from hex - it was stores as a hex string - to byte slice\n\toldSig, err := hex.DecodeString(signature)\n\tif err != nil {\n\t\terr = errors.New(\"Error in parseHMAC while decoding\")\n\t\treturn \"\", err\n\t}\n\t// Compare the new signature to the old one\n\tif !hmac.Equal(oldSig, newSig) {\n\t\tfmt.Printf(\"passed in signature: %v\\n newly generated signature: %v\\n\", oldSig, newSig)\n\t\tfmt.Println()\n\t\terr := errors.New(\"Error in parseHMAC while comparing\")\n\t\treturn \"\", err\n\t}\n\treturn sID, nil\n}", "func (s Sign) getStringToSign(canonicalRequest string, t time.Time) string {\n\tstringToSign := signV4Algorithm + \"\\n\" + t.Format(iso8601Format) + \"\\n\"\n\tstringToSign = stringToSign + s.getScope(t) + \"\\n\"\n\tcanonicalRequestBytes := sha256.Sum256([]byte(canonicalRequest))\n\tstringToSign = stringToSign + hex.EncodeToString(canonicalRequestBytes[:])\n\treturn stringToSign\n}", "func (k *key) getArtSignature() string {\n\tusr, err := user.Current()\n\tif err != nil {\n\t\treturn \"--- path err ---\"\n\t}\n\n\tvar pyPath string\n\n\tif runtime.GOOS == \"darwin\" {\n\t\tpyPath = fmt.Sprintf(\"%s/.pyenv/shims/python\", usr.HomeDir)\n\t} else if runtime.GOOS == \"linux\" {\n\t\tpyPath = \"/usr/bin/python\"\n\t}\n\n\tcmd := exec.Command(\n\t\tpyPath,\n\t\t\"tmp/drunken_bishop.py\",\n\t\t\"--mode\",\n\t\t\"sha256\",\n\t\tk.FingerprintSHA,\n\t)\n\n\tvar stdout, stderr bytes.Buffer\n\tcmd.Stdout = &stdout\n\tcmd.Stderr = &stderr\n\n\tif err := cmd.Run(); err != nil {\n\t\treturn \"--- run err ---\"\n\t}\n\n\toutStr, outErr := string(stdout.Bytes()), string(stderr.Bytes())\n\tif outErr != \"\" {\n\t\treturn fmt.Sprintf(\"--- %s ---\", outErr)\n\t}\n\n\treturn outStr\n}", "func PayloadSignature(payload []byte, key []byte) string {\n\tmac := hmac.New(sha1.New, key)\n\tmac.Write(payload)\n\tsum := 
mac.Sum(nil)\n\treturn \"sha1=\" + hex.EncodeToString(sum)\n}", "func SignatureV2(req *http.Request, Auth interface{}) (err error) {\n\tauth, _ := Auth.(map[string]string)\n\tqueryVals := req.URL.Query()\n\tqueryVals.Set(\"AWSAccessKeyId\", auth[\"AccessKey\"])\n\tqueryVals.Set(\"SignatureVersion\", \"2\")\n\tqueryVals.Set(\"SignatureMethod\", \"HmacSHA256\")\n\n\tqueryStr, err := canonicalQueryString(queryVals)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tpath := req.URL.Path\n\tif path == \"\" {\n\t\tpath = \"/\"\n\t}\n\n\tpayload := new(bytes.Buffer)\n\n\tpayloadstring := checkrequestMethod(req.Method) + \"\\n\" + req.Host + \"\\n\" + path + \"\\n\" + queryStr\n\n\tfmt.Fprintf(payload, \"%s\", payloadstring)\n\n\thash := hmac.New(sha256.New, []byte(auth[\"SecretKey\"]))\n\n\thash.Write(payload.Bytes())\n\n\tsignature := make([]byte, base64.StdEncoding.EncodedLen(hash.Size()))\n\n\tbase64.StdEncoding.Encode(signature, hash.Sum(nil))\n\n\tqueryVals.Set(\"Signature\", string(signature))\n\n\treq.URL.RawQuery = queryVals.Encode()\n\n\treturn nil\n}", "func (s *Signature) String() string {\n\treturn fmt.Sprintf(\"%s %s %s\", s.Value, s.PublicKey, s.Endorsement)\n}", "func signature(accessKeySecret, method, uri string,\n\theaders map[string]string) (digest string, err error) {\n\tvar contentMD5, contentType, date, canoHeaders, canoResource string\n\tvar slsHeaderKeys sort.StringSlice\n\n\tif val, ok := headers[\"Content-MD5\"]; ok {\n\t\tcontentMD5 = val\n\t}\n\n\tif val, ok := headers[\"Content-Type\"]; ok {\n\t\tcontentType = val\n\t}\n\n\tdate, ok := headers[\"Date\"]\n\tif !ok {\n\t\terr = fmt.Errorf(\"Can't find 'Date' header\")\n\t\treturn\n\t}\n\n\t// Calc CanonicalizedSLSHeaders\n\tslsHeaders := make(map[string]string, len(headers))\n\tfor k, v := range headers {\n\t\tl := strings.TrimSpace(strings.ToLower(k))\n\t\tif strings.HasPrefix(l, \"x-log-\") || strings.HasPrefix(l, \"x-acs-\") {\n\t\t\tslsHeaders[l] = strings.TrimSpace(v)\n\t\t\tslsHeaderKeys = append(slsHeaderKeys, l)\n\t\t}\n\t}\n\n\tsort.Sort(slsHeaderKeys)\n\tfor i, k := range slsHeaderKeys {\n\t\tcanoHeaders += k + \":\" + slsHeaders[k]\n\t\tif i+1 < len(slsHeaderKeys) {\n\t\t\tcanoHeaders += \"\\n\"\n\t\t}\n\t}\n\n\t// Calc CanonicalizedResource\n\tu, err := url.Parse(uri)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tcanoResource += u.EscapedPath()\n\tif u.RawQuery != \"\" {\n\t\tvar keys sort.StringSlice\n\n\t\tvals := u.Query()\n\t\tfor k := range vals {\n\t\t\tkeys = append(keys, k)\n\t\t}\n\n\t\tsort.Sort(keys)\n\t\tcanoResource += \"?\"\n\t\tfor i, k := range keys {\n\t\t\tif i > 0 {\n\t\t\t\tcanoResource += \"&\"\n\t\t\t}\n\n\t\t\tfor _, v := range vals[k] {\n\t\t\t\tcanoResource += k + \"=\" + v\n\t\t\t}\n\t\t}\n\t}\n\n\tsignStr := method + \"\\n\" +\n\t\tcontentMD5 + \"\\n\" +\n\t\tcontentType + \"\\n\" +\n\t\tdate + \"\\n\" +\n\t\tcanoHeaders + \"\\n\" +\n\t\tcanoResource\n\n\t// Signature = base64(hmac-sha1(UTF8-Encoding-Of(SignString),AccessKeySecret))\n\tmac := hmac.New(sha1.New, []byte(accessKeySecret))\n\t_, err = mac.Write([]byte(signStr))\n\tif err != nil {\n\t\treturn\n\t}\n\tdigest = base64.StdEncoding.EncodeToString(mac.Sum(nil))\n\treturn\n}", "func xHubSignature(message, key []byte) string {\n\tmac := hmac.New(sha512.New, key)\n\tmac.Write(message)\n\tsignature := mac.Sum(nil)\n\n\thexSignature := make([]byte, hex.EncodedLen(len(signature)))\n\thex.Encode(hexSignature, signature)\n\treturn \"sha512=\" + string(hexSignature)\n}", "func formatSigTag(imageDesc v1.Descriptor) string {\n\tdigest := 
imageDesc.Digest\n\treturn fmt.Sprint(digest.Algorithm(), \"-\", digest.Encoded(), \".\", signatureTagSuffix)\n}", "func (client ClientImpl) generateAuthSignature(message []byte) (string, error) {\n\treturn generateSignatureFromKey(message, client.privateKey)\n}", "func hashSignature(alg Algorithm, signatureValue string, secret *pbauth.Secret) (string, error) {\n\tif strings.TrimSpace(signatureValue) == \"\" {\n\t\treturn \"\", consts.ErrInvalidSignatureValue\n\t}\n\tif err := ValidateSecret(secret); err != nil {\n\t\treturn \"\", err\n\t}\n\tkey := []byte(secret.Key)\n\tvar h hash.Hash\n\tswitch alg {\n\tcase Hs256:\n\t\th = hmac.New(sha256.New, key)\n\tcase Hs512:\n\t\th = hmac.New(sha512.New, key)\n\tdefault:\n\t\treturn \"\", consts.ErrNoHashAlgorithm\n\t}\n\th.Write([]byte(signatureValue))\n\treturn base64.URLEncoding.EncodeToString(h.Sum(nil)), nil\n}", "func createSignature(privateKeyHex string, plainText string) string {\n\n\ts := \"START createSignature() - Creates a ECDSA Digital Signature\"\n\tlog.Debug(\"WALLET: GUTS \" + s)\n\n\t// DECODE PRIVATE KEY\n\tprivateKeyPEM, _ := hex.DecodeString(privateKeyHex)\n\tblock, _ := pem.Decode([]byte(privateKeyPEM))\n\tprivateKeyx509Encoded := block.Bytes\n\tprivateKeyRaw, _ := x509.ParseECPrivateKey(privateKeyx509Encoded)\n\n\t// HASH plainText\n\thashedPlainText := sha256.Sum256([]byte(plainText))\n\thashedPlainTextByte := hashedPlainText[:]\n\n\tr := big.NewInt(0)\n\tss := big.NewInt(0)\n\n\t// CREATE SIGNATURE\n\tr, ss, err := ecdsa.Sign(\n\t\trand.Reader,\n\t\tprivateKeyRaw,\n\t\thashedPlainTextByte,\n\t)\n\tcheckErr(err)\n\n\tsignatureByte := r.Bytes()\n\tsignatureByte = append(signatureByte, ss.Bytes()...)\n\n\t// ENCODE - RETURN HEX\n\tsignature := hex.EncodeToString(signatureByte)\n\n\ts = \"END createSignature() - Creates a ECDSA Digital Signature\"\n\tlog.Debug(\"WALLET: GUTS \" + s)\n\n\treturn signature\n\n}", "func prettySIG(sig string) string {\n\tparts := strings.Split(sig, \"-\")\n\tfor i, part := range parts {\n\t\tswitch part {\n\t\tcase \"vsphere\":\n\t\t\tparts[i] = \"vSphere\"\n\t\tcase \"vmware\":\n\t\t\tparts[i] = \"VMWare\"\n\t\tcase \"openstack\":\n\t\t\tparts[i] = \"OpenStack\"\n\t\tcase \"api\", \"aws\", \"cli\", \"gcp\":\n\t\t\tparts[i] = strings.ToUpper(part)\n\t\tdefault:\n\t\t\tparts[i] = cases.Title(language.English).String(part)\n\t\t}\n\t}\n\treturn strings.Join(parts, \" \")\n}", "func ComputeSignatureV2(authToken, uri string, nonce string) string {\n\tparsedUrl, err := url.Parse(uri)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tvar originalString string = parsedUrl.Scheme + \"://\" + parsedUrl.Host + parsedUrl.Path + nonce\n\tmac := hmac.New(sha256.New, []byte(authToken))\n\tmac.Write([]byte(originalString))\n\tvar messageMAC string = base64.StdEncoding.EncodeToString(mac.Sum(nil))\n\treturn messageMAC\n}", "func CreateSignature(input []byte, key []byte) []byte {\n\th := hmac.New(sha1.New, signatureKey)\n\th.Write(input)\n\n\th.Sum(nil)\n}", "func signature() string {\n\treturn fmt.Sprintf(\"www.example.com\")\n}", "func fnHmac(ctx Context, doc *JDoc, params []string) interface{} {\n\tstats := ctx.Value(EelTotalStats).(*ServiceStats)\n\tif params == nil || len(params) != 3 {\n\t\tctx.Log().Error(\"error_type\", \"func_hmac\", \"op\", \"hmac\", \"cause\", \"wrong_number_of_parameters\", \"params\", params)\n\t\tstats.IncErrors()\n\t\tAddError(ctx, SyntaxError{fmt.Sprintf(\"wrong number of parameters in call to hmac function\"), \"curl\", params})\n\t\treturn nil\n\t} else {\n\t\thashFunc := 
extractStringParam(params[0])\n\t\tinput := extractStringParam(params[1])\n\t\tkey := extractStringParam(params[2])\n\t\tif hashFunc == \"SHA1\" {\n\t\t\tkey_for_sign := []byte(key)\n\t\t\th := hmac.New(sha1.New, key_for_sign)\n\t\t\th.Write([]byte(input))\n\t\t\treturn base64.StdEncoding.EncodeToString(h.Sum(nil))\n\t\t} else {\n\t\t\tctx.Log().Error(\"error_type\", \"func_hmac\", \"op\", \"hmac\", \"cause\", \"hash_func_not_yet_support\", \"params\", params)\n\t\t\treturn nil\n\t\t}\n\t}\n\treturn nil\n}", "func (s Sign) getSigningKey(t time.Time) []byte {\n\tsecret := s.secretAccessKey\n\tdate := sumHMAC([]byte(\"AWS4\"+secret), []byte(t.Format(yyyymmdd)))\n\tregion := sumHMAC(date, []byte(s.region))\n\tservice := sumHMAC(region, []byte(\"s3\"))\n\tsigningKey := sumHMAC(service, []byte(\"aws4_request\"))\n\treturn signingKey\n}", "func (c *HMACStrategy) Generate() (string, string, error) {\n\tif len(c.GlobalSecret) < minimumSecretLength/2 {\n\t\treturn \"\", \"\", errors.New(\"Secret is not strong enough\")\n\t}\n\n\tif c.AuthCodeEntropy < minimumEntropy {\n\t\tc.AuthCodeEntropy = minimumEntropy\n\t}\n\n\t// When creating secrets not intended for usage by human users (e.g.,\n\t// client secrets or token handles), the authorization server should\n\t// include a reasonable level of entropy in order to mitigate the risk\n\t// of guessing attacks. The token value should be >=128 bits long and\n\t// constructed from a cryptographically strong random or pseudo-random\n\t// number sequence (see [RFC4086] for best current practice) generated\n\t// by the authorization server.\n\tkey, err := RandomBytes(c.AuthCodeEntropy)\n\tif err != nil {\n\t\treturn \"\", \"\", errors.WithStack(err)\n\t}\n\n\tif len(key) < c.AuthCodeEntropy {\n\t\treturn \"\", \"\", errors.New(\"Could not read enough random data for key generation\")\n\t}\n\n\tuseSecret := append([]byte{}, c.GlobalSecret...)\n\tmac := hmac.New(sha256.New, useSecret)\n\t_, err = mac.Write(key)\n\tif err != nil {\n\t\treturn \"\", \"\", errors.WithStack(err)\n\t}\n\n\tsignature := mac.Sum([]byte{})\n\tencodedSignature := b64.EncodeToString(signature)\n\tencodedToken := fmt.Sprintf(\"%s.%s\", b64.EncodeToString(key), encodedSignature)\n\treturn encodedToken, encodedSignature, nil\n}", "func hexCreator(Secret, requestTipe, path, expired, data string) string {\n\tconcat := requestTipe + path + expired + data\n\n\th := hmac.New(sha256.New, []byte(Secret))\n\th.Write([]byte(concat))\n\n\thexResult := hex.EncodeToString(\n\t\th.Sum(nil),\n\t)\n\n\treturn hexResult\n}", "func (k *RSAPrivKey) Signature(payload []byte) (string, error) {\n\tif k.key == nil {\n\t\treturn \"\", ErrorKeyUninitialized\n\t}\n\n\tsha256 := crypto.SHA256.New()\n\t_, err := sha256.Write(payload)\n\tif err != nil {\n\t\treturn \"\", errors.AddStack(err)\n\t}\n\n\thashed := sha256.Sum(nil)\n\n\tsig, err := rsa.SignPSS(rand.Reader, k.key, crypto.SHA256, hashed, nil)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn base64.StdEncoding.EncodeToString(sig), nil\n}", "func SignSha256(data, secretKey string) string {\n\tdata, err := url.QueryUnescape(data)\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\thm := hmac.New(sha256.New, []byte(secretKey))\n\thm.Write([]byte(data + \"&key=\" + secretKey))\n\treturn fmt.Sprintf(\"%X\", hm.Sum(nil))\n}", "func hmacHash(msg, key []byte) uint64 {\n\tmac := hmac.New(sha256.New, key)\n\tmac.Write(msg)\n\tres := binary.BigEndian.Uint64(mac.Sum(nil))\n\treturn res\n}", "func (k Key) Signature() uint32 {\n\treturn uint32(k[8])<<24 | uint32(k[9])<<16 | 
uint32(k[10])<<8 | uint32(k[11])\n}", "func (h *HmacSha256) Sign(msg string, secret string) ([]byte, error) {\n\tmac := hmac.New(sha256.New, []byte(secret))\n\tif _, err := mac.Write([]byte(msg)); err != nil {\n\t\treturn nil, err\n\t}\n\treturn mac.Sum(nil), nil\n}", "func getSigningKey(secret, loc string, t time.Time) []byte {\n\tdate := sumHMAC([]byte(\"AWS4\"+secret), []byte(t.Format(yyyymmdd)))\n\tlocation := sumHMAC(date, []byte(loc))\n\tservice := sumHMAC(location, []byte(\"s3\"))\n\tsigningKey := sumHMAC(service, []byte(\"aws4_request\"))\n\treturn signingKey\n}", "func (records *Records) GetHMACKey() (key []byte, err error) {\n\treturn records.HmacKey, nil\n}", "func signV2( r *rest.Rest, accessKeyID, secretAccessKey string ) (string,error) {\n\n\t// Calculate HMAC for secretAccessKey.\n\tstringToSign := stringToSignV2( r )\n\n\thm := hmac.New(sha1.New, []byte(secretAccessKey))\n\thm.Write([]byte(stringToSign))\n\n\t// Prepare auth header.\n\tauthHeader := new(bytes.Buffer)\n\tauthHeader.WriteString(fmt.Sprintf(\"%s %s:\", signV2Algorithm, accessKeyID))\n\n\tencoder := base64.NewEncoder(base64.StdEncoding, authHeader)\n\tencoder.Write(hm.Sum(nil))\n\tencoder.Close()\n\n\t// Authorization header.\n return authHeader.String(), nil\n}" ]
[ "0.71829104", "0.7052024", "0.70093495", "0.6933789", "0.6795556", "0.67467207", "0.6733188", "0.66797465", "0.6621314", "0.66114676", "0.64573663", "0.6335612", "0.63065124", "0.6270065", "0.6256581", "0.6239818", "0.6217827", "0.620237", "0.6164223", "0.6156821", "0.6121654", "0.611864", "0.6101995", "0.608692", "0.60724294", "0.60721374", "0.60518956", "0.60183114", "0.59990484", "0.59759617", "0.5951763", "0.59297776", "0.59243375", "0.588404", "0.5878609", "0.5866291", "0.5847814", "0.5832733", "0.5805474", "0.58029944", "0.57826585", "0.5769788", "0.5769499", "0.57692707", "0.57513607", "0.57390845", "0.5732605", "0.5732605", "0.571148", "0.5698468", "0.5689621", "0.56758434", "0.56679976", "0.5667526", "0.5666381", "0.5654729", "0.56481427", "0.5647321", "0.56273997", "0.5625821", "0.56199425", "0.56179994", "0.5603406", "0.5598554", "0.55783266", "0.5568031", "0.55651075", "0.5558966", "0.55453336", "0.5542507", "0.5536289", "0.5524265", "0.5518344", "0.5517684", "0.5509559", "0.5507801", "0.55066484", "0.54860234", "0.54731077", "0.5472094", "0.5462463", "0.54523104", "0.544376", "0.54374146", "0.5417868", "0.5417602", "0.5416755", "0.54161435", "0.5412396", "0.54106575", "0.540883", "0.5397966", "0.539548", "0.53875226", "0.53861445", "0.5381296", "0.5373029", "0.537073", "0.5350575", "0.5349532" ]
0.60221684
27
Returns a string if HMAC signature is valid.
func ValidateSignedString(value, key string) (string, error) {
	parts := strings.SplitN(value, "----", 2)
	if parts[0] == fmt.Sprintf("%x", hmac.New(sha1.New, []byte(key)).Sum([]byte(parts[1]))) {
		return parts[1], nil
	}
	return "", errors.New("data is tampered")
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func checkSignature(s string) ([]byte, error) {\n\tif 128 != len(s) {\n\t\treturn nil, fault.TransactionIdIsRequired\n\t}\n\th, err := hex.DecodeString(s)\n\tif nil != err {\n\t\treturn nil, err\n\n\t}\n\treturn h, nil\n}", "func getSignature(signingKey []byte, stringToSign string) string {\n\treturn hex.EncodeToString(sumHMAC(signingKey, []byte(stringToSign)))\n}", "func validSignature(body, key []byte, sig string) bool {\n\tconst prefix = \"sha1=\"\n\tif len(sig) < len(prefix) {\n\t\treturn false\n\t}\n\tsig = sig[len(prefix):]\n\tmac := hmac.New(sha1.New, key)\n\tmac.Write(body)\n\tb, err := hex.DecodeString(sig)\n\tif err != nil {\n\t\treturn false\n\t}\n\n\t// Use hmac.Equal to avoid timing attacks.\n\treturn hmac.Equal(mac.Sum(nil), b)\n}", "func (s Sign) getSignature(signingKey []byte, stringToSign string) string {\n\treturn hex.EncodeToString(sumHMAC(signingKey, []byte(stringToSign)))\n}", "func VerifyHmac(next http.HandlerFunc) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tw.Header().Set(\"Content-Type\", \"text/json\")\n\t\tif r.URL.Query().Get(\"accessToken\") != \"\" {\n\t\t\tif ok := functions.ValidMAC(os.Getenv(\"HMAC_MESSAGE\"), r.URL.Query().Get(\"accessToken\"), os.Getenv(\"HMAC_SECRET\")); ok {\n\t\t\t\tnext(w, r)\n\t\t\t} else {\n\t\t\t\thttp.Error(w, \"Authenticated failed\", http.StatusNetworkAuthenticationRequired)\n\t\t\t\treturn\n\t\t\t}\n\t\t} else {\n\t\t\thttp.Error(w, \"accessToken is missing\", http.StatusNetworkAuthenticationRequired)\n\t\t\treturn\n\t\t}\n\t}\n}", "func (k Key) HMAC() []byte {\n\treturn k[len(k)/2:]\n}", "func (sig *Signature) String() string {\n\tif sig == nil || len(sig.bytes) == 0 {\n\t\treturn \"[empty]\"\n\t}\n\tif sig.hasV {\n\t\treturn \"0x\" + hex.EncodeToString(sig.bytes)\n\t}\n\treturn \"0x\" + hex.EncodeToString(sig.bytes[:SignatureLenRaw]) + \"[no V]\"\n}", "func getHS256Signature(encHeader string, encPayload string, pubKeyHexa string) string {\n\topenssl := exec.Command(\"openssl\", \"dgst\", \"-sha256\", \"-mac\", \"HMAC\", \"-macopt\", \"hexkey:\"+pubKeyHexa)\n\n\topenssl.Stdin = bytes.NewReader([]byte(encHeader + \".\" + encPayload))\n\n\tcmdOutput := &bytes.Buffer{}\n\topenssl.Stdout = cmdOutput\n\topenssl.Start()\n\topenssl.Wait()\n\thmac := string(cmdOutput.Bytes())\n\treturn hex.EncodeToString([]byte(hmac))\n}", "func parseHMAC(ss string) (string, error) {\n\txS := strings.SplitN(ss, \"|\", 2)\n\tif len(xS) < 2 {\n\t\terr := errors.New(\"Error in parseHMAC while splitting\")\n\t\treturn \"\", err\n\t}\n\tsignature := xS[0]\n\tsID := xS[1]\n\t// check if this is a sessionID\n\tif _, ok := sessions[sID]; !ok {\n\t\terr := errors.New(\"Error in parseHMAC while verifying session\")\n\t\treturn \"\", err\n\t}\n\t// Create HMAC from sessionID to verify against\n\th := hmac.New(sha512.New, key) // create hasher\n\th.Write([]byte(sID)) // sign the sessionid\n\tnewSig := h.Sum(nil) // store the hash as byte slice\n\t// decode signature from hex - it was stores as a hex string - to byte slice\n\toldSig, err := hex.DecodeString(signature)\n\tif err != nil {\n\t\terr = errors.New(\"Error in parseHMAC while decoding\")\n\t\treturn \"\", err\n\t}\n\t// Compare the new signature to the old one\n\tif !hmac.Equal(oldSig, newSig) {\n\t\tfmt.Printf(\"passed in signature: %v\\n newly generated signature: %v\\n\", oldSig, newSig)\n\t\tfmt.Println()\n\t\terr := errors.New(\"Error in parseHMAC while comparing\")\n\t\treturn \"\", err\n\t}\n\treturn sID, nil\n}", "func (sig Signature) String() string 
{\n\tdata := \"notYetImplementedInStringMethod\"\n\tif sig.Algorithm == Ed25519 {\n\t\tif sig.Data == nil {\n\t\t\tdata = \"nil\"\n\t\t} else {\n\t\t\tdata = hex.EncodeToString(sig.Data.([]byte))\n\t\t}\n\t}\n\treturn fmt.Sprintf(\"{KS=%d AT=%d VS=%d VU=%d KP=%d data=%s}\",\n\t\tsig.KeySpace, sig.Algorithm, sig.ValidSince, sig.ValidUntil, sig.KeyPhase, data)\n}", "func (s SlackHandler) validateSignature(r *http.Request, body string) bool {\n\ttimestamp := r.Header.Get(\"X-Slack-Request-Timestamp\")\n\trequestSignature := r.Header.Get(\"X-Slack-Signature\")\n\tcompiled := fmt.Sprintf(\"%v:%v:%v\", requestVersion, timestamp, body)\n\tmac := hmac.New(sha256.New, []byte(s.SigningKey))\n\tmac.Write([]byte(compiled))\n\texpectedSignature := mac.Sum(nil)\n\treturn hmac.Equal(expectedSignature, []byte(requestSignature))\n}", "func validateSignature(signature, secretKey string, payload []byte) error {\n\tsum := SHA1HMAC([]byte(secretKey), payload)\n\tif subtle.ConstantTimeCompare([]byte(sum), []byte(signature)) != 1 {\n\t\tlog.Printf(\"Expected signature %q (sum), got %q (hub-signature)\", sum, signature)\n\t\treturn errors.New(\"payload signature check failed\")\n\t}\n\treturn nil\n}", "func CheckPayloadSignature(payload []byte, secret string, signature string) (string, error) {\n\tif strings.HasPrefix(signature, \"sha1=\") {\n\t\tsignature = signature[5:]\n\t}\n\n\tmac := hmac.New(sha1.New, []byte(secret))\n\t_, err := mac.Write(payload)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\texpectedMAC := hex.EncodeToString(mac.Sum(nil))\n\n\tif !hmac.Equal([]byte(signature), []byte(expectedMAC)) {\n\t\treturn expectedMAC, &SignatureError{expectedMAC}\n\t}\n\treturn expectedMAC, err\n}", "func (c *Client) GetSignature(req *http.Request) string {\n\t// Sort fcHeaders.\n\theaders := &fcHeaders{}\n\tfor k := range req.Header {\n\t\tif strings.HasPrefix(strings.ToLower(k), \"x-fc-\") {\n\t\t\theaders.Keys = append(headers.Keys, strings.ToLower(k))\n\t\t\theaders.Values = append(headers.Values, req.Header.Get(k))\n\t\t}\n\t}\n\tsort.Sort(headers)\n\tfcHeaders := \"\"\n\tfor i := range headers.Keys {\n\t\tfcHeaders += headers.Keys[i] + \":\" + headers.Values[i] + \"\\n\"\n\t}\n\n\thttpMethod := req.Method\n\tcontentMd5 := req.Header.Get(\"Content-MD5\")\n\tcontentType := req.Header.Get(\"Content-Type\")\n\tdate := req.Header.Get(\"Date\")\n\tfcResource := req.URL.Path\n\n\tsignStr := httpMethod + \"\\n\" + contentMd5 + \"\\n\" + contentType + \"\\n\" + date + \"\\n\" + fcHeaders + fcResource\n\n\th := hmac.New(func() hash.Hash { return sha256.New() }, []byte(c.accessKeySecret))\n\t_, _ = io.WriteString(h, signStr)\n\treturn base64.StdEncoding.EncodeToString(h.Sum(nil))\n}", "func getHMAC(sessionID string) string {\n\th := hmac.New(sha512.New, key)\n\th.Write([]byte(sessionID))\n\tfmt.Printf(\"%x\\n\", h.Sum(nil))\n\t// return signature and sessionID with separator\n\treturn fmt.Sprintf(\"%x\", h.Sum(nil)) + \"|\" + sessionID\n}", "func (b Binding) Signature() (ret string) {\n\tret = b.ValidationString()\n\tif receivesBinaryContent(b) {\n\t\tret = \":\" + ret\n\t}\n\treturn\n}", "func (r *Response) IsSignatureValid(c *sa.Client) (bool, error) {\n\tsaDate := r.HTTPResponse.Header.Get(\"X-SA-DATE\")\n\tsaSignature := r.HTTPResponse.Header.Get(\"X-SA-SIGNATURE\")\n\tvar buffer bytes.Buffer\n\tbuffer.WriteString(saDate)\n\tbuffer.WriteString(\"\\n\")\n\tbuffer.WriteString(c.AppID)\n\tbuffer.WriteString(\"\\n\")\n\tbuffer.WriteString(r.RawJSON)\n\traw := buffer.String()\n\tbyteKey, _ := 
hex.DecodeString(c.AppKey)\n\tbyteData := []byte(raw)\n\tsig := hmac.New(sha256.New, byteKey)\n\tsig.Write([]byte(byteData))\n\tcomputedSig := base64.StdEncoding.EncodeToString(sig.Sum(nil))\n\tif computedSig != saSignature {\n\t\treturn false, nil\n\t}\n\treturn true, nil\n}", "func (signature Signature) String() string {\n\treturn base58.Encode(signature[:])\n}", "func GetSignature(key string, method string, req map[string]string, fcResource string) string {\n\theader := &headers{}\n\tlowerKeyHeaders := map[string]string{}\n\tfor k, v := range req {\n\t\tlowerKey := strings.ToLower(k)\n\t\tif strings.HasPrefix(lowerKey, HTTPHeaderPrefix) {\n\t\t\theader.Keys = append(header.Keys, lowerKey)\n\t\t\theader.Vals = append(header.Vals, v)\n\t\t}\n\t\tlowerKeyHeaders[lowerKey] = v\n\t}\n\tsort.Sort(header)\n\n\tfcHeaders := \"\"\n\tfor i := range header.Keys {\n\t\tfcHeaders += header.Keys[i] + \":\" + header.Vals[i] + \"\\n\"\n\t}\n\n\tdate := req[HTTPHeaderDate]\n\tif expires, ok := getExpiresFromURLQueries(fcResource); ok {\n\t\tdate = expires\n\t}\n\n\tsignStr := method + \"\\n\" + lowerKeyHeaders[strings.ToLower(HTTPHeaderContentMD5)] + \"\\n\" + lowerKeyHeaders[strings.ToLower(HTTPHeaderContentType)] + \"\\n\" + date + \"\\n\" + fcHeaders + fcResource\n\n\th := hmac.New(func() hash.Hash { return sha256.New() }, []byte(key))\n\tio.WriteString(h, signStr)\n\tsignedStr := base64.StdEncoding.EncodeToString(h.Sum(nil))\n\n\treturn signedStr\n}", "func (k krSignature) IsWellFormed() bool {\n\treturn len(k) == 64\n}", "func (s Signature) String() string {\n\treturn fmt.Sprintf(\"Signature(hash=%v): %v\", s.Hash, Bytes(s.Data))\n}", "func (a *Authorization) Signature() ([]byte, error) {\n\treturn a.signature, nil\n}", "func DoCheckSignature(PartnerID string, RqTimestamp string, Signature string) string {\n\n\th := sha256.New()\n\th.Write([]byte(PartnerID + \"|\" + RqTimestamp))\n\n\tsEnc := b64.StdEncoding.EncodeToString(h.Sum(nil))\n\tif sEnc == Signature {\n\t\treturn \"00\"\n\t}\n\n\treturn \"63\"\n}", "func verify(json string, signature string, pubkeyPem string) bool {\n // hash := hash(json)\n\n return true\n}", "func HMAC(algo HashAlgo, s, key string) string {\n\tvar mac hash.Hash\n\n\tswitch algo {\n\tcase AlgoMD5:\n\t\tmac = hmac.New(md5.New, []byte(key))\n\tcase AlgoSha1:\n\t\tmac = hmac.New(sha1.New, []byte(key))\n\tcase AlgoSha224:\n\t\tmac = hmac.New(sha256.New224, []byte(key))\n\tcase AlgoSha256:\n\t\tmac = hmac.New(sha256.New, []byte(key))\n\tcase AlgoSha384:\n\t\tmac = hmac.New(sha512.New384, []byte(key))\n\tcase AlgoSha512:\n\t\tmac = hmac.New(sha512.New, []byte(key))\n\tdefault:\n\t\treturn s\n\t}\n\n\tmac.Write([]byte(s))\n\n\treturn hex.EncodeToString(mac.Sum(nil))\n}", "func (gh *GitHubChecker) validSignature(payload []byte, signature string) error {\n\texpected := gh.hashPayload(payload)\n\n\tsignatureParts := strings.SplitN(signature, \"=\", 2)\n\tif len(signatureParts) != 2 {\n\t\treturn fmt.Errorf(\"%s header should be of the form \\\"<type>=<hash>\\\", not %q\", xGitHubSignature, signature)\n\t}\n\n\ttp := signatureParts[0]\n\thash := signatureParts[1]\n\n\tif tp != \"sha1\" {\n\t\treturn fmt.Errorf(\"%s header signature type should be \\\"sha1\\\", not %q\", xGitHubSignature, signature)\n\t}\n\n\tif !hmac.Equal([]byte(hash), []byte(expected)) {\n\t\treturn fmt.Errorf(\"%s header signature hash should be %q, not %q\", xGitHubSignature, expected, hash)\n\t}\n\n\treturn nil\n}", "func (d *Deployment) CheckHMAC(version string) error {\n\n\t// Build the 
filenames.\n\tartifactPath, exists := makeArtifactPath(d.artifactDir, d.appName, version, d.acfg.Extension)\n\tif !exists {\n\t\treturn fmt.Errorf(\"Artifact does not exist: %s\", artifactPath)\n\t}\n\thmacPath, exists := makeHMACPath(d.artifactDir, d.appName, version, d.acfg.Extension)\n\tif !exists {\n\t\treturn fmt.Errorf(\"HMAC does not exist: %s\", artifactPath)\n\t}\n\n\t// Read in the HMAC.\n\tif expectedMAC, err := ioutil.ReadFile(hmacPath); err == nil {\n\n\t\t// Open the artifact, and calculate its HMAC.\n\t\tif fp, err := os.Open(artifactPath); err == nil {\n\t\t\tmessageMAC := CalculateHMAC(fp, NewHMACCalculator(d.cfg.Secret))\n\t\t\tif !hmac.Equal(messageMAC, expectedMAC) {\n\t\t\t\treturn fmt.Errorf(\n\t\t\t\t\t\"Artifact is corrupt: Expected HMAC: %q: Calculated HMAC: %q\",\n\t\t\t\t\tstring(expectedMAC),\n\t\t\t\t\tstring(messageMAC),\n\t\t\t\t)\n\t\t\t}\n\n\t\t} else {\n\t\t\treturn fmt.Errorf(\"Error while reading %q: %s\", artifactPath, err.Error())\n\t\t}\n\t} else {\n\t\treturn fmt.Errorf(\"Error while reading %q: %s\", hmacPath, err.Error())\n\t}\n\n\treturn nil\n}", "func checkMAC(unsignedData, receivedHMAC, key string) bool {\n\tmac := hmac.New(sha1.New, []byte(key))\n\tmac.Write([]byte(unsignedData))\n\texpectedMAC := mac.Sum(nil)\n\t// fmt.Println(hex.EncodeToString([]byte(expectedMAC)))\n\t// log.Debugf(\"## checkMAC sig: secret: %s\", key)\n\tlog.Debugf(\"## checkMAC sig: messageSig: %x\", string(receivedHMAC))\n\tlog.Debugf(\"## checkMAC sig: computedSig: %x\", hex.EncodeToString([]byte(expectedMAC)))\n\treturn receivedHMAC == hex.EncodeToString([]byte(expectedMAC))\n}", "func createSignature(c *Credentials, formattedShortTime, stringToSign string) string {\n\th1 := makeHmac([]byte(\"AWS4\"+c.SecretAccessKey), []byte(formattedShortTime))\n\th2 := makeHmac(h1, []byte(c.Region))\n\th3 := makeHmac(h2, []byte(\"s3\"))\n\th4 := makeHmac(h3, []byte(\"aws4_request\"))\n\tsignature := makeHmac(h4, []byte(stringToSign))\n\treturn hex.EncodeToString(signature)\n}", "func HMAC(in string, key []byte) (string, error) {\n\th := hmac.New(sha1.New, key)\n\tn, err := h.Write([]byte(in))\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif got, want := n, len(in); got < want {\n\t\treturn \"\", fmt.Errorf(\"only hashed %d of %d bytes\", got, want)\n\t}\n\tdig := h.Sum(nil)\n\treturn fmt.Sprintf(\"%x\", dig), nil\n}", "func (m EncMessage) Signature() []byte {\n\treturn m.Sig\n}", "func getTokenSignature(header *Header, body *Body, secret *pbauth.Secret) (string, error) {\n\tif err := ValidateHeader(header); err != nil {\n\t\treturn \"\", err\n\t}\n\tif err := ValidateBody(body); err != nil {\n\t\treturn \"\", err\n\t}\n\tif err := ValidateSecret(secret); err != nil {\n\t\treturn \"\", err\n\t}\n\tif body.Permission == Admin && header.Alg != Hs512 {\n\t\treturn \"\", consts.ErrInvalidPermission\n\t}\n\tif header.TokenTyp != Jwt && header.TokenTyp != Jet {\n\t\treturn \"\", consts.ErrUnknownTokenType\n\t}\n\t// Token Signature = <encoded header>.<encoded body>.<hashed(<encoded header>.<encoded body>)>\n\t// 1. Encode the header\n\tencodedHeader, err := base64Encode(header)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\t// 2. Encode the body\n\tencodedBody, err := base64Encode(body)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\t// 3. Build <encoded header>.<encoded body>\n\t// 4. Build <hashed(<encoded header>.<encoded body>)>\n\t// 5. 
Build Token Signature = <encoded header>.<encoded body>.<hashed(<encoded header>.<encoded body>)>\n\treturn buildTokenSignature(encodedHeader, encodedBody, header.Alg, secret)\n}", "func signature(req *http.Request, awsSecretAccessKey string) string {\n\treturn signWithKey(stringToSign(req), awsSecretAccessKey)\n}", "func (m *SigningMethodHMAC) Verify(signingString string, sig []byte, key interface{}) error {\n\t// Verify the key is the right type\n\tkeyBytes, ok := key.([]byte)\n\tif !ok {\n\t\treturn ErrInvalidKeyType\n\t}\n\n\t// Can we use the specified hashing method?\n\tif !m.Hash.Available() {\n\t\treturn ErrHashUnavailable\n\t}\n\n\t// This signing method is symmetric, so we validate the signature\n\t// by reproducing the signature from the signing string and key, then\n\t// comparing that against the provided signature.\n\thasher := hmac.New(m.Hash.New, keyBytes)\n\thasher.Write([]byte(signingString))\n\tif !hmac.Equal(sig, hasher.Sum(nil)) {\n\t\treturn ErrSignatureInvalid\n\t}\n\n\t// No validation errors. Signature is good.\n\treturn nil\n}", "func SignatureHash(signature string) string {\n\treturn fmt.Sprintf(\"%x\", sha512.Sum384([]byte(signature)))\n}", "func checkHMAC(r io.Reader, mac hash.Hash) (ok bool, err error) {\n\tmac1 := mac.Sum(nil) // Calculated HMAC\n\tmac2 := make([]byte, mac.Size()) // Read HMAC\n\tif _, err = io.ReadFull(r, mac2); err != nil {\n\t\treturn false, err\n\t}\n\treturn hmac.Equal(mac1, mac2), nil\n}", "func generateSignature(postData, secret []byte) string {\n\tmac := hmac.New(md5.New, secret)\n\tmac.Write(postData)\n\tsignature := fmt.Sprintf(\"%x\", mac.Sum(nil))\n\n\treturn signature\n}", "func TestCheckSignatureEncoding(t *testing.T) {\n\tt.Parallel()\n\n\ttests := []struct {\n\t\tname string\n\t\tsig []byte\n\t\tisValid bool\n\t}{\n\t\t{\n\t\t\tname: \"valid signature\",\n\t\t\tsig: decodeHex(\"304402204e45e16932b8af514961a1d3a1a25\" +\n\t\t\t\t\"fdf3f4f7732e9d624c6c61548ab5fb8cd41022018152\" +\n\t\t\t\t\"2ec8eca07de4860a4acdd12909d831cc56cbbac46220\" +\n\t\t\t\t\"82221a8768d1d09\"),\n\t\t\tisValid: true,\n\t\t},\n\t\t{\n\t\t\tname: \"empty.\",\n\t\t\tsig: nil,\n\t\t\tisValid: false,\n\t\t},\n\t\t{\n\t\t\tname: \"bad magic\",\n\t\t\tsig: decodeHex(\"314402204e45e16932b8af514961a1d3a1a25\" +\n\t\t\t\t\"fdf3f4f7732e9d624c6c61548ab5fb8cd41022018152\" +\n\t\t\t\t\"2ec8eca07de4860a4acdd12909d831cc56cbbac46220\" +\n\t\t\t\t\"82221a8768d1d09\"),\n\t\t\tisValid: false,\n\t\t},\n\t\t{\n\t\t\tname: \"bad 1st int marker magic\",\n\t\t\tsig: decodeHex(\"304403204e45e16932b8af514961a1d3a1a25\" +\n\t\t\t\t\"fdf3f4f7732e9d624c6c61548ab5fb8cd41022018152\" +\n\t\t\t\t\"2ec8eca07de4860a4acdd12909d831cc56cbbac46220\" +\n\t\t\t\t\"82221a8768d1d09\"),\n\t\t\tisValid: false,\n\t\t},\n\t\t{\n\t\t\tname: \"bad 2nd int marker\",\n\t\t\tsig: decodeHex(\"304402204e45e16932b8af514961a1d3a1a25\" +\n\t\t\t\t\"fdf3f4f7732e9d624c6c61548ab5fb8cd41032018152\" +\n\t\t\t\t\"2ec8eca07de4860a4acdd12909d831cc56cbbac46220\" +\n\t\t\t\t\"82221a8768d1d09\"),\n\t\t\tisValid: false,\n\t\t},\n\t\t{\n\t\t\tname: \"short len\",\n\t\t\tsig: decodeHex(\"304302204e45e16932b8af514961a1d3a1a25\" +\n\t\t\t\t\"fdf3f4f7732e9d624c6c61548ab5fb8cd41022018152\" +\n\t\t\t\t\"2ec8eca07de4860a4acdd12909d831cc56cbbac46220\" +\n\t\t\t\t\"82221a8768d1d09\"),\n\t\t\tisValid: false,\n\t\t},\n\t\t{\n\t\t\tname: \"long len\",\n\t\t\tsig: decodeHex(\"304502204e45e16932b8af514961a1d3a1a25\" +\n\t\t\t\t\"fdf3f4f7732e9d624c6c61548ab5fb8cd41022018152\" +\n\t\t\t\t\"2ec8eca07de4860a4acdd12909d831cc56cbbac46220\" 
+\n\t\t\t\t\"82221a8768d1d09\"),\n\t\t\tisValid: false,\n\t\t},\n\t\t{\n\t\t\tname: \"long X\",\n\t\t\tsig: decodeHex(\"304402424e45e16932b8af514961a1d3a1a25\" +\n\t\t\t\t\"fdf3f4f7732e9d624c6c61548ab5fb8cd41022018152\" +\n\t\t\t\t\"2ec8eca07de4860a4acdd12909d831cc56cbbac46220\" +\n\t\t\t\t\"82221a8768d1d09\"),\n\t\t\tisValid: false,\n\t\t},\n\t\t{\n\t\t\tname: \"long Y\",\n\t\t\tsig: decodeHex(\"304402204e45e16932b8af514961a1d3a1a25\" +\n\t\t\t\t\"fdf3f4f7732e9d624c6c61548ab5fb8cd41022118152\" +\n\t\t\t\t\"2ec8eca07de4860a4acdd12909d831cc56cbbac46220\" +\n\t\t\t\t\"82221a8768d1d09\"),\n\t\t\tisValid: false,\n\t\t},\n\t\t{\n\t\t\tname: \"short Y\",\n\t\t\tsig: decodeHex(\"304402204e45e16932b8af514961a1d3a1a25\" +\n\t\t\t\t\"fdf3f4f7732e9d624c6c61548ab5fb8cd41021918152\" +\n\t\t\t\t\"2ec8eca07de4860a4acdd12909d831cc56cbbac46220\" +\n\t\t\t\t\"82221a8768d1d09\"),\n\t\t\tisValid: false,\n\t\t},\n\t\t{\n\t\t\tname: \"trailing crap\",\n\t\t\tsig: decodeHex(\"304402204e45e16932b8af514961a1d3a1a25\" +\n\t\t\t\t\"fdf3f4f7732e9d624c6c61548ab5fb8cd41022018152\" +\n\t\t\t\t\"2ec8eca07de4860a4acdd12909d831cc56cbbac46220\" +\n\t\t\t\t\"82221a8768d1d0901\"),\n\t\t\tisValid: false,\n\t\t},\n\t\t{\n\t\t\tname: \"X == N \",\n\t\t\tsig: decodeHex(\"30440220fffffffffffffffffffffffffffff\" +\n\t\t\t\t\"ffebaaedce6af48a03bbfd25e8cd0364141022018152\" +\n\t\t\t\t\"2ec8eca07de4860a4acdd12909d831cc56cbbac46220\" +\n\t\t\t\t\"82221a8768d1d09\"),\n\t\t\tisValid: false,\n\t\t},\n\t\t{\n\t\t\tname: \"X == N \",\n\t\t\tsig: decodeHex(\"30440220fffffffffffffffffffffffffffff\" +\n\t\t\t\t\"ffebaaedce6af48a03bbfd25e8cd0364142022018152\" +\n\t\t\t\t\"2ec8eca07de4860a4acdd12909d831cc56cbbac46220\" +\n\t\t\t\t\"82221a8768d1d09\"),\n\t\t\tisValid: false,\n\t\t},\n\t\t{\n\t\t\tname: \"Y == N\",\n\t\t\tsig: decodeHex(\"304402204e45e16932b8af514961a1d3a1a25\" +\n\t\t\t\t\"fdf3f4f7732e9d624c6c61548ab5fb8cd410220fffff\" +\n\t\t\t\t\"ffffffffffffffffffffffffffebaaedce6af48a03bb\" +\n\t\t\t\t\"fd25e8cd0364141\"),\n\t\t\tisValid: false,\n\t\t},\n\t\t{\n\t\t\tname: \"Y > N\",\n\t\t\tsig: decodeHex(\"304402204e45e16932b8af514961a1d3a1a25\" +\n\t\t\t\t\"fdf3f4f7732e9d624c6c61548ab5fb8cd410220fffff\" +\n\t\t\t\t\"ffffffffffffffffffffffffffebaaedce6af48a03bb\" +\n\t\t\t\t\"fd25e8cd0364142\"),\n\t\t\tisValid: false,\n\t\t},\n\t\t{\n\t\t\tname: \"0 len X\",\n\t\t\tsig: decodeHex(\"302402000220181522ec8eca07de4860a4acd\" +\n\t\t\t\t\"d12909d831cc56cbbac4622082221a8768d1d09\"),\n\t\t\tisValid: false,\n\t\t},\n\t\t{\n\t\t\tname: \"0 len Y\",\n\t\t\tsig: decodeHex(\"302402204e45e16932b8af514961a1d3a1a25\" +\n\t\t\t\t\"fdf3f4f7732e9d624c6c61548ab5fb8cd410200\"),\n\t\t\tisValid: false,\n\t\t},\n\t\t{\n\t\t\tname: \"extra R padding\",\n\t\t\tsig: decodeHex(\"30450221004e45e16932b8af514961a1d3a1a\" +\n\t\t\t\t\"25fdf3f4f7732e9d624c6c61548ab5fb8cd410220181\" +\n\t\t\t\t\"522ec8eca07de4860a4acdd12909d831cc56cbbac462\" +\n\t\t\t\t\"2082221a8768d1d09\"),\n\t\t\tisValid: false,\n\t\t},\n\t\t{\n\t\t\tname: \"extra S padding\",\n\t\t\tsig: decodeHex(\"304502204e45e16932b8af514961a1d3a1a25\" +\n\t\t\t\t\"fdf3f4f7732e9d624c6c61548ab5fb8cd41022100181\" +\n\t\t\t\t\"522ec8eca07de4860a4acdd12909d831cc56cbbac462\" +\n\t\t\t\t\"2082221a8768d1d09\"),\n\t\t\tisValid: false,\n\t\t},\n\t}\n\n\t// flags := ScriptVerifyStrictEncoding\n\tflags := StandardVerifyFlags\n\tfor _, test := range tests {\n\t\terr := TstCheckSignatureEncoding(test.sig, flags)\n\t\tif err != nil && test.isValid {\n\t\t\tt.Errorf(\"checkSignatureEncoding test '%s' failed \"+\n\t\t\t\t\"when it 
should have succeeded: %v\", test.name,\n\t\t\t\terr)\n\t\t} else if err == nil && !test.isValid {\n\t\t\tt.Errorf(\"checkSignatureEncoding test '%s' succeeded \"+\n\t\t\t\t\"when it should have failed\", test.name)\n\t\t}\n\t}\n}", "func TestHMAC(t *testing.T) {\n\t// Generate 1MB of random bytes\n\trandomBytes := make([]byte, 1<<20)\n\tif _, err := rand.Read(randomBytes); err != nil {\n\t\tt.Fatalf(\"%v reading random bytes\", err)\n\t}\n\tinput := bytes.NewReader(randomBytes)\n\n\t// Compute the source data's key\n\tkey, err := ComputeKey(input, \"\")\n\tif err != nil {\n\t\tt.Fatalf(\"%v computing key\", err)\n\t}\n\n\t// Create a writer to encrypt the data with key\n\twriter, err := NewWriter(input, key)\n\tif err != nil {\n\t\tt.Fatalf(\"%v creating Writer\", err)\n\t}\n\n\t// Capture the encrypted output in a buffer.\n\tvar output bytes.Buffer\n\tmac, err := writer.Encrypt(&output)\n\tif err != nil {\n\t\tt.Fatalf(\"%v encrypting input\", err)\n\t}\n\n\t// Wrap the output bytes in a type that implements io.ReadSeeker\n\toutputReader := bytes.NewReader(output.Bytes())\n\n\t// Seek to and read the tail bytes of the output to get the embedded HMAC\n\tif _, err := outputReader.Seek(-int64(len(mac)), io.SeekEnd); err != nil {\n\t\tt.Fatalf(\"%v seeking to HMAC suffix\", err)\n\t}\n\n\toutputHMAC := make([]byte, len(mac))\n\tif l, err := outputReader.Read(outputHMAC); l != len(mac) || err != nil {\n\t\tt.Fatalf(\"%v reading HMAC suffix\", err)\n\t}\n\n\t// Fail if the returned and embedded signatures differ\n\tif !hmac.Equal(mac, outputHMAC) {\n\t\tt.Fatal(\"Returned hash differs from embedded hash\")\n\t}\n}", "func (t *Transaction) Signature() string {\n\treturn utils.EncodeToBase64(t.signature)\n}", "func (a Asset) signature() string {\n\tif !a.Config.URL.SignURL {\n\t\treturn \"\"\n\t}\n\n\talgo, length := a.getSignatureAlgorithmAndLength()\n\n\ttoSign := joinUrl([]interface{}{a.Transformation, a.PublicID})\n\n\treturn signature.SignURL(toSign, a.Config.Cloud.APISecret, algo, length)\n}", "func (s *SignatureVerification) GetSignature() string {\n\tif s == nil || s.Signature == nil {\n\t\treturn \"\"\n\t}\n\treturn *s.Signature\n}", "func ValidateHmac(encryptedDataWithHmac string, key string) (string, error) {\n\t// ensure data has value\n\tif len(encryptedDataWithHmac) <= 64 { // hex is 2x the byte, so 32 normally is now 64\n\t\treturn \"\", errors.New(\"Data with HMAC is Required\")\n\t}\n\n\t// ensure key is 32 bytes\n\tif len(key) < 32 {\n\t\treturn \"\", errors.New(\"Key Must Be 32 Bytes\")\n\t}\n\n\t// cut the key to 32 bytes only\n\tkey = util.Left(key, 32)\n\n\t// data to byte\n\tdata := []byte(encryptedDataWithHmac)\n\n\t// parse message\n\tmessage := data[:len(data)-64]\n\tmac, err := util.HexToByte(string(data[len(data)-64:]))\n\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\t// new mac\n\tmacProducer := hmac.New(sha256.New, []byte(key))\n\tmacProducer.Write(message)\n\n\t// get calculated mac\n\tcalculatedMac := macProducer.Sum(nil)\n\n\tif hmac.Equal(mac, calculatedMac) {\n\t\t// hmac match, return encrypted data without hmac\n\t\treturn string(message), nil\n\t}\n\n\t// if process gets here, then hmac failed\n\treturn \"\", errors.New(\"HMAC Verification Failed\")\n}", "func (wh *Webhook) ValidateSignature(body []byte, signature string) error {\n\tmac := hmac.New(sha1.New, []byte(wh.Token))\n\tif _, err := mac.Write(body); err != nil {\n\t\treturn err\n\t}\n\n\tsig, err := hex.DecodeString(signature)\n\tif err != nil || !hmac.Equal(sig, mac.Sum(nil)) {\n\t\treturn 
ErrInvalidWebhookSignature\n\t}\n\n\treturn nil\n}", "func (c *HMACStrategy) Validate(token string) error {\n\tsplit := strings.Split(token, \".\")\n\tif len(split) != 2 {\n\t\treturn errors.WithStack(fosite.ErrInvalidTokenFormat)\n\t}\n\n\tkey := split[0]\n\tsignature := split[1]\n\tif key == \"\" || signature == \"\" {\n\t\treturn errors.WithStack(fosite.ErrInvalidTokenFormat)\n\t}\n\n\tdecodedSignature, err := b64.DecodeString(signature)\n\tif err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\n\tdecodedKey, err := b64.DecodeString(key)\n\tif err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\n\tuseSecret := append([]byte{}, c.GlobalSecret...)\n\tmac := hmac.New(sha256.New, useSecret)\n\t_, err = mac.Write(decodedKey)\n\tif err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\n\tif !hmac.Equal(decodedSignature, mac.Sum([]byte{})) {\n\t\t// Hash is invalid\n\t\treturn errors.WithStack(fosite.ErrTokenSignatureMismatch)\n\t}\n\n\treturn nil\n}", "func validateSignatureAgainstKey(token *jwt.Token, tokenParts []string, key interface{}) error {\n\t// jwt.SigningMethod.Verify requires signing string and signature as separate inputs\n\treturn token.Method.Verify(strings.Join(tokenParts[0:2], \".\"), token.Signature, key)\n}", "func (ctx *GithubWebhookHttpHandler) computeExpectedSignature(body []byte) string {\n\tsecret := ctx.webhookConfig.Secret\n\tmac := hmac.New(sha1.New, []byte(secret))\n\tmac.Write(body)\n\texpectedSignature := fmt.Sprintf(\"sha1=%s\", hex.EncodeToString(mac.Sum(nil)))\n\treturn expectedSignature\n}", "func GetSignature(signedParams map[string]*string, secret *string) (_result *string) {\n\tstringToSign := buildStringToSign(signedParams)\n\tsignature := sign(stringToSign, tea.StringValue(secret))\n\treturn tea.String(signature)\n}", "func VerifySignatureValidity(sig []byte) int {\n\t//64+1\n\tif len(sig) != 65 {\n\t\tlog.Panic(\"VerifySignatureValidity: sig len is not 65 bytes\")\n\t\treturn 0\n\t}\n\t//malleability check:\n\t//highest bit of 32nd byte must be 1\n\t//0x7f is 127 or 0b01111111\n\tif (sig[32] >> 7) == 1 {\n\t\treturn 0 // signature is malleable\n\t}\n\t//recovery id check\n\tif sig[64] >= 4 {\n\t\treturn 0 // recovery id invalid\n\t}\n\treturn 1\n}", "func createHMACKey() string {\n\tkey := make([]byte, 49)\n\trand.Reader.Read(key)\n\tvar cooked = base64.StdEncoding.EncodeToString(key)\n\treturn cooked\n}", "func Verify(h http.HandlerFunc) http.HandlerFunc {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\n\t\thmacHeader := r.Header.Get(\"AUTHORIZATION\")\n\n\t\thmac := simplcrypto.HMACWithSecretAndData(token, hmacData)\n\t\thmacString := simplcrypto.Base64URLEncode(hmac)\n\n\t\tif hmacHeader != hmacString {\n\t\t\tfmt.Printf(\"unauthorized\\n\")\n\t\t\tw.WriteHeader(http.StatusUnauthorized)\n\t\t\treturn\n\t\t}\n\n\t\th.ServeHTTP(w, r)\n\t})\n}", "func verifyMac(w io.Writer, name string, data, signature []byte) error {\n\t// name := \"projects/my-project/locations/us-east1/keyRings/my-key-ring/cryptoKeys/my-key/cryptoKeyVersions/123\"\n\t// data := \"my previous data\"\n\t// signature := []byte(\"...\") // Response from a sign request\n\n\t// Create the client.\n\tctx := context.Background()\n\tclient, err := kms.NewKeyManagementClient(ctx)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to create kms client: %w\", err)\n\t}\n\tdefer client.Close()\n\n\t// Build the request.\n\treq := &kmspb.MacVerifyRequest{\n\t\tName: name,\n\t\tData: data,\n\t\tMac: signature,\n\t}\n\n\t// Verify the signature.\n\tresult, err := 
client.MacVerify(ctx, req)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to verify signature: %w\", err)\n\t}\n\n\tfmt.Fprintf(w, \"Verified: %t\", result.Success)\n\treturn nil\n}", "func signStringToSign(stringToSign string, signingKey []byte) (string, error) {\n\thm, err := hmacsha256(signingKey, stringToSign)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn hex.EncodeToString(hm), nil\n}", "func buildTokenSignature(encodedHeader string, encodedBody string, alg Algorithm, secret *pbauth.Secret) (string, error) {\n\tif strings.TrimSpace(encodedHeader) == \"\" {\n\t\treturn \"\", consts.ErrInvalidEncodedHeader\n\t}\n\tif strings.TrimSpace(encodedBody) == \"\" {\n\t\treturn \"\", consts.ErrInvalidEncodedBody\n\t}\n\tif err := ValidateSecret(secret); err != nil {\n\t\treturn \"\", err\n\t}\n\t// 3. Build <encoded header>.<encoded body>\n\tvar bufferHeaderBody bytes.Buffer\n\tbufferHeaderBody.WriteString(encodedHeader)\n\tbufferHeaderBody.WriteString(\".\")\n\tbufferHeaderBody.WriteString(encodedBody)\n\tencodedHeaderBody := bufferHeaderBody.String()\n\t// 4. Build <hashed(<encoded header>.<encoded body>)>\n\tencodedSignature, err := hashSignature(alg, encodedHeaderBody, secret)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\t// 5. Build Token Signature = <encoded header>.<encoded body>.<hashed(<encoded header>.<encoded body>)>\n\tvar bufferTokenSignature bytes.Buffer\n\tbufferTokenSignature.WriteString(encodedHeaderBody)\n\tbufferTokenSignature.WriteString(\".\")\n\tbufferTokenSignature.WriteString(encodedSignature)\n\treturn bufferTokenSignature.String(), nil\n}", "func isLegacySignature(data []byte) (bool, error) {\n\t// Decode clearsign block.\n\tb, _ := clearsign.Decode(data)\n\tif b == nil {\n\t\treturn false, errClearsignedMsgNotFound\n\t}\n\n\t// The plaintext of legacy signatures always begins with \"SIFHASH\", and non-legacy signatures\n\t// never do, as they are JSON.\n\treturn bytes.HasPrefix(b.Plaintext, []byte(\"SIFHASH:\\n\")), nil\n}", "func (pk Publickey) Verify(t string) (string, error) {\n\tif t == \"\" {\n\t\treturn \"\", fmt.Errorf(\"no tokenString\")\n\t}\n\tt = strings.TrimPrefix(t, \"Bearer \")\n\tclaims := &jwt.StandardClaims{}\n\tjwp := &jwt.Parser{\n\t\tValidMethods: []string{\"RS256\", \"RS384\", \"RS512\"},\n\t\tSkipClaimsValidation: false,\n\t}\n\t_, err := jwp.ParseWithClaims(t, claims, func(token *jwt.Token) (interface{}, error) {\n\t\treturn pk.verifyKey, nil\n\t})\n\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"Token is invalid: %v\", err)\n\t}\n\n\tnow := time.Now()\n\tiat := now.Add(-pk.freshnessTime)\n\tif claims.ExpiresAt == 0 || claims.ExpiresAt < now.Unix() {\n\t\treturn \"\", fmt.Errorf(\"Token is expired\")\n\t}\n\tif claims.IssuedAt == 0 || claims.IssuedAt < iat.Unix() {\n\t\treturn \"\", fmt.Errorf(\"Token is too old\")\n\t}\n\n\treturn claims.Subject, nil\n}", "func GenerateUserEmailHMAC(userEmail string) string { return api.GenerateUserEmailHMAC(userEmail) }", "func validMAC(message, messageMAC, key []byte, shaVersion string) (bool, error) {\n\tvar mac hash.Hash\n\n\tswitch shaVersion {\n\tcase \"sha256\":\n\t\tmac = hmac.New(sha256.New, key)\n\tcase \"sha512\":\n\t\tmac = hmac.New(sha512.New, key)\n\tdefault:\n\t\treturn false, fmt.Errorf(\"unsupported SHA version: %s\", shaVersion)\n\t}\n\tmac.Write(message)\n\texpectedMAC := mac.Sum(nil)\n\treturn hmac.Equal(messageMAC, expectedMAC), nil\n}", "func (s CodeSigningSignature) String() string {\n\treturn awsutil.Prettify(s)\n}", "func 
createHMAC(sharedKey,\n\tsecretKey,\n\tdate,\n\thttpMethod,\n\trequestURL,\n\tcontentType,\n\tcontentMD5,\n\tnepApplicationKey,\n\tnepCorrelationID,\n\tnepOrganization,\n\tnepServiceVersion string) (string, error) {\n\tparsedDate, err := http.ParseTime(date)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdate = parsedDate.Format(dateTimeFormat)\n\toneTimeSecret := secretKey + date\n\tu, err := url.Parse(requestURL)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\ttoSign := httpMethod + \"\\n\" + u.RequestURI()\n\tif contentType != \"\" {\n\t\ttoSign += \"\\n\" + contentType\n\t}\n\tif contentMD5 != \"\" {\n\t\ttoSign += \"\\n\" + contentMD5\n\t}\n\tif nepApplicationKey != \"\" {\n\t\ttoSign += \"\\n\" + nepApplicationKey\n\t}\n\tif nepCorrelationID != \"\" {\n\t\ttoSign += \"\\n\" + nepCorrelationID\n\t}\n\tif nepOrganization != \"\" {\n\t\ttoSign += \"\\n\" + nepOrganization\n\t}\n\tif nepServiceVersion != \"\" {\n\t\ttoSign += \"\\n\" + nepServiceVersion\n\t}\n\n\tkey := hmac.New(sha512.New, []byte(oneTimeSecret))\n\tkey.Write([]byte(toSign))\n\ttoken := \"AccessKey \" + sharedKey + \":\" + base64.StdEncoding.EncodeToString(key.Sum(nil))\n\treturn token, nil\n}", "func verifyGitHubEventSignature(providedSignature string, secret string, body []byte) bool {\n\tif providedSignature == \"\" || !strings.HasPrefix(providedSignature, \"sha1=\") {\n\t\treturn false\n\t}\n\n\tsignature, err := hex.DecodeString(providedSignature[5:])\n\tif err != nil {\n\t\treturn false\n\t}\n\n\tmac := hmac.New(sha1.New, []byte(secret))\n\tmac.Write(body)\n\tcomputed := mac.Sum(nil)\n\n\treturn hmac.Equal(computed, signature)\n}", "func VerifySignature(r *http.Request, securityHeader *ContentSecurityHeader, tolerance time.Duration) int {\n\tseconds, err := strconv.ParseInt(securityHeader.Timestamp, 10, 64)\n\tif err != nil {\n\t\treturn httpx.CodeSignatureInvalidHeader\n\t}\n\n\tnow := time.Now().Unix()\n\ttoleranceSeconds := int64(tolerance.Seconds())\n\tif seconds+toleranceSeconds < now || now+toleranceSeconds < seconds {\n\t\treturn httpx.CodeSignatureWrongTime\n\t}\n\n\treqPath, reqQuery := getPathQuery(r)\n\tsignContent := strings.Join([]string{\n\t\tsecurityHeader.Timestamp,\n\t\tr.Method,\n\t\treqPath,\n\t\treqQuery,\n\t\tcomputeBodySignature(r),\n\t}, \"\\n\")\n\tactualSignature := codec.HmacBase64(securityHeader.Key, signContent)\n\n\tpassed := securityHeader.Signature == actualSignature\n\tif !passed {\n\t\tlogx.Infof(\"signature different, expect: %s, actual: %s\",\n\t\t\tsecurityHeader.Signature, actualSignature)\n\t}\n\n\tif passed {\n\t\treturn httpx.CodeSignaturePass\n\t}\n\n\treturn httpx.CodeSignatureInvalidToken\n}", "func ComputeSignature(authToken, uri string, params map[string]string) string {\n\toriginalString := fmt.Sprintf(\"%s%s\", uri, headersWithSep(params, \"\", \"\", false))\n\tmac := hmac.New(sha1.New, []byte(authToken))\n\tmac.Write([]byte(originalString))\n\treturn base64.StdEncoding.EncodeToString(mac.Sum(nil))\n}", "func (s *Signature) String() string {\n\treturn fmt.Sprintf(\"%s %s %s\", s.Value, s.PublicKey, s.Endorsement)\n}", "func verifySECP256K1RSignatureFormat(sig []byte) error {\n\tif len(sig) != SECP256K1RSigLen {\n\t\treturn errInvalidSigLen\n\t}\n\n\tvar s secp256k1.ModNScalar\n\ts.SetByteSlice(sig[32:64])\n\tif s.IsOverHalfOrder() {\n\t\treturn errMutatedSig\n\t}\n\treturn nil\n}", "func (t *Token) Signature() []byte {\n\treturn t.signature\n}", "func (r *RPCRequest) GenerateSig(key, secret string) error {\n\tif len(key) == 0 || len(secret) == 0 {\n\t\treturn 
errors.New(\"You must supply an access key and an access secret\")\n\t}\n\tnonce := time.Now().UnixNano() / int64(time.Millisecond)\n\tsigString := fmt.Sprintf(\"_=%d&_ackey=%s&_acsec=%s&_action=%s\", nonce, key, secret, r.Action)\n\n\t// Append args if present\n\tif len(r.Arguments) != 0 {\n\t\tvar argsString string\n\n\t\t// We have to do this to sort by keys\n\t\tkeys := make([]string, 0, len(r.Arguments))\n\t\tfor key := range r.Arguments {\n\t\t\tkeys = append(keys, key)\n\t\t}\n\t\tsort.Strings(keys)\n\n\t\tfor _, k := range keys {\n\t\t\tv := r.Arguments[k]\n\t\t\tvar s string\n\n\t\t\tswitch t := v.(type) {\n\t\t\tcase []SubscriptionEvent:\n\t\t\t\tvar str = make([]string, len(t))\n\t\t\t\tfor _, j := range t {\n\t\t\t\t\tstr = append(str, string(j))\n\t\t\t\t}\n\t\t\t\ts = strings.Join(str, \"\")\n\t\t\tcase []string:\n\t\t\t\ts = strings.Join(t, \"\")\n\t\t\tcase bool:\n\t\t\t\ts = strconv.FormatBool(t)\n\t\t\tcase int:\n\t\t\t\ts = strconv.FormatInt(int64(t), 10)\n\t\t\tcase int64:\n\t\t\t\ts = strconv.FormatInt(t, 10)\n\t\t\tcase float64:\n\t\t\t\ts = strconv.FormatFloat(t, 'f', -1, 64)\n\t\t\tcase string:\n\t\t\t\ts = t\n\t\t\tdefault:\n\t\t\t\t// Absolutely panic here\n\t\t\t\tpanic(fmt.Sprintf(\"Cannot generate sig string: Unable to handle arg of type %T\", t))\n\t\t\t}\n\t\t\targsString += fmt.Sprintf(\"&%s=%s\", k, s)\n\t\t}\n\t\tsigString += argsString\n\t}\n\thasher := sha256.New()\n\thasher.Write([]byte(sigString))\n\tsigHash := base64.StdEncoding.EncodeToString(hasher.Sum(nil))\n\tr.Sig = fmt.Sprintf(\"%s.%d.%s\", key, nonce, sigHash)\n\treturn nil\n}", "func HmacSha256Signature( params string, key string ) (string, error) {\n\t// formalize the param then output to stdin of openssl command\n\tformalizeParamsCmd := exec.Command(\"echo\", \"-n\", strings.TrimSpace(params))\n\t// openssl command: sha256 message digest algorithm, create hashed MAC with key\n\tsignatureMsgCmd := exec.Command(\"openssl\", \"dgst\", \"-sha256\", \"-hmac\", key)\n\n\t// create new pipe.\n\tpipeReader, pipeWriter := io.Pipe()\n\n\t// wire up the pipe b/w 2 commands as below:\n\n\t// assign stdout of first cmd to pipe Writer\n\tformalizeParamsCmd.Stdout = pipeWriter\n\n\t// assign stdin of second cmd to pipe reader.\n\tsignatureMsgCmd.Stdin = pipeReader\n\n\t// assign the os stdout to the second cmd.\n\tsignatureMsgCmd.Stdout = os.Stdout\n\n\t// run the first cmd.\n\terr := formalizeParamsCmd.Start()\n\tif err != nil {\n\t\tlogrus.Errorf(\"Failed to execute the echo command: param = %s, err = %s\", params, err)\n\t\treturn \"\", err\n\t}\n\n\t// run the second cmd.\n\tvar b bytes.Buffer\n\tsignatureMsgCmd.Stdout = &b\n\terr = signatureMsgCmd.Start()\n\tif err != nil {\n\t\tlogrus.Errorf(\"Failed to execute the openssl dgst command: param = %s, err = %s\", params, err)\n\t\treturn \"\", err\n\t}\n\n\t// make a new go routine to wait for the first command finished.\n\tgo func() {\n\t\t// defer util the go routine done.\n\t\tdefer pipeWriter.Close()\n\t\t// wait util finished.\n\t\terr = formalizeParamsCmd.Wait()\n\t\tif err != nil {\n\t\t\tlogrus.Errorf(\"Failed to run the echo finished with err %s %s\", formalizeParamsCmd.Stdout, err)\n\t\t}\n\t\t// done now can close the pipeWriter.\n\t}()\n\t// wait util the second done.\n\terr = signatureMsgCmd.Wait()\n\tif err != nil {\n\t\tlogrus.Errorf(\"Failed to run the openssl finished with %s err %s\", signatureMsgCmd.Stdout, err)\n\t\treturn \"\", err\n\t}\n\t// return the result from stdout.\n\treturn strings.Trim(b.String(), \"\\n\"), nil\n}", "func 
compareHmac(value string, hash string, secret string) bool {\n return hash == Hash(value, secret)\n}", "func Signature(method string) []byte {\n\t// hash method\n\thasher := sha3.NewLegacyKeccak256()\n\thasher.Write([]byte(method))\n\tb := hasher.Sum(nil)\n\treturn b[:4]\n}", "func validateSignature(pubKey string, signature string, elements ...string) error {\n\tsig, err := util.ConvertSignature(signature)\n\tif err != nil {\n\t\treturn www.UserError{\n\t\t\tErrorCode: www.ErrorStatusInvalidSignature,\n\t\t}\n\t}\n\tb, err := hex.DecodeString(pubKey)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpk, err := identity.PublicIdentityFromBytes(b)\n\tif err != nil {\n\t\treturn err\n\t}\n\tvar msg string\n\tfor _, v := range elements {\n\t\tmsg += v\n\t}\n\tif !pk.VerifyMessage([]byte(msg), sig) {\n\t\treturn www.UserError{\n\t\t\tErrorCode: www.ErrorStatusInvalidSignature,\n\t\t}\n\t}\n\treturn nil\n}", "func validateSignature(transactionID string, transactionInputSignature string, unspentOutputAddress string) (bool, error) {\n\n\t// unspentOutputAddress is actually public key\n\t// first try to decode it to PEM block\n\tpemBlock, _ := pem.Decode([]byte(unspentOutputAddress))\n\tif pemBlock == nil {\n\t\treturn false, nil\n\t}\n\t// try to get the public key out of the PEM block\n\tpub, err := x509.ParsePKIXPublicKey(pemBlock.Bytes)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\t// get the string value out of signature which is hex encoded\n\tdecodedTransactionInputSignature, err := hex.DecodeString(transactionInputSignature)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\t// hash the unsigned transactionID so we can use the value in signature verification\n\thashedID := sha256.Sum256([]byte(transactionID))\n\n\t// verify signed decoded transactionID to the hashed unsigned transactionID\n\tvar verificationError = rsa.VerifyPKCS1v15(pub.(*rsa.PublicKey), crypto.SHA256, hashedID[:], []byte(decodedTransactionInputSignature))\n\n\t// verification failed\n\tif verificationError != nil {\n\t\treturn false, verificationError\n\t}\n\n\t// verification was success if there is no error\n\treturn true, nil\n}", "func IsSignatureUUIDValid(promise *cAPI.Promise) (bool, bson.ObjectId) {\n\tif bson.IsObjectIdHex(promise.Context.SignatureUUID) {\n\t\treturn true, bson.ObjectIdHex(promise.Context.SignatureUUID)\n\t}\n\n\treturn false, bson.NewObjectId()\n}", "func validMAC(message, messageMAC, key []byte) bool {\n\tmac := hmac.New(sha256.New, key)\n\tmac.Write(message)\n\texpectedMAC := mac.Sum(nil)\n\treturn hmac.Equal(messageMAC, expectedMAC)\n}", "func (s *SharedMemory) Signature() string {\n\treturn util.DecodeCharPtr(unsafe.Pointer(&s.shmem.dwSignature), C.sizeof_DWORD)\n}", "func (ss StdSignature) GetSignature() []byte {\n\treturn ss.Signature\n}", "func verifySlackSignature(r *http.Request, slackSigningSecret []byte) bool {\n\tif r.Body == nil {\n\t\treturn false\n\t}\n\n\t// do not consume req.body\n\tbodyBytes, _ := ioutil.ReadAll(r.Body)\n\tr.Body = ioutil.NopCloser(bytes.NewBuffer(bodyBytes))\n\n\t// prepare message for signing\n\ttimestamp := r.Header.Get(hTimestamp)\n\tslackSignature := r.Header.Get(hSignature)\n\tmessage := \"v0:\" + timestamp + \":\" + string(bodyBytes)\n\n\t// Timeout check\n\tts, err := strconv.ParseInt(timestamp, 10, 64)\n\tif err != nil {\n\t\tlog.Printf(\"failed strconv.ParseInt%v\\n\", err)\n\t\treturn false\n\t}\n\n\ttSince := time.Since(time.Unix(ts, 0))\n\tdiff := time.Duration(abs64(int64(tSince)))\n\tif diff > 5*time.Minute {\n\t\tlog.Println(\"timed 
out\")\n\t\treturn false\n\t}\n\n\t// Not timeouted, then check Mac\n\treturn checkMAC(message, slackSignature, slackSigningSecret)\n}", "func ValidMAC(message, messageMAC, key []byte) bool {\n\tfmt.Println(\"===>\", string(messageMAC))\n\tmac := hmac.New(sha256.New, key)\n\tmac.Write(message)\n\texpectedMAC := mac.Sum(nil)\n\tfmt.Println(string(expectedMAC))\n\treturn hmac.Equal(messageMAC, expectedMAC)\n}", "func VerifySignature(addr, signature string) (err error) {\n\tt := time.Now().UTC()\n\tdata := []byte(t.Format(passwordFormat))\n\tsig, err := hex.DecodeString(signature)\n\tif err != nil {\n\t\treturn err\n\t}\n\thash := crypto.Keccak256Hash(data)\n\tpubkey, err := crypto.Ecrecover(hash[:], sig)\n\tif err != nil {\n\t\treturn\n\t}\n\tsender := utils.PubkeyToAddress(pubkey)\n\tif addr != sender.String() {\n\t\treturn errors.New(\"not match\")\n\t}\n\treturn nil\n}", "func TestHMACAttack(t *testing.T) {\n\tkeySize := rand2.Intn(63) + 1\n\trandKey := make([]byte, keySize)\n\t_, _ = rand.Read(randKey)\n\n\thsm := HmacSHA1Module{randKey}\n\tmsg := []byte(\"The quick brown fox jumps over the lazy dog\")\n\n\tgo func() {\n\t\thttp.HandleFunc(\"/\", func(w http.ResponseWriter, r *http.Request) {\n\t\t\tmsgBytes := r.URL.Query().Get(\"file\")\n\t\t\tmsg, err := url.QueryUnescape(msgBytes)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"Failed to decode msg: %s\", err)\n\t\t\t}\n\n\t\t\tdigestHex := r.URL.Query().Get(\"signature\")\n\t\t\tdigest, err := hex.DecodeString(digestHex)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"Failed to decode digest: %s\", err)\n\t\t\t}\n\n\t\t\texpected := hsm.HMACSHA1([]byte(msg))\n\t\t\tif len(expected) != len(digest) {\n\t\t\t\tw.WriteHeader(500)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tfor i := 0; i < len(digest); i++ {\n\t\t\t\ttime.Sleep(5 * time.Millisecond)\n\t\t\t\tif digest[i] != expected[i] {\n\t\t\t\t\tw.WriteHeader(500)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t\tif err := http.ListenAndServe(\":9000\", nil); err != nil {\n\t\t\tt.Fatalf(\"Failed to serve: %s\", err)\n\t\t}\n\t}()\n\n\tsend := func(msg, digest []byte) (bool, time.Duration) {\n\t\tmsgB := url.QueryEscape(string(msg))\n\t\tdigestB := hex.EncodeToString(digest)\n\n\t\treq, err := http.NewRequest(\"GET\", \"http://localhost:9000/\", nil)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Failed to create new http requests: %s\", err)\n\t\t}\n\n\t\tparams := req.URL.Query()\n\t\tparams.Add(\"file\", msgB)\n\t\tparams.Add(\"signature\", digestB)\n\t\treq.URL.RawQuery = params.Encode()\n\n\t\ttimeS := time.Now()\n\t\tresp, err := http.DefaultClient.Do(req)\n\t\ttimeE := time.Now().Sub(timeS)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Failed in http client: %s\", err)\n\t\t}\n\t\tif resp.StatusCode == 200 {\n\t\t\treturn true, timeE\n\t\t}\n\t\treturn false, timeE\n\t}\n\n\tcorMac := make([]byte, 1)\n\tok, timeE := send(msg, corMac)\n\n\tfor timeE.Milliseconds()/5 < 1 {\n\t\tcorMac = append(corMac, byte(0x00))\n\t\tok, timeE = send(msg, corMac)\n\t}\n\tlenMac := len(corMac)\n\n\tfor i := 0; i < lenMac; i++ {\n\t\tmaxTimes := 0\n\t\tvar maxByte byte\n\t\tfor j := 0; j < 256; j++ {\n\t\t\tcorMac[i] = byte(j)\n\t\t\tsum := 0\n\t\t\tfor k := 0; k < 10; k++ {\n\t\t\t\tok, timeE = send(msg, corMac)\n\t\t\t\tsum += int(timeE.Milliseconds())\n\t\t\t}\n\t\t\tif sum > maxTimes {\n\t\t\t\tmaxTimes = sum\n\t\t\t\tmaxByte = byte(j)\n\t\t\t}\n\t\t}\n\t\tcorMac[i] = maxByte\n\t}\n\trequire.EqualValues(t, ok, true)\n}", "func fnHmac(ctx Context, doc *JDoc, params []string) interface{} {\n\tstats := 
ctx.Value(EelTotalStats).(*ServiceStats)\n\tif params == nil || len(params) != 3 {\n\t\tctx.Log().Error(\"error_type\", \"func_hmac\", \"op\", \"hmac\", \"cause\", \"wrong_number_of_parameters\", \"params\", params)\n\t\tstats.IncErrors()\n\t\tAddError(ctx, SyntaxError{fmt.Sprintf(\"wrong number of parameters in call to hmac function\"), \"curl\", params})\n\t\treturn nil\n\t} else {\n\t\thashFunc := extractStringParam(params[0])\n\t\tinput := extractStringParam(params[1])\n\t\tkey := extractStringParam(params[2])\n\t\tif hashFunc == \"SHA1\" {\n\t\t\tkey_for_sign := []byte(key)\n\t\t\th := hmac.New(sha1.New, key_for_sign)\n\t\t\th.Write([]byte(input))\n\t\t\treturn base64.StdEncoding.EncodeToString(h.Sum(nil))\n\t\t} else {\n\t\t\tctx.Log().Error(\"error_type\", \"func_hmac\", \"op\", \"hmac\", \"cause\", \"hash_func_not_yet_support\", \"params\", params)\n\t\t\treturn nil\n\t\t}\n\t}\n\treturn nil\n}", "func (s *Signature) valid() bool {\n\treturn len(s.FirstName) > 0 &&\n\t\tlen(s.LastName) > 0 &&\n\t\tlen(s.Email) > 0 &&\n\t\ts.Age >= 18 && s.Age <= 180 &&\n\t\tlen(s.Message) < 140\n}", "func signSubscription(decoded SubscriptionConfirmation, fullURL string) []byte {\n\t// get timestamp+token hash first\n\tfirstMAC := hmac.New(sha256.New, []byte(decoded.Token))\n\tfirstMAC.Write([]byte(decoded.Timestamp))\n\tfirstHash := firstMAC.Sum(nil)\n\t// then combine that with TopicArn\n\tsecondMAC := hmac.New(sha256.New, firstHash)\n\tsecondMAC.Write([]byte(decoded.TopicArn))\n\tsecondHash := secondMAC.Sum(nil)\n\t// then combine that with full URL\n\tthirdMAC := hmac.New(sha256.New, secondHash)\n\tthirdMAC.Write([]byte(fullURL))\n\tthirdHash := thirdMAC.Sum(nil)\n\treturn thirdHash\n}", "func getHmacKey() string {\n\tkey, keyErr := readSecret(\"faasflow-hmac-secret\")\n\tif keyErr != nil {\n\t\tkey = defaultHmacKey\n\t}\n\treturn key\n}", "func (k Key) Signature() uint32 {\n\treturn uint32(k[8])<<24 | uint32(k[9])<<16 | uint32(k[10])<<8 | uint32(k[11])\n}", "func getHmacCode(s string) string {\n\th := hmac.New(sha256.New, []byte(Cfg.DingTalkSecret))\n\th.Write([]byte(s))\n\treturn base64.StdEncoding.EncodeToString(h.Sum(nil))\n}", "func (c *CryptoSigner) VerifySignature(ctx context.Context, jwt string) (payload []byte, err error) {\n\tjws, err := jose.ParseSigned(jwt)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to parse JWT: %w\", err)\n\t}\n\n\tvar found bool\n\tfor _, sig := range jws.Signatures {\n\t\tif sig.Header.KeyID == c.keyID {\n\t\t\tfound = true\n\t\t}\n\t}\n\tif !found {\n\t\treturn nil, fmt.Errorf(\"key not found in jwt headers\")\n\t}\n\n\tpayload, err = jws.Verify(c.pubKeys.Keys[0].Public())\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to verify JWT: %w\", err)\n\t}\n\n\treturn payload, nil\n}", "func (sig *Signature) VerifySignature(publicKey interface{}, encoding string) bool {\n\tif sig.Data == nil {\n\t\tlog.Warn(\"sig does not contain signature data\", \"sig\", sig)\n\t\treturn false\n\t}\n\tif publicKey == nil {\n\t\tlog.Warn(\"PublicKey is nil\")\n\t\treturn false\n\t}\n\tencoding += sig.GetSignatureMetaData().String()\n\tdata := []byte(encoding)\n\tswitch sig.Algorithm {\n\tcase Ed25519:\n\t\tif pkey, ok := publicKey.(ed25519.PublicKey); ok {\n\t\t\treturn ed25519.Verify(pkey, data, sig.Data.([]byte))\n\t\t}\n\t\tlog.Warn(\"Could not assert type ed25519.PublicKey\", \"publicKeyType\", fmt.Sprintf(\"%T\", publicKey))\n\tcase Ed448:\n\t\tlog.Warn(\"Ed448 not yet Supported!\")\n\tcase Ecdsa256:\n\t\tif pkey, ok := publicKey.(*ecdsa.PublicKey); ok 
{\n\t\t\tif sig, ok := sig.Data.([]*big.Int); ok && len(sig) == 2 {\n\t\t\t\thash := sha256.Sum256(data)\n\t\t\t\treturn ecdsa.Verify(pkey, hash[:], sig[0], sig[1])\n\t\t\t}\n\t\t\tlog.Warn(\"Could not assert type []*big.Int\", \"signatureDataType\", fmt.Sprintf(\"%T\", sig.Data))\n\t\t\treturn false\n\t\t}\n\t\tlog.Warn(\"Could not assert type ecdsa.PublicKey\", \"publicKeyType\", fmt.Sprintf(\"%T\", publicKey))\n\tcase Ecdsa384:\n\t\tif pkey, ok := publicKey.(*ecdsa.PublicKey); ok {\n\t\t\tif sig, ok := sig.Data.([]*big.Int); ok && len(sig) == 2 {\n\t\t\t\thash := sha512.Sum384(data)\n\t\t\t\treturn ecdsa.Verify(pkey, hash[:], sig[0], sig[1])\n\t\t\t}\n\t\t\tlog.Warn(\"Could not assert type []*big.Int\", \"signature\", sig.Data)\n\t\t\treturn false\n\t\t}\n\t\tlog.Warn(\"Could not assert type ecdsa.PublicKey\", \"publicKeyType\", fmt.Sprintf(\"%T\", publicKey))\n\tdefault:\n\t\tlog.Warn(\"Signature algorithm type not supported\", \"type\", sig.Algorithm)\n\t}\n\treturn false\n}", "func hashSignature(alg Algorithm, signatureValue string, secret *pbauth.Secret) (string, error) {\n\tif strings.TrimSpace(signatureValue) == \"\" {\n\t\treturn \"\", consts.ErrInvalidSignatureValue\n\t}\n\tif err := ValidateSecret(secret); err != nil {\n\t\treturn \"\", err\n\t}\n\tkey := []byte(secret.Key)\n\tvar h hash.Hash\n\tswitch alg {\n\tcase Hs256:\n\t\th = hmac.New(sha256.New, key)\n\tcase Hs512:\n\t\th = hmac.New(sha512.New, key)\n\tdefault:\n\t\treturn \"\", consts.ErrNoHashAlgorithm\n\t}\n\th.Write([]byte(signatureValue))\n\treturn base64.URLEncoding.EncodeToString(h.Sum(nil)), nil\n}", "func signatureMetaToString(sig *cb.SignaturePolicyEnvelope) (string, error) {\n\tvar roles []string\n\tfor _, id := range sig.Identities {\n\t\trole, err := mspPrincipalToString(id)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\troles = append(roles, role)\n\t}\n\treturn signaturePolicyToString(sig.Rule, roles)\n}", "func (is *Signer) BuildSignature(request *http.Request) (string, error) {\n\tstringToSign, err := is.BuildStringToSign(request)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\th := hmac.New(sha256.New, []byte(is.SecretAccessKey))\n\th.Write([]byte(stringToSign))\n\n\tsignature := strings.TrimSpace(base64.StdEncoding.EncodeToString(h.Sum(nil)))\n\tsignature = strings.Replace(signature, \" \", \"+\", -1)\n\tsignature = url.QueryEscape(signature)\n\n\tlogger.Debug(fmt.Sprintf(\n\t\t\"QingCloud signature: [%d] %s\",\n\t\tutils.StringToUnixInt(request.Header.Get(\"Date\"), \"RFC 822\"),\n\t\tsignature))\n\tif request.Method == \"GET\" {\n\t\tis.BuiltURL += \"&signature=\" + signature\n\t} else if request.Method == \"POST\" {\n\t\tis.BuiltForm += \"&signature=\" + signature\n\t}\n\n\treturn signature, nil\n}", "func main() {\n\tmsg := []byte(\"Bob loves Alice.\")\n\tkey := []byte(\"passw0rd\")\n\n\th1 := hmac.New(sha512.New, key)\n\th1.Write(msg)\n\tmac1 := h1.Sum(nil)\n\tfmt.Printf(\"MAC1: %x\\n\", mac1)\n\n\th2 := hmac.New(sha512.New, key)\n\th2.Write(msg)\n\tmac2 := h2.Sum(nil)\n\tfmt.Printf(\"MAC2: %x\\n\", mac2)\n\n\tfmt.Printf(\"Valid? 
%v\\n\", hmac.Equal(mac1, mac2))\n}", "func VerifySignature(msg []byte, sig []byte, pubkey1 []byte) int {\n\tif msg == nil || sig == nil || pubkey1 == nil {\n\t\tlog.Panic(\"VerifySignature, ERROR: invalid input, nils\")\n\t}\n\tif len(sig) != 65 {\n\t\tlog.Panic(\"VerifySignature, invalid signature length\")\n\t}\n\tif len(pubkey1) != 33 {\n\t\tlog.Panic(\"VerifySignature, invalid pubkey length\")\n\t}\n\n\t//malleability check:\n\t//to enforce malleability, highest bit of S must be 1\n\t//S starts at 32nd byte\n\t//0x80 is 0b10000000 or 128 and masks highest bit\n\tif (sig[32] >> 7) == 1 {\n\t\treturn 0 //valid signature, but fails malleability\n\t}\n\n\tif sig[64] >= 4 {\n\t\treturn 0 //recover byte invalid\n\t}\n\n\tpubkey2 := RecoverPubkey(msg, sig) //if pubkey recovered, signature valid\n\n\tif pubkey2 == nil {\n\t\treturn 0\n\t}\n\n\tif len(pubkey2) != 33 {\n\t\tlog.Panic(\"recovered pubkey length invalid\")\n\t}\n\n\tif bytes.Equal(pubkey1, pubkey2) != true {\n\t\treturn 0 //pubkeys do not match\n\t}\n\n\treturn 1 //valid signature\n}", "func C31VerifyHMAC(rw http.ResponseWriter, rq *http.Request) {\n\tsecret := []byte(\"THIS IS A SECRET DON'T TELL ANYONE\")\n\tfile := []byte(rq.URL.Query().Get(\"file\"))\n\thmac, _ := hex.DecodeString(rq.URL.Query().Get(\"signature\"))\n\thmac = PadLeft(hmac, 0, 20)\n\tfileHmac := HMACSHA1(secret, file)\n\tif InsecureCompare(fileHmac, hmac) {\n\t\trw.Write([]byte(\"OK\"))\n\t} else {\n\t\trw.WriteHeader(http.StatusInternalServerError)\n\t\trw.Write([]byte(\"Invalid hash\"))\n\t}\n}", "func ValidateSignature(hash, cipher string, key []byte) error {\n\tif util.IsSignValid(hash, cipher, key) {\n\t\treturn nil\n\t}\n\treturn ErrInvalidSignature\n}", "func VerifyHmacSha1(message []byte, secret string, signature string) bool {\n\n\tsign := HmacSha1(message, secret)\n\n\tactual := make([]byte, 20)\n\thex.Decode(actual, []byte(signature))\n\n\treturn hmac.Equal(sign, actual)\n}", "func (cs CommitSig) String() string {\n\treturn fmt.Sprintf(\"CommitSig{%X by %X on %v @ %s}\",\n\t\ttmbytes.Fingerprint(cs.Signature),\n\t\ttmbytes.Fingerprint(cs.ValidatorAddress),\n\t\tcs.BlockIDFlag,\n\t\tCanonicalTime(cs.Timestamp))\n}", "func signatureMetaToString(sig *cb.SignaturePolicyEnvelope) (string, error) {\n\tvar roles []string\n\n\tfor _, id := range sig.Identities {\n\t\trole, err := mspPrincipalToString(id)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\troles = append(roles, role)\n\t}\n\n\treturn signaturePolicyToString(sig.Rule, roles)\n}", "func GetAuthStr(accessKeyID string, accessKeySecret string, method string, header map[string]string, resource string) string {\n\treturn \"FC \" + accessKeyID + \":\" + GetSignature(accessKeySecret, method, header, resource)\n}", "func sign(credentials Credentials, req Request, option *SignOption) string {\n\tsigningKey := getSigningKey(credentials, option)\n\treq.prepareHeaders(option)\n\tcanonicalRequest := req.canonical(option)\n\tsignature := util.HmacSha256Hex(signingKey, canonicalRequest)\n\n\treturn signature\n}" ]
[ "0.65445095", "0.62604153", "0.5990109", "0.590877", "0.58225965", "0.58003604", "0.57954544", "0.57604545", "0.57391244", "0.572719", "0.5722057", "0.5715614", "0.56788564", "0.5632296", "0.56035364", "0.5602562", "0.5569063", "0.5566009", "0.55550116", "0.5551403", "0.5543849", "0.5543436", "0.55233353", "0.55212563", "0.5520631", "0.5511979", "0.5484066", "0.5477652", "0.54614925", "0.5450374", "0.543858", "0.542232", "0.54021776", "0.53805226", "0.5372603", "0.53548664", "0.53432894", "0.5339126", "0.5332623", "0.5329134", "0.5326757", "0.53167456", "0.53118753", "0.5302435", "0.5273249", "0.5266227", "0.5265534", "0.52577746", "0.5249572", "0.52475363", "0.52393216", "0.5230948", "0.5214391", "0.52082586", "0.5206076", "0.5198871", "0.5196048", "0.5193893", "0.5185868", "0.5179064", "0.5163613", "0.5162719", "0.5157416", "0.5155658", "0.51367575", "0.5126884", "0.51087976", "0.5101826", "0.5101289", "0.50991654", "0.5098947", "0.5092596", "0.50883", "0.5087602", "0.5086594", "0.5080287", "0.50787014", "0.5050306", "0.50450176", "0.50404704", "0.5039411", "0.50349516", "0.50286454", "0.50243217", "0.5024261", "0.4994728", "0.49939394", "0.49921834", "0.4990425", "0.49877915", "0.49723634", "0.49695027", "0.49674067", "0.49671698", "0.49667424", "0.49637124", "0.496271", "0.49626124", "0.49558344", "0.49517024" ]
0.51911163
58
pingLoop periodically sends a ping to all remote clusters.
func (rcs *Service) pingLoop(done <-chan struct{}) {
	pingChan := make(chan *model.RemoteCluster, MaxConcurrentSends*2)

	// create a thread pool to send pings concurrently to remotes.
	for i := 0; i < MaxConcurrentSends; i++ {
		go rcs.pingEmitter(pingChan, done)
	}

	go rcs.pingGenerator(pingChan, done)
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func pingLoop(results chan Host, hostRegistry *HostRegistry, interval time.Duration, timeout time.Duration) {\n\tfor {\n\t\thostAddresses := hostRegistry.GetHostAddresses()\n\n\t\tlog.Info(\"Pinging these addresses: %q\\n\", hostAddresses)\n\n\t\tfor _, address := range hostAddresses {\n\t\t\tlog.Debug(\"Pinging: %v\\n\", address)\n\n\t\t\thost, err := hostRegistry.GetHost(address)\n\t\t\tif err != nil {\n\t\t\t\tlog.Warning(\"GetHost() returned error=%v for address=%v\", err, address)\n\t\t\t}\n\n\t\t\tgo pingAddress(results, host, timeout)\n\t\t}\n\n\t\tlog.Debug(\"Started pings for all hosts. Sleeping for: %v\", interval)\n\t\ttime.Sleep(interval)\n\t}\n}", "func (s *Server) loop() {\n\theartbeatTicker := s.clock.NewTicker(heartbeatInterval)\n\tdefer heartbeatTicker.Stop()\n\n\tresyncTicker := s.clock.NewTicker(resyncInterval)\n\tdefer resyncTicker.Stop()\n\n\tfor {\n\t\tselect {\n\t\t//\n\t\t// Re-sync cluster peers\n\t\t//\n\t\tcase <-resyncTicker.Chan():\n\t\t\terr := s.resyncPeerList()\n\t\t\tif err != nil {\n\t\t\t\ts.WithError(err).Error(\"Unexpected error re-syncing the list of peer nodes.\")\n\t\t\t}\n\n\t\t\terr = s.resyncNethealthPods()\n\t\t\tif err != nil {\n\t\t\t\ts.WithError(err).Error(\"Unexpected error re-syncing the list of peer pods.\")\n\t\t\t}\n\t\tcase <-s.triggerResync:\n\t\t\terr := s.resyncPeerList()\n\t\t\tif err != nil {\n\t\t\t\ts.WithError(err).Error(\"Unexpected error re-syncing the list of peer nodes.\")\n\t\t\t}\n\n\t\t\terr = s.resyncNethealthPods()\n\t\t\tif err != nil {\n\t\t\t\ts.WithError(err).Error(\"Unexpected error re-syncing the list of peer pods.\")\n\t\t\t}\n\n\t\t//\n\t\t// Send a heartbeat to each peer we know about\n\t\t// Check for peers that are timing out / down\n\t\t//\n\t\tcase <-heartbeatTicker.Chan():\n\t\t\ts.checkTimeouts()\n\t\t\tfor _, peer := range s.peers {\n\t\t\t\ts.sendHeartbeat(peer)\n\t\t\t}\n\n\t\t//\n\t\t// Rx heartbeats responses from peers\n\t\t//\n\t\tcase rx := <-s.rxMessage:\n\t\t\terr := s.processAck(rx)\n\t\t\tif err != nil {\n\t\t\t\ts.WithFields(logrus.Fields{\n\t\t\t\t\tlogrus.ErrorKey: err,\n\t\t\t\t\t\"peer_addr\": rx.peerAddr,\n\t\t\t\t\t\"rx_time\": rx.rxTime,\n\t\t\t\t\t\"message\": rx.message,\n\t\t\t\t}).Error(\"Error processing icmp message.\")\n\t\t\t}\n\t\t}\n\t}\n}", "func pingNodesLoop() {\n\tstop := false\n\tfor !stop {\n\t\tpingNodes()\n\t\t// TODO longer ping interval\n\t\ttime.Sleep(300 * time.Second)\n\t}\n}", "func pingLoop(client *Client) {\n\t// Create ticker to send pings every two minutes.\n\tticker := time.NewTicker(time.Minute * 2)\n\tfor {\n\t\tselect {\n\t\t// If the client is done, stop the time and goroutine.\n\t\tcase <-client.Done:\n\t\t\tticker.Stop()\n\t\t\treturn\n\t\t// Loop pings to keep connection alive.\n\t\tcase <-ticker.C:\n\t\t\tSendPing(client, strconv.FormatInt(time.Now().UnixNano(), 10))\n\t\t}\n\t}\n}", "func (me *Mgr) doPing() {\n\tfor !me.stopped {\n\t\tme.workers.Scan(func(id string, w interface{}) {\n\t\t\terr := w.(*Worker).Ping()\n\t\t\tif err != DEADERR {\n\t\t\t\treturn\n\t\t\t}\n\t\t\t// dead\n\t\t\tme.deadChan <- id\n\t\t\tme.deadWorkers.Set(id, []byte(\"OK\"))\n\t\t\tme.workers.Delete(id)\n\t\t})\n\t\ttime.Sleep(15 * time.Second)\n\t}\n}", "func (PingCIMunger) EachLoop(_ *github_util.Config) error { return nil }", "func (conn *Conn) ping() {\n\ttick := time.NewTicker(conn.PingFreq)\n\tfor {\n\t\tselect {\n\t\tcase <-tick.C:\n\t\t\tconn.Raw(fmt.Sprintf(\"PING :%d\", time.Now().UnixNano()))\n\t\tcase 
<-conn.cPing:\n\t\t\ttick.Stop()\n\t\t\treturn\n\t\t}\n\t}\n}", "func (conn *Conn) ping(ctx context.Context) {\n\tdefer conn.wg.Done()\n\ttick := time.NewTicker(conn.cfg.PingFreq)\n\tfor {\n\t\tselect {\n\t\tcase <-tick.C:\n\t\t\tconn.Ping(fmt.Sprintf(\"%d\", time.Now().UnixNano()))\n\t\tcase <-ctx.Done():\n\t\t\t// control channel closed, bail out\n\t\t\ttick.Stop()\n\t\t\treturn\n\t\t}\n\t}\n}", "func periodicPing() {\n\tfor {\n\t\t// Shuffle membership list and get a member\n\t\t// Only executed when the membership list is not empty\n\t\tif CurrentList.Size() > 0 {\n\t\t\tmember := CurrentList.Shuffle()\n\t\t\t// Do not pick itself as the ping target\n\t\t\tif (member.TimeStamp == CurrentMember.TimeStamp) && (member.IP == CurrentMember.IP) {\n\t\t\t\ttime.Sleep(PingSendingPeriod)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tLogger.Info(\"Member (%d, %d) is selected by shuffling\\n\", member.TimeStamp, member.IP)\n\t\t\t// Get update entry from TTL Cache\n\t\t\tupdate, flag, err := getUpdate()\n\t\t\t// if no update there, do pure ping\n\t\t\tif err != nil {\n\t\t\t\tping(member)\n\t\t\t} else {\n\t\t\t\t// Send update as payload of ping\n\t\t\t\tpingWithPayload(member, update, flag)\n\t\t\t}\n\t\t}\n\t\ttime.Sleep(PingSendingPeriod)\n\t}\n}", "func (connection *Connection) ping() {\n\tfor {\n\t\ttime.Sleep(1 * time.Second)\n\t\tif len(connection.consumers) > 0 {\n\t\t\t//do some ping, if no response then kill it\n\t\t\tfor _, consumer := range connection.consumers {\n\t\t\t\t_, pingError := consumer.connection.Write([]byte(\"hunga\"))\n\t\t\t\tif pingError != nil {\n\t\t\t\t\t// fmt.Print(\"PING ERROR\")\n\t\t\t\t\tconnection.killConsumer(consumer.id)\n\t\t\t\t} else {\n\t\t\t\t\tconnection.getConsumerMessage(consumer.id)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}", "func (m *Monitor) runMonitorLoop() error {\n\t// Get all nodes in cluster\n\tcfg, err := kubeutils.BuildConfig()\n\tif err != nil {\n\t\treturn err\n\t}\n\tclient, err := clientset.NewForConfig(cfg)\n\tif err != nil {\n\t\treturn err\n\t}\n\tklog.Info(\"started master\")\n\tvar deadNodes []*v1.Node\n\tfor {\n\t\t// Don't thrash here..\n\t\tklog.V(4).Info(\"little pause before work\")\n\t\ttime.Sleep(pausePollingSecs)\n\n\t\t// Get all the nodes - that have been reported as UnReachable...\n\t\t// reporting happens using configmaps in specified namespace\n\t\tdeadNodes, err = kubeutils.GetUnreachableNodes(client, m.namespace)\n\t\tif err != nil {\n\t\t\tklog.Errorf(\"error getting nodes reported as unreachable: %s\", err)\n\t\t\t// Try again\n\t\t\tcontinue\n\t\t}\n\t\tklog.V(3).Infof(\"got an unreachable node list (%d nodes)\", len(deadNodes))\n\n\t\t// reap any nodes as required...\n\t\tif m.reap && len(deadNodes) > 0 {\n\t\t\tklog.V(4).Info(\"We are set to reap\")\n\t\t\tfor _, node := range deadNodes {\n\t\t\t\tif err := reaper.Reap(node, client, m.dryRun); err != nil {\n\t\t\t\t\tklog.Errorf(\"error reaping %s, %s\", node.Name, err)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}", "func (rcs *Service) pingEmitter(pingChan <-chan *model.RemoteCluster, done <-chan struct{}) {\n\tfor {\n\t\tselect {\n\t\tcase rc := <-pingChan:\n\t\t\tif rc == nil {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tonline := rc.IsOnline()\n\n\t\t\tif err := rcs.pingRemote(rc); err != nil {\n\t\t\t\trcs.server.Log().Log(mlog.LvlRemoteClusterServiceWarn, \"Remote cluster ping failed\",\n\t\t\t\t\tmlog.String(\"remote\", rc.DisplayName),\n\t\t\t\t\tmlog.String(\"remoteId\", rc.RemoteId),\n\t\t\t\t\tmlog.Err(err),\n\t\t\t\t)\n\t\t\t}\n\n\t\t\tif online != rc.IsOnline() {\n\t\t\t\tif metrics 
:= rcs.server.GetMetrics(); metrics != nil {\n\t\t\t\t\tmetrics.IncrementRemoteClusterConnStateChangeCounter(rc.RemoteId, rc.IsOnline())\n\t\t\t\t}\n\t\t\t\trcs.fireConnectionStateChgEvent(rc)\n\t\t\t}\n\t\tcase <-done:\n\t\t\treturn\n\t\t}\n\t}\n}", "func pinger(c chan<- string) {\n\tfor i := 0; ; i++ {\n\t\tc <- \"Ping\"\n\t}\n}", "func Ping(node *shared.Node) {\n\tfor {\n\t\tblockchain.SwimBatchPuzzleGenerator(node)\n\n\t\ttime.Sleep(pingInterval)\n\t\ttarget := node.MembersSet.GetRandom()\n\t\tif target == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\ttargetPeer := strings.Split(target, \" \")\n\t\tip := targetPeer[0]\n\t\tport := targetPeer[1]\n\t\tconn, err := net.Dial(\"tcp\", ip+\":\"+port)\n\t\tif err != nil {\n\t\t\t// failure detected!\n\t\t\tif strings.HasSuffix(err.Error(), \"connect: connection refused\") {\n\t\t\t\tnode.MembersSet.SetDelete(target)\n\t\t\t\tnode.FailMsgBuffer.Add(target)\n\t\t\t\tfmt.Println(\"FAILURE DETECTED \" + target)\n\t\t\t} else {\n\t\t\t\tfmt.Println(\"Dial Error: \", err)\n\t\t\t}\n\t\t} else {\n\t\t\t// SWIM Implementation would send membership update message here\n\t\t\tswimMsg := \"DEAD \" + strings.Join(node.FailMsgBuffer.GetN(10), \",\") + \"\\n\"\n\t\t\tlogBandwithInfo(\"Send\", len(swimMsg))\n\t\t\tfmt.Fprintf(conn, swimMsg)\n\t\t\tfmt.Print(\"SWIM SENT \" + swimMsg)\n\t\t\ttransactionsMsg := strings.Join(node.TransactionBuffer.GetN(10000), \"\\n\") + \"\\n\"\n\t\t\tlogBandwithInfo(\"Send\", len(transactionsMsg))\n\t\t\tfmt.Fprintf(conn, transactionsMsg)\n\t\t\tfor _, block := range node.BlockBuffer.GetAll() {\n\t\t\t\tblockchain.SendBlock(node, conn, block)\n\t\t\t}\n\n\t\t\tconn.Close()\n\t\t}\n\t}\n}", "func PingEveryone(self *State) {\n\tfor i := 0; i < len(self.AllPorts); i ++ {\n\t\tif self.AllPorts[i] != self.ListenPort {\n\t\t\tgo ping(self, self.AllPorts[i])\n\t\t}\n\t}\n}", "func ping() error {\n\tfor i := 0; i < 10; i++ {\n\t\t// Ping the server by sending a GET request to `/health`.\n\t\tresp, err := http.Get(\"http://localhost\" + viper.GetString(\"addr\") + \"/\")\n\t\tif err == nil && resp.StatusCode == 200 {\n\t\t\treturn nil\n\t\t}\n\n\t\t// Sleep for a second to continue the next ping.\n\t\tlog.Infoln(\"Waiting for the router, retry in 1 second.\")\n\t\ttime.Sleep(time.Second)\n\t}\n\treturn errors.New(\"app is not working\")\n}", "func ping(c chan string) {\n\n\t// increment by 1 on each loop iteration\n\tfor i := 1; ; i++ {\n\t\tc <- fmt.Sprintf(\"ping %v\", i)\n\t}\n}", "func (rcs *Service) pingRemote(rc *model.RemoteCluster) error {\n\tframe, err := makePingFrame(rc)\n\tif err != nil {\n\t\treturn err\n\t}\n\turl := fmt.Sprintf(\"%s/%s\", rc.SiteURL, PingURL)\n\n\tresp, err := rcs.sendFrameToRemote(PingTimeout, rc, frame, url)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tping := model.RemoteClusterPing{}\n\terr = json.Unmarshal(resp, &ping)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err := rcs.server.GetStore().RemoteCluster().SetLastPingAt(rc.RemoteId); err != nil {\n\t\trcs.server.Log().Log(mlog.LvlRemoteClusterServiceError, \"Failed to update LastPingAt for remote cluster\",\n\t\t\tmlog.String(\"remote\", rc.DisplayName),\n\t\t\tmlog.String(\"remoteId\", rc.RemoteId),\n\t\t\tmlog.Err(err),\n\t\t)\n\t}\n\trc.LastPingAt = model.GetMillis()\n\n\tif metrics := rcs.server.GetMetrics(); metrics != nil {\n\t\tsentAt := time.Unix(0, ping.SentAt*int64(time.Millisecond))\n\t\telapsed := time.Since(sentAt).Seconds()\n\t\tmetrics.ObserveRemoteClusterPingDuration(rc.RemoteId, elapsed)\n\n\t\t// we approximate clock skew 
between remotes.\n\t\tskew := elapsed/2 - float64(ping.RecvAt-ping.SentAt)/1000\n\t\tmetrics.ObserveRemoteClusterClockSkew(rc.RemoteId, skew)\n\t}\n\n\trcs.server.Log().Log(mlog.LvlRemoteClusterServiceDebug, \"Remote cluster ping\",\n\t\tmlog.String(\"remote\", rc.DisplayName),\n\t\tmlog.String(\"remoteId\", rc.RemoteId),\n\t\tmlog.Int64(\"SentAt\", ping.SentAt),\n\t\tmlog.Int64(\"RecvAt\", ping.RecvAt),\n\t\tmlog.Int64(\"Diff\", ping.RecvAt-ping.SentAt),\n\t)\n\treturn nil\n}", "func (s *Service) loop() {\n\tspan, ctx := trace.StartSpanFromContextWithTraceID(context.Background(), \"\", \"service-loop\")\n\n\tif s.ClusterReportIntervalS == 0 {\n\t\ts.ClusterReportIntervalS = defaultClusterReportIntervalS\n\t}\n\tif s.HeartbeatNotifyIntervalS == 0 {\n\t\ts.HeartbeatNotifyIntervalS = defaultHeartbeatNotifyIntervalS\n\t}\n\tif s.MaxHeartbeatNotifyNum <= 0 {\n\t\ts.MaxHeartbeatNotifyNum = defaultMaxHeartbeatNotifyNum\n\t}\n\tif s.MetricReportIntervalM <= 0 {\n\t\ts.MetricReportIntervalM = defaultMetricReportIntervalM\n\t}\n\tif s.ConsistentCheckIntervalM <= 0 {\n\t\ts.ConsistentCheckIntervalM = defaultCheckConsistentIntervalM\n\t}\n\n\treportTicker := time.NewTicker(time.Duration(s.ClusterReportIntervalS) * time.Second)\n\tdefer reportTicker.Stop()\n\theartbeatNotifyTicker := time.NewTicker(time.Duration(s.HeartbeatNotifyIntervalS) * time.Second)\n\tdefer heartbeatNotifyTicker.Stop()\n\n\tmetricReportTicker := time.NewTicker(time.Duration(s.MetricReportIntervalM) * time.Minute)\n\tdefer metricReportTicker.Stop()\n\n\tcheckTicker := time.NewTicker(time.Duration(s.ConsistentCheckIntervalM) * time.Minute)\n\tdefer checkTicker.Stop()\n\n\tfor {\n\t\tselect {\n\t\tcase <-reportTicker.C:\n\t\t\tif s.ConsulAgentAddr == \"\" {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tclusterInfo := clustermgr.ClusterInfo{\n\t\t\t\tRegion: s.Region,\n\t\t\t\tClusterID: s.ClusterID,\n\t\t\t\tReadonly: s.Readonly,\n\t\t\t\tNodes: make([]string, 0),\n\t\t\t}\n\t\t\tspaceStatInfo := s.DiskMgr.Stat(ctx)\n\t\t\tclusterInfo.Capacity = spaceStatInfo.TotalSpace\n\t\t\tclusterInfo.Available = spaceStatInfo.WritableSpace\n\t\t\t// filter learner node\n\t\t\tpeers := s.raftNode.Status().Peers\n\t\t\tpeersM := make(map[uint64]raftserver.Peer)\n\t\t\tfor i := range peers {\n\t\t\t\tpeersM[peers[i].Id] = peers[i]\n\t\t\t}\n\t\t\tfor id, node := range s.raftNode.GetNodes() {\n\t\t\t\tif peersM[id].IsLearner {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tclusterInfo.Nodes = append(clusterInfo.Nodes, s.RaftConfig.RaftNodeConfig.NodeProtocol+node)\n\t\t\t}\n\n\t\t\tval, err := json.Marshal(clusterInfo)\n\t\t\tif err != nil {\n\t\t\t\tspan.Error(\"json marshal clusterInfo failed, err: \", err)\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tclusterKey := clustermgr.GetConsulClusterPath(s.Region) + s.ClusterID.ToString()\n\t\t\t_, err = s.consulClient.KV().Put(&api.KVPair{Key: clusterKey, Value: val}, nil)\n\t\t\tif err != nil {\n\t\t\t\tspan.Error(\"update clusterInfo into consul failed, err: \", err)\n\t\t\t}\n\t\tcase <-heartbeatNotifyTicker.C:\n\t\t\tif !s.raftNode.IsLeader() {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tchanges := s.DiskMgr.GetHeartbeatChangeDisks()\n\t\t\t// report heartbeat change metric\n\t\t\ts.reportHeartbeatChange(float64(len(changes)))\n\t\t\t// in some case, like cm's network problem, it may trigger a mounts of disk heartbeat change\n\t\t\t// in this situation, we need to ignore it and do some alert\n\t\t\tif len(changes) > s.MaxHeartbeatNotifyNum {\n\t\t\t\tspan.Error(\"a lots of disk heartbeat change happen: \", 
changes)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tfor i := range changes {\n\t\t\t\tspan.Debugf(\"notify disk heartbeat change, change info: %v\", changes[i])\n\t\t\t\terr := s.VolumeMgr.DiskWritableChange(ctx, changes[i].DiskID)\n\t\t\t\tif err != nil {\n\t\t\t\t\tspan.Error(\"notify disk heartbeat change failed, err: \", err)\n\t\t\t\t}\n\t\t\t}\n\t\tcase <-metricReportTicker.C:\n\t\t\ts.metricReport(ctx)\n\t\tcase <-checkTicker.C:\n\t\t\tif !s.raftNode.IsLeader() {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tgo func() {\n\t\t\t\tclis := make([]*clustermgr.Client, 0)\n\t\t\t\tpeers := s.raftNode.Status().Peers\n\t\t\t\tpeersM := make(map[uint64]raftserver.Peer)\n\t\t\t\tfor i := range peers {\n\t\t\t\t\tpeersM[peers[i].Id] = peers[i]\n\t\t\t\t}\n\t\t\t\tfor id, node := range s.raftNode.GetNodes() {\n\t\t\t\t\tif peersM[id].IsLearner {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\thost := s.RaftConfig.RaftNodeConfig.NodeProtocol + node\n\t\t\t\t\tcli := clustermgr.New(&clustermgr.Config{LbConfig: rpc.LbConfig{Hosts: []string{host}}})\n\t\t\t\t\tclis = append(clis, cli)\n\t\t\t\t}\n\t\t\t\tif len(clis) <= 1 {\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tiVids, err := s.checkVolInfos(ctx, clis)\n\t\t\t\tif err != nil {\n\t\t\t\t\tspan.Errorf(\"get checkVolInfos failed:%v\", err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tif len(iVids) != 0 {\n\t\t\t\t\t// readIndex request may be aggregated,which could temporarily lead to each nodes volume info not equal\n\t\t\t\t\t// so use get volume do double check\n\t\t\t\t\tactualIVids, err := s.doubleCheckVolInfos(ctx, clis, iVids)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tspan.Errorf(\"double check vids:%v volume info failed:%v\", iVids, err)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\tif len(actualIVids) != 0 {\n\t\t\t\t\t\ts.reportInConsistentVols(actualIVids)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}()\n\n\t\tcase <-s.closeCh:\n\t\t\treturn\n\t\t}\n\t}\n}", "func (p *Pinger) Ping() error {\r\n\tconn, err := p.NewConn()\r\n\tif err != nil {\r\n\t\treturn err\r\n\t}\r\n\tp.conn = conn\r\n\tfor i := 0; i < p.amt; i++ {\r\n\t\terr = p.SendOnePing(i, conn)\r\n\t}\r\n\treturn err\r\n}", "func (c *Channel) SendLoop() {\n\tfor msg := range c.Messages {\n\t\tif !c.Alive {\n\t\t\treturn\n\t\t}\n\t\tc.LogF(\"Sending msg `%s` to `%d` members in `%s`\", msg, len(c.Members), c.GetName())\n\t\tfor _, v := range c.Members {\n\t\t\tv.Write(msg)\n\t\t}\n\t}\n}", "func (c *connection) sendLoop() {\n\tc.group.Add(1)\n\tvar id int\n\tfor msg := range c.out {\n\t\ttime.Sleep(0)\n\t\tid = int(msg[0])\n\t\tif id == c.myId {\n\t\t\tc.in <- msg\n\t\t} else {\n\t\t\tif id >= len(c.peers) {\n\t\t\t\tgo func() {\n\t\t\t\t\ttime.Sleep(time.Millisecond * 500)\n\t\t\t\t\tc.out <- msg\n\t\t\t\t}()\n\t\t\t} else {\n\t\t\t\tmsg[0] = 1\n\n\t\t\t\twrite(c.peers[id].conn, msg)\n\t\t\t}\n\t\t}\n\t}\n\tc.running = false\n\tc.group.Done()\n\tc.group.Wait()\n\tclose(c.in)\n}", "func pinger(c chan string) {\n\tfor i := 0; ; i++ {\n\t\tc <- \"ping\"\n\t}\n}", "func (c *Conn) pinger() {\n\tticker := time.NewTicker(PingInterval)\n\tdefer ticker.Stop()\n\tfor {\n\t\tselect {\n\t\tcase <-c.done:\n\t\t\treturn\n\t\tcase <-ticker.C:\n\t\t\tif _, err := c.write(websocket.PingMessage, []byte{}); err != nil {\n\t\t\t\t_ = c.Close()\n\t\t\t}\n\t\t}\n\t}\n}", "func (this *RpcObject) Loop() {\n\tfor this.IsRun {\n\t\tstart := time.Now()\n\t\tthis.ExecuteEvent()\n\t\tdelta := MAX_SLEEP_TIME - time.Now().Sub(start)\n\t\tif delta > 0 {\n\t\t\ttime.Sleep(delta)\n\t\t} else {\n\t\t\truntime.Gosched()\n\t\t}\n\t}\n}", "func (p *Pool) 
heartbeat() {\n\tgo func() {\n\t\tfor {\n\t\t\tp.Cmd(\"PING\")\n\t\t\ttime.Sleep(1 * time.Second)\n\t\t}\n\t}()\n}", "func (s *Runservice) etcdPingerLoop(ctx context.Context) {\n\tfor {\n\t\tif err := s.etcdPinger(ctx); err != nil {\n\t\t\tlog.Errorf(\"err: %+v\", err)\n\t\t}\n\n\t\tsleepCh := time.NewTimer(1 * time.Second).C\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn\n\t\tcase <-sleepCh:\n\t\t}\n\t}\n}", "func pinger(wg *sync.WaitGroup, configuration *config) {\n\tfor {\n\t\tfor i := 0; i < len(configuration.Address); i++ {\n\n\t\t\t//Ping syscall, -c ping count, -i interval, -w timeout\n\t\t\tout, _ := exec.Command(\"ping\", configuration.Address[i], \"-c 5\", \"-i 3\", \"-w 10\").Output()\n\t\t\tif (strings.Contains(string(out), \"Destination Host Unreachable\")) || (strings.Contains(string(out), \"100% packet loss\")) {\n\t\t\t\tfmt.Println(\"Server down\")\n\t\t\t\tvar (\n\t\t\t\t\thost = \"xxx\"\n\t\t\t\t\tuser = \"xxx\"\n\t\t\t\t\tpass = \"xxx\"\n\t\t\t\t\trecipent = \"xxx\"\n\t\t\t\t)\n\t\t\t\t//recipent := configuration.Recipient[\"Recipinet\"+strconv.Itoa(i+1)]\n\n\t\t\t\tconfig := mailer.Config{\n\t\t\t\t\tHost: host,\n\t\t\t\t\tPort: 465,\n\t\t\t\t\tUser: user,\n\t\t\t\t\tPass: pass,\n\t\t\t\t}\n\n\t\t\t\tMailer := mailer.NewMailer(config, true)\n\n\t\t\t\tmail := mailer.NewMail()\n\t\t\t\tmail.FromName = \"Go Mailer\"\n\t\t\t\tmail.From = user\n\t\t\t\tmail.SetTo(recipent)\n\t\t\t\tmail.Subject = \"Server \"\n\t\t\t\tmail.Body = \"Your server is down\"\n\n\t\t\t\tif err := Mailer.Send(mail); err != nil {\n\t\t\t\t\tfmt.Println(err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tfmt.Println(\"Server is running\")\n\n\t\t\t}\n\t\t}\n\t\ttime.Sleep(2 * time.Second)\n\t}\n\twg.Done() // need to fix\n}", "func (ad *AWSData) loop() {\n\tad.getAWSInfo()\n\n\tfor ad.running {\n\t\tstart := time.Now()\n\t\tdelayTimer := time.NewTimer(ad.delay)\n\t\tlog.Trace(\"Loop Start\")\n\t\tselect {\n\t\tcase ac := <-ad.hostChange:\n\t\t\tlog.Trace(\"Loop:Changing Host\")\n\t\t\terr := ad.doSetAddress(ac)\n\t\t\tif err != nil {\n\t\t\t\tlog.Warn(\"got error setting DNS:{}, {}\", ac, err)\n\t\t\t}\n\t\tcase <-ad.forceUpdate:\n\t\t\tlog.Trace(\"Loop:AWS force Update\")\n\t\t\tad.getAWSInfo()\n\t\tcase <-delayTimer.C:\n\t\t\tlog.Trace(\"Loop:Hit AWS update timeout\")\n\t\t\terr := ad.getAWSInfo()\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatal(\"Problems talking to AWS:{}\", err)\n\t\t\t}\n\t\t}\n\t\tdelayTimer.Stop()\n\t\tloopTime := time.Since(start).Seconds()\n\t\tdnsLoopLatency.Observe(loopTime)\n\t\tlog.Trace(\"Loop End: {}s\", fmt.Sprintf(\"%.4f\", loopTime))\n\t}\n\tif ad.running {\n\t\tlog.Warn(\"Exited main loop w/o shuttdown!\")\n\t}\n}", "func StartPing(interval time.Duration){\n\tlog.Println(\"Ping Time\")\n\tpinging := true\n\tfor pinging {\n\t\tpinging = false\n\t\tlog.Print(\"Pinging set to \" + strconv.FormatBool(pinging))\n\n\t\tif shouldIPing() {\n\t\t\tpinging = true\n\t\t\tlog.Print(\"Pinging set to \" + strconv.FormatBool(pinging))\n\n\t\t\tbullyImpl.SetIsCoordinatorAlive(false)\n\t\t\tlog.Print(bullyImpl.IsCoordinatorAlive())\n\t\t\tbullyImpl.GetMoi().Ping(bullyImpl.GetCoordinator())\n\n\t\t\ttimer := time.NewTimer(interval)\n\t\t\tselect {\n\t\t\tcase <- endTimer:\n\t\t\t\tlog.Print(\"Pinging was ended\")\n\t\t\tcase <- timer.C:\n\t\t\t\tif shouldIPing() && !bullyImpl.IsCoordinatorAlive() {\n\t\t\t\t\tpinging = false\n\t\t\t\t\tlog.Print(\"Pinging set to \" + strconv.FormatBool(pinging))\n\t\t\t\t\tlog.Print(\"Coordinator is not alive, launching a new Election\")\n\t\t\t\t\tgo func(){ 
electionChannel <- struct{}{} }()\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}", "func (bili *BiliClient) heartbeatLoop() {\n\tfor bili.checkConnect() {\n\t\terr := bili.sendSocketData(0, 16, bili.protocolVersion, 2, 1, \"\")\n\t\tif err != nil {\n\t\t\tbili.setConnect(false)\n\t\t\tlog.Printf(\"heartbeatError:%s\\r\\n\", err.Error())\n\t\t\treturn\n\t\t}\n\t\ttime.Sleep(time.Second * 5)\n\t}\n}", "func (s *Server) loopServiceDiscovery() {\n\ts.Info(\"Starting DNS service discovery for nethealth pod.\")\n\tticker := s.clock.NewTicker(dnsDiscoveryInterval)\n\tdefer ticker.Stop()\n\tquery := s.config.ServiceDiscoveryQuery\n\n\tpreviousNames := []string{}\n\n\tfor {\n\t\t<-ticker.Chan()\n\n\t\ts.Debugf(\"Querying %v for service discovery\", query)\n\t\tnames, err := net.LookupHost(query)\n\t\tif err != nil {\n\t\t\ts.WithError(err).WithField(\"query\", query).Error(\"Error querying service discovery.\")\n\t\t\tcontinue\n\t\t}\n\n\t\tsort.Strings(names)\n\t\tif reflect.DeepEqual(names, previousNames) {\n\t\t\tcontinue\n\t\t}\n\t\tpreviousNames = names\n\t\ts.Info(\"Triggering peer resync due to service discovery change\")\n\n\t\tselect {\n\t\tcase s.triggerResync <- true:\n\t\tdefault:\n\t\t\t// Don't block\n\t\t}\n\t}\n}", "func PingHosts(ipBase string, ipRange []int) {\n\n\tvar wg sync.WaitGroup\n\tcmd := LibConfig.SysCommands[\"PING\"] + \" -q -W 1 -c 1 \" + ipBase\n\n\tfor i := ipRange[0]; i < ipRange[1]; i++ {\n\t\twg.Add(1)\n\n\t\t// allow threaded system command calls to finish asynchronously\n\t\tgo func(i int, w *sync.WaitGroup) {\n\t\t\tdefer w.Done()\n\t\t\tRunCommand(cmd + strconv.Itoa(i))\n\t\t}(i, &wg)\n\t}\n\n\twg.Wait()\n\n}", "func (self *PapaSystem) netLoop(ctx context.Context) {\n\t// TODO alive\n\tfor self.alive.IsRunning() {\n\t\tclient, err := dial(ctx)\n\t\tif err == nil {\n\t\t\terr = network(ctx, client)\n\t\t}\n\t\tif err != nil {\n\t\t\tlog.Print(err)\n\t\t\ttime.Sleep(1 * time.Second)\n\t\t\tif neterr, ok := err.(net.Error); ok {\n\t\t\t\tif neterr.Temporary() {\n\t\t\t\t\tcontinue\n\t\t\t\t} else {\n\t\t\t\t\tlog.Fatal(\"network error is permanent\")\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}", "func (d *Dao) pingESCluster(ctx context.Context) (err error) {\n\t//for name, client := range d.ESPool {\n\t//\tif _, _, err = client.Ping(d.c.Es[name].Addr[0]).Do(ctx); err != nil {\n\t//\t\td.PromError(\"Es:Ping\", \"%s:Ping error(%v)\", name, err)\n\t//\t\treturn\n\t//\t}\n\t//}\n\treturn\n}", "func (k *Kafka) loop() {\n\tdefer func() {\n\t\tatomic.StoreInt32(&k.running, 0)\n\t\tif nil != k.producer {\n\t\t\tk.producer.Close()\n\t\t}\n\t\tk.stats.Decr(\"output.kafka.running\", 1)\n\t\tclose(k.responseChan)\n\t\tclose(k.closedChan)\n\t}()\n\tk.stats.Incr(\"output.kafka.running\", 1)\n\n\tfor {\n\t\tif err := k.connect(); err != nil {\n\t\t\tk.log.Errorf(\"Failed to connect to Kafka: %v\\n\", err)\n\t\t\tselect {\n\t\t\tcase <-time.After(time.Second):\n\t\t\tcase <-k.closeChan:\n\t\t\t\treturn\n\t\t\t}\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\tk.log.Infof(\"Sending Kafka messages to addresses: %s\\n\", k.addresses)\n\n\tvar open bool\n\tfor atomic.LoadInt32(&k.running) == 1 {\n\t\tvar msg types.Message\n\t\tif msg, open = <-k.messages; !open {\n\t\t\treturn\n\t\t}\n\t\tk.stats.Incr(\"output.kafka.count\", 1)\n\t\tvar err error\n\t\tfor _, part := range msg.Parts {\n\t\t\tif _, _, err = k.producer.SendMessage(&sarama.ProducerMessage{\n\t\t\t\tTopic: k.conf.Kafka.Topic,\n\t\t\t\tValue: sarama.ByteEncoder(part),\n\t\t\t}); err != nil 
{\n\t\t\t\tk.stats.Incr(\"output.kafka.send.error\", 1)\n\t\t\t\tbreak\n\t\t\t} else {\n\t\t\t\tk.stats.Incr(\"output.kafka.send.success\", 1)\n\t\t\t}\n\t\t}\n\t\tselect {\n\t\tcase k.responseChan <- types.NewSimpleResponse(err):\n\t\tcase <-k.closeChan:\n\t\t\treturn\n\t\t}\n\t}\n}", "func (s *Switch) peersLoop(ctx context.Context) {\n\tfor {\n\t\tselect {\n\t\tcase <-s.morePeersReq:\n\t\t\tif s.isShuttingDown() {\n\t\t\t\treturn\n\t\t\t}\n\t\t\ts.logger.WithContext(ctx).Debug(\"loop: got morePeersReq\")\n\t\t\ts.askForMorePeers(ctx)\n\t\t// todo: try getting the connections (heartbeat)\n\t\tcase <-s.shutdownCtx.Done():\n\t\t\treturn\n\t\t}\n\t}\n}", "func (p *Probe) loop() {\n\tdefer close(p.stopped)\n\n\t// Do a first probe right away, so that the prober immediately exports results for everything.\n\tp.run()\n\tfor {\n\t\tselect {\n\t\tcase <-p.tick.Chan():\n\t\t\tp.run()\n\t\tcase <-p.ctx.Done():\n\t\t\treturn\n\t\t}\n\t}\n}", "func Pinger(c chan connection, size int, PingInterval time.Duration, WriteWaitInterval time.Duration) {\n\tfor i := 0; i < size; i++ {\n\t\tgo func() {\n\t\t\tcSet := make(map[string]*websocket.Conn)\n\t\t\ttimer := time.NewTicker(PingInterval)\n\t\t\tfor {\n\t\t\t\tselect {\n\t\t\t\tcase conn := <-c:\n\t\t\t\t\tcSet[conn.uniqConnID] = conn.conn\n\t\t\t\tcase <-timer.C:\n\t\t\t\t\tfor uniqConnID, conn := range cSet {\n\t\t\t\t\t\tlogger.Debug(fmt.Sprintf(\"Pinging UniqConnID: %s \", uniqConnID))\n\t\t\t\t\t\tif err := conn.WriteControl(websocket.PingMessage, []byte(\"--ping--\"), time.Now().Add(WriteWaitInterval)); err != nil {\n\t\t\t\t\t\t\tlogger.Error(fmt.Sprintf(\"[websocket.pingPeer] - Failed to ping User: %s Error: %v\", uniqConnID, err))\n\t\t\t\t\t\t\tmetrics.Increment(\"server_ping_failure_total\", \"\")\n\t\t\t\t\t\t\tdelete(cSet, uniqConnID)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\t}\n}", "func (sc *sparkyClient) pingProcessor() {\n\tvar pingCount int\n\tvar ptMax, ptMin int\n\tvar latencyHist pingHistory\n\n\t// We never want to run the ping test beyond maxPingTestLength seconds\n\ttimeout := time.NewTimer(time.Duration(maxPingTestLength) * time.Second)\n\n\t// Signal pingTest() that we're ready\n\tclose(sc.pingProcessorReady)\n\n\tfor {\n\t\tselect {\n\t\tcase <-timeout.C:\n\t\t\t// If we've been pinging for maxPingTestLength, call it quits\n\t\t\treturn\n\t\tcase pt := <-sc.pingTime:\n\t\t\tpingCount++\n\n\t\t\t// Calculate our ping time in microseconds\n\t\t\tptMicro := pt.Nanoseconds() / 1000\n\n\t\t\t// Add this ping to our ping history\n\t\t\tlatencyHist = append(latencyHist, ptMicro)\n\n\t\t\tptMin, ptMax = latencyHist.minMax()\n\n\t\t\t// Advance the progress bar a bit\n\t\t\tsc.pingProgressTicker <- true\n\n\t\t\t// Update the ping stats widget\n\t\t\tsc.wr.jobs[\"latency\"].(*termui.Sparklines).Lines[0].Data = latencyHist.toMilli()\n\t\t\tsc.wr.jobs[\"latencystats\"].(*termui.Par).Text = fmt.Sprintf(\"Cur/Min/Max\\n%.2f/%.2f/%.2f ms\\nAvg/σ\\n%.2f/%.2f ms\",\n\t\t\t\tfloat64(ptMicro/1000), float64(ptMin/1000), float64(ptMax/1000), latencyHist.mean()/1000, latencyHist.stdDev()/1000)\n\t\t\tsc.wr.Render()\n\t\t}\n\t}\n}", "func (c *Cluster) run() {\n\n\tticker := time.NewTicker(c.fo.heartBeat)\n\n\tmissed := 0\n\t// Don't rehash immediately on the first ping. If this node just came onlyne, leader will\n\t// account it on the next ping. 
Otherwise it will be rehashing twice.\n\trehashSkipped := false\n\n\tfor {\n\t\tselect {\n\t\tcase <-ticker.C:\n\t\t\tif c.fo.leader == c.thisNodeName {\n\t\t\t\t// I'm the leader, send pings\n\t\t\t\tc.sendPings()\n\t\t\t} else {\n\t\t\t\tmissed++\n\t\t\t\tif missed >= c.fo.voteTimeout {\n\t\t\t\t\t// Elect the leader\n\t\t\t\t\tmissed = 0\n\t\t\t\t\tc.electLeader()\n\t\t\t\t}\n\t\t\t}\n\t\tcase ping := <-c.fo.leaderPing:\n\t\t\t// Ping from a leader.\n\n\t\t\tif ping.Term < c.fo.term {\n\t\t\t\t// This is a ping from a stale leader. Ignore.\n\t\t\t\tlog.Println(\"cluster: ping from a stale leader\", ping.Term, c.fo.term, ping.Leader, c.fo.leader)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif ping.Term > c.fo.term {\n\t\t\t\tc.fo.term = ping.Term\n\t\t\t\tc.fo.leader = ping.Leader\n\t\t\t\tlog.Printf(\"cluster: leader '%s' elected\", c.fo.leader)\n\t\t\t} else if ping.Leader != c.fo.leader {\n\t\t\t\tif c.fo.leader != \"\" {\n\t\t\t\t\t// Wrong leader. It's a bug, should never happen!\n\t\t\t\t\tlog.Printf(\"cluster: wrong leader '%s' while expecting '%s'; term %d\",\n\t\t\t\t\t\tping.Leader, c.fo.leader, ping.Term)\n\t\t\t\t} else {\n\t\t\t\t\tlog.Printf(\"cluster: leader set to '%s'\", ping.Leader)\n\t\t\t\t}\n\t\t\t\tc.fo.leader = ping.Leader\n\t\t\t}\n\n\t\t\tmissed = 0\n\t\t\tif ping.Signature != c.ring.Signature() {\n\t\t\t\tif rehashSkipped {\n\t\t\t\t\tlog.Println(\"cluster: rehashing at a request of\",\n\t\t\t\t\t\tping.Leader, ping.Nodes, ping.Signature, c.ring.Signature())\n\t\t\t\t\tc.rehash(ping.Nodes)\n\t\t\t\t\trehashSkipped = false\n\n\t\t\t\t\t//globals.hub.rehash <- true\n\t\t\t\t} else {\n\t\t\t\t\trehashSkipped = true\n\t\t\t\t}\n\t\t\t}\n\n\t\tcase vreq := <-c.fo.electionVote:\n\t\t\tif c.fo.term < vreq.req.Term {\n\t\t\t\t// This is a new election. This node has not voted yet. 
Vote for the requestor and\n\t\t\t\t// clear the current leader.\n\t\t\t\tlog.Printf(\"Voting YES for %s, my term %d, vote term %d\", vreq.req.Node, c.fo.term, vreq.req.Term)\n\t\t\t\tc.fo.term = vreq.req.Term\n\t\t\t\tc.fo.leader = \"\"\n\t\t\t\tvreq.resp <- ClusterVoteResponse{Result: true, Term: c.fo.term}\n\t\t\t} else {\n\t\t\t\t// This node has voted already or stale election, reject.\n\t\t\t\tlog.Printf(\"Voting NO for %s, my term %d, vote term %d\", vreq.req.Node, c.fo.term, vreq.req.Term)\n\t\t\t\tvreq.resp <- ClusterVoteResponse{Result: false, Term: c.fo.term}\n\t\t\t}\n\t\tcase <-c.fo.done:\n\t\t\treturn\n\t\t}\n\t}\n}", "func (n *Peer) Loop() error {\n\tfuncs := map[string]func(io.Reader, <-chan *packet) error{\n\t\t\"ping\": n.pongAfterReadPing,\n\t\t\"pong\": n.readPong,\n\t\t\"inv\": n.readInv,\n\t\t\"headers\": n.readHeaders,\n\t\t\"merkleblock\": n.readMerkle,\n\t\t\"addr\": n.readAddr,\n\t}\n\tpch := n.goReadMessage()\n\tt := time.NewTimer(3 * time.Minute)\n\tfor {\n\t\tdefer func() {\n\t\t\tselect {\n\t\t\tcase <-pch:\n\t\t\tdefault:\n\t\t\t}\n\t\t}()\n\t\tif err := n.resetDeadline(); err != nil {\n\t\t\treturn n.errClose(err)\n\t\t}\n\t\tselect {\n\t\tcase p := <-pch:\n\t\t\tif p.err != nil {\n\t\t\t\treturn n.errClose(p.err)\n\t\t\t}\n\t\t\tlog.Println(p.cmd + \" from \" + n.conn.RemoteAddr().String())\n\t\t\tn.timeout = 0\n\t\t\tf, exist := funcs[p.cmd]\n\t\t\tif !exist {\n\t\t\t\tlog.Printf(\"%s:unknown or unsupported command\", p.cmd)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif err := f(p.payload, pch); err != nil {\n\t\t\t\tlog.Print(err)\n\t\t\t}\n\t\tcase w := <-wch:\n\t\t\terr := n.writeMessage(w.cmd, w.data)\n\t\t\tw.err <- err\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(err)\n\t\t\t}\n\t\t\tlog.Print(\"sended \", w.cmd)\n\t\tcase <-t.C:\n\t\t\tif n.timeout++; n.timeout > timeout {\n\t\t\t\treturn errors.New(\"timeout\")\n\t\t\t}\n\t\t\tif err := n.writePing(); err != nil {\n\t\t\t\treturn n.errClose(err)\n\t\t\t}\n\t\t}\n\t\tif !t.Stop() {\n\t\t\t<-t.C\n\t\t}\n\t\tt.Reset(3 * time.Minute)\n\t}\n}", "func UpdateLoop() {\n\tfor {\n\t\tup, ok := <-ostent.Updates.Get()\n\t\tif ok {\n\t\t\tconnections.update(up)\n\t\t\tlastCopy.set(up)\n\t\t}\n\t}\n}", "func Pinging(memberList *[]MemberID, msgQueue *[]GossipMessage, selfMember *MemberID) {\n\tfor {\n\t\tif len(*msgQueue) == 0 {\n\t\t\tvar msg GossipMessage\n\t\t\tmsg.Status = 3\n\t\t\tGossip(msg, memberList, msgQueue, selfMember)\n\t\t} else {\n\t\t\t// fmt.Println(*msgQueue)\n\t\t\tGossip((*msgQueue)[0], memberList, msgQueue, selfMember)\n\t\t\t*msgQueue = (*msgQueue)[1:]\n\t\t}\n\t\ttime.Sleep(600 * time.Millisecond)\n\t}\n}", "func sleepMonitor() {\n\tc, err := icmp.ListenPacket(\"ip4:icmp\", \"0.0.0.0\")\n\tif err != nil {\n\t\tlog.Printf(\"error listening for udp - sending data to all ports for all connected clients. 
err: %s\", err)\n\t\treturn\n\t}\n\tgo icmpEchoSender(c)\n\tdefer c.Close()\n\tfor {\n\t\tbuf := make([]byte, 1500)\n\t\tn, peer, err := c.ReadFrom(buf)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"%s\\n\", err.Error())\n\t\t\tcontinue\n\t\t}\n\t\tmsg, err := icmp.ParseMessage(1, buf[:n])\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tip := peer.String()\n\n\t\t// Look for echo replies, mark it as received.\n\t\tif msg.Type == ipv4.ICMPTypeEchoReply {\n\t\t\tnetMutex.Lock()\n\t\t\tpingResponse[ip] = stratuxClock.Time\n\t\t\tnetMutex.Unlock()\n\t\t\tcontinue // No further processing needed.\n\t\t}\n\n\t\t// Only deal with ICMP Unreachable packets (since that's what iOS and Android seem to be sending whenever the apps are not available).\n\t\tif msg.Type != ipv4.ICMPTypeDestinationUnreachable {\n\t\t\tcontinue\n\t\t}\n\t\t// Packet parsing.\n\t\tmb, err := msg.Body.Marshal(1)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tif len(mb) < 28 {\n\t\t\tcontinue\n\t\t}\n\n\t\t// The unreachable port.\n\t\tport := (uint16(mb[26]) << 8) | uint16(mb[27])\n\t\tipAndPort := ip + \":\" + strconv.Itoa(int(port))\n\n\t\tnetMutex.Lock()\n\t\tp, ok := outSockets[ipAndPort]\n\t\tif !ok {\n\t\t\t// Can't do anything, the client isn't even technically connected.\n\t\t\tnetMutex.Unlock()\n\t\t\tcontinue\n\t\t}\n\t\tp.LastUnreachable = stratuxClock.Time\n\t\toutSockets[ipAndPort] = p\n\t\tnetMutex.Unlock()\n\t}\n}", "func (db *DB) Ping() <-chan SQLResult {\n\tresult := make(chan SQLResult, 10)\n\tgo func() {\n\t\tdefer close(result)\n\n\t\tticker := time.NewTicker(db.conf.GetFrequency())\n\t\tfor range ticker.C {\n\t\t\tgo func() {\n\t\t\t\tresult <- executeQuery(db.db, db.conf.GetQuery())\n\t\t\t}()\n\t\t}\n\t}()\n\treturn result\n}", "func (c *Client) Ping(ping string) {\n\tvar (\n\t\targ = ReqKeepAlive{}\n\t\treply = RespKeepAlive{}\n\t\terr error\n\t)\n\n\tfor {\n\t\tselect {\n\t\tcase <-c.quit:\n\t\t\tgoto closed\n\t\tdefault:\n\t\t}\n\n\t\tif c.Client != nil && c.err == nil {\n\t\t\tif err = c.Call(ping, &arg, &reply); err != nil {\n\t\t\t\tc.err = err\n\t\t\t\tif err != rpc.ErrShutdown {\n\t\t\t\t\tc.Client.Close()\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tif err = c.dial(); err == nil {\n\t\t\t\tc.err = nil\n\t\t\t}\n\t\t}\n\t\ttime.Sleep(pingDuration)\n\t}\n\nclosed:\n\tif c.Client != nil {\n\t\tc.Client.Close()\n\t}\n}", "func (scholten *Scholten) loop() {\n\n\terr := scholten.serv.startListening()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tif scholten.root {\n\t\tgo scholten.doWork()\n\t}\n\n\tfor {\n\t\tselect {\n\t\tcase basic := <-scholten.basicChan:\n\t\t\t// ALUNO\n\n\t\tcase control := <-scholten.controlChan:\n\t\t\t// ALUNO\n\n\t\tcase termination := <- scholten.terminationChan:\n\t\t\t// ALUNO\n\t\t}\n\t}\n}", "func ping(hosts []string, returnUnavailable bool) []string {\n\tvar toReturn []string\n\tvar cmds []*exec.Cmd\n\n\t// Start pinging:\n\tfor _, host := range hosts {\n\t\tlog.Println(\"Pinging\", host)\n\t\t// cmd := exec.Command(\"ssh\", \"-o ConnectTimeout=1\", host, \"echo\")\n\t\tcmd := exec.Command(\"nc\", \"-z\", \"-w 1\", host, \"22\")\n\t\tcmd.Start()\n\t\tcmds = append(cmds, cmd)\n\t}\n\n\t// Read result of the pings:\n\tfor i, cmd := range cmds {\n\t\tif err := cmd.Wait(); err != nil {\n\t\t\tlog.Println(\"Unavailable host:\", hosts[i], \"ping error:\", err)\n\t\t\tif returnUnavailable {\n\t\t\t\ttoReturn = append(toReturn, hosts[i])\n\t\t\t}\n\t\t} else {\n\t\t\tif !returnUnavailable {\n\t\t\t\ttoReturn = append(toReturn, hosts[i])\n\t\t\t}\n\t\t}\n\t}\n\n\treturn 
toReturn\n}", "func (p *Peer) pingHandler() {\n\tpingTicker := time.NewTicker(pingInterval)\n\tdefer pingTicker.Stop()\n\nout:\n\tfor {\n\t\tselect {\n\t\tcase <-pingTicker.C:\n\t\t\tnonce, err := wire.RandomUint64()\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorf(\"Not sending ping to %s: %v\", p, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tp.QueueMessage(wire.NewMsgPing(nonce), nil)\n\n\t\tcase <-p.quit:\n\t\t\tbreak out\n\t\t}\n\t}\n}", "func Loop(){\n\tfor {\n\t\t\t <-ElectionTimer.C\n\t\t\tif r.Id == r.LeaderId { \n\t\t\t\t\t//r.ResetTimer()\n\t\t\t\t}else{\n\t\t\t\t\tr.CallElection()\t\t\t\t\t\t\t\n\t\t\t\t}\n\t\t}//end of for\t\n}", "func (e *Hosts) PingPong(ctx context.Context, stream hosts.Hosts_PingPongStream) error {\n\tfor {\n\t\treq, err := stream.Recv()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tlog.Logf(\"Got ping %v\", req.Stroke)\n\t\tif err := stream.Send(&hosts.Pong{Stroke: req.Stroke}); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n}", "func (c *Client) Loop(n, m int) {\n\tinterval := initialInterval\n\tfor {\n\t\trandSleep(n, m)\n\t\tresp, err := c.MakeRequest(\"3\", \"2\", true, false)\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t\tcontinue\n\t\t}\n\t\tuc := resp.Apps[0].UpdateCheck\n\t\tif uc.Status != \"ok\" {\n\t\t\tc.Log(\"update check status: %s\\n\", uc.Status)\n\t\t} else {\n\t\t\tc.SetVersion(resp)\n\t\t}\n\t}\n}", "func (c *switchBotCollector) updateLoop() {\n\tticker := time.NewTicker(scrapeInterval)\n\n\tlog.Println(\"start collecting...\")\n\tc.update()\n\tfor {\n\t\tselect {\n\t\tcase <-ticker.C:\n\t\t\tc.update()\n\t\t}\n\t}\n}", "func (p *PublisherMunger) EachLoop() error {\n\tbuf := bytes.NewBuffer(nil)\n\tp.plog = NewPublisherLog(buf)\n\n\tif err := p.updateKubernetes(); err != nil {\n\t\tp.plog.Errorf(\"%v\", err)\n\t\tp.plog.Flush()\n\t\treturn err\n\t}\n\tif err := p.construct(); err != nil {\n\t\tp.plog.Errorf(\"%v\", err)\n\t\tp.plog.Flush()\n\t\treturn err\n\t}\n\tif err := p.publish(); err != nil {\n\t\tp.plog.Errorf(\"%v\", err)\n\t\tp.plog.Flush()\n\t\treturn err\n\t}\n\treturn nil\n}", "func SendHeartbeat(){\n\n\tfor{\n\t/*\t\n\t\trandNum := rand.Intn(100) \n\t\tif randNum > 97 && r.id == r.clusterConfig.LeaderId { \n\t\t\t//r.clusterConfig.Servers[r.id].isLeader=2\t\t\t//break ThisLoop \n\t\t\ttime.Sleep(100 * time.Second)\n\t\t\t}\n\t*/\n\t\tselect{\n\t\t\t\n\t\t\tcase <-raft.C1:\n\t\t\t\t//log.Println(\"in send SendHeartbeat-Append\")\n\t\t\t\n\t\t\tcase <-raft.C2:\n\t\t\t\t//log.Println(\"in send SendHeartbeat-commit\")\n\t\t\t\n\t\t\tcase <-time.After(100*time.Millisecond):\n\t\t\t\tif r.clusterConfig.Servers[r.id].isLeader == 1 {\n\t\t\t\t\tfor i:=0; i<N; i++ {\n\t\t\t\t\t\t\tif i == r.id { continue }\t\t\t\t\n\t\t\t\t\t\t\targs := &HeartbeatRPCArgs{r.id,r.currentTerm}\t\t\t\t\n\t\t\t\t\t\t\tvar reply string\t\t\t\t\n\t\t\t\t\t\t\tvar err error = nil\n\t\t\t\t\t\t\trr := make(chan error, 1)\n\t\t\t\t\t\t\tgo func() { rr <- r.clusterConfig.Servers[i].Client.Call(\"RPC.HeartbeatRPC\", args, &reply) } ()\n\t\t\t\t\t\t\tselect{\n\t\t\t\t\t\t\t\tcase err = <-rr:\n\t\t\t\t\t\t\t\t\tif err != nil {\t\n\t\t\t\t\t\t\t\t\t\tlog.Println(\"[Server] HeartbeatRPC Error:\", err) \n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\tcase <-time.After(20*time.Millisecond):\n\t\t\t\t\t\t\t\t//\tlog.Println(\"Heartbeat reply not got \",i)\n\t\t\t\t\t\t\t\t\tcontinue //log.Println(\"Heartbeat reply not got \",i)\n\t\t\t\t\t\t\t}// inner select loop\n\t\t\t\t\t}//end of inner for \n\t\t\t\t}//end of if\n\t\t}//end of select\n\t}//end of for loop\n}", "func ping(pings 
chan<- string, msg string) {\n\tpings <- msg\n}", "func ping(pings chan<- string, msg string) {\n\tpings <- msg\n}", "func ping(pings chan<- string, msg string) {\n\tpings <- msg\n}", "func (bc *BotConnection) pingpong() {\n\tdefer func() {\n\t\tlog.Println(\" pingpong: dying\")\n\t\tbc.waitGroup.Done()\n\t}()\n\n\t// Send first ping to avoid early EOF:\n\tping := struct {\n\t\tType string `json:\"type\"`\n\t}{\n\t\tType: \"ping\",\n\t}\n\terr := websocket.JSON.Send(bc.ws, &ping)\n\tif err != nil {\n\t\tlog.Printf(\" pingpong: JSON send error: %s\\n\", err)\n\t}\n\n\t// Start a timer to tick every 15 seconds:\n\tticker := time.Tick(time.Second * 15)\n\n\talive := true\n\tfor alive {\n\t\t// Wait on either the timer tick or the `die` channel:\n\t\tselect {\n\t\tcase _ = <-ticker:\n\t\t\t//log.Println(\" pingpong: ping\")\n\t\t\terr = websocket.JSON.Send(bc.ws, &ping)\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\" pingpong: JSON send error: %s\\n\", err)\n\t\t\t\tbreak\n\t\t\t}\n\t\t\t// NOTE: `readIncomingMessages` will read the \"pong\" response.\n\t\t\t// Cannot issue a read here because a read is already blocking there.\n\t\t\tbreak\n\t\tcase _ = <-bc.die:\n\t\t\talive = false\n\t\t\tbreak\n\t\t}\n\t}\n}", "func (t *transport) sendLoop(addr string, reconnectInterval, retryTimeout time.Duration, log SomeLogger) {\n\tvar (\n\t\tsock net.Conn\n\t\terr error\n\t\treconnectC <-chan time.Time\n\t)\n\n\tif reconnectInterval > 0 {\n\t\treconnectTicker := time.NewTicker(reconnectInterval)\n\t\tdefer reconnectTicker.Stop()\n\t\treconnectC = reconnectTicker.C\n\t}\n\nRECONNECT:\n\t// Attempt to connect\n\tsock, err = net.Dial(\"udp\", addr)\n\tif err != nil {\n\t\tlog.Printf(\"[STATSD] Error connecting to server: %s\", err)\n\t\tgoto WAIT\n\t}\n\n\tfor {\n\t\tselect {\n\t\tcase buf, ok := <-t.sendQueue:\n\t\t\t// Get a buffer from the queue\n\t\t\tif !ok {\n\t\t\t\t_ = sock.Close() // nolint: gosec\n\t\t\t\tt.shutdownWg.Done()\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif len(buf) > 0 {\n\t\t\t\t// cut off \\n in the end\n\t\t\t\t_, err := sock.Write(buf[0 : len(buf)-1])\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Printf(\"[STATSD] Error writing to socket: %s\", err)\n\t\t\t\t\t_ = sock.Close() // nolint: gosec\n\t\t\t\t\tgoto WAIT\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// return buffer to the pool\n\t\t\tselect {\n\t\t\tcase t.bufPool <- buf:\n\t\t\tdefault:\n\t\t\t\t// pool is full, let GC handle the buf\n\t\t\t}\n\t\tcase <-reconnectC:\n\t\t\t_ = sock.Close() // nolint: gosec\n\t\t\tgoto RECONNECT\n\t\t}\n\t}\n\nWAIT:\n\t// Wait for a while\n\ttime.Sleep(retryTimeout)\n\tgoto RECONNECT\n}", "func ping(pings chan <- string, msg string) {\n\tpings <- msg\n}", "func pingLeases(leases []subnet.SubnetLease) error {\n\tconst workers = 5\n\tconst timeout = 1 * time.Second\n\n\tif len(leases) == 0 {\n\t\treturn nil\n\t}\n\n\twork := make(chan subnet.SubnetLease)\n\tresults := make(chan bool, workers)\n\tclient := http.Client{Timeout: timeout}\n\n\tvar wg sync.WaitGroup\n\twg.Add(workers)\n\tfor i := 0; i < workers; i++ {\n\t\tgo func() {\n\t\t\tfor l := range work {\n\t\t\t\tres, err := client.Get(fmt.Sprintf(\"http://%s:%s/ping\", l.Network.IP, l.Attrs.HTTPPort))\n\t\t\t\tif err == nil {\n\t\t\t\t\tres.Body.Close()\n\t\t\t\t}\n\t\t\t\tresults <- err == nil && res.StatusCode == 200\n\t\t\t}\n\t\t\twg.Done()\n\t\t}()\n\t}\n\n\tgo func() {\n\t\twg.Wait()\n\t\tclose(results)\n\t}()\n\n\tfor _, l := range leases {\n\t\tselect {\n\t\tcase work <- l:\n\t\tcase success := <-results:\n\t\t\tif success 
{\n\t\t\t\tclose(work)\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}\n\tclose(work)\n\n\tfor success := range results {\n\t\tif success {\n\t\t\treturn nil\n\t\t}\n\t}\n\n\treturn errors.New(\"failed to successfully ping a neighbor\")\n}", "func Loop(w chan *tsdb.Point, interval time.Duration) {\n\ttick := tsdb.Tick(interval)\n\tt := time.Now()\n\tfor ; ; t = <-tick {\n\t\tstart := time.Now()\n\n\t\temit := newEmitter(w, t)\n\t\tvar wg sync.WaitGroup\n\t\tfor _, c := range collectors {\n\t\t\tc := c\n\t\t\tgo func() {\n\t\t\t\tcollect(emit, c)\n\t\t\t\twg.Done()\n\t\t\t}()\n\t\t\twg.Add(1)\n\t\t}\n\t\twg.Wait()\n\n\t\tstatCycleMillis.Add(time.Since(start).Nanoseconds() / 1e6)\n\t}\n}", "func loopPollingForInterfaceAddrs(c chan []string) {\n\tvar lastSortedUpdate []string\n\tfor range time.NewTicker(10 * time.Second).C {\n\t\taddrs, err := net.InterfaceAddrs()\n\t\tif err != nil {\n\t\t\tlog.WithError(err).Panic(\"Failed to get host interface addresses\")\n\t\t}\n\n\t\tipv4s := extractUnicastIPv4Addrs(addrs)\n\t\tsort.Strings(ipv4s)\n\n\t\tif reflect.DeepEqual(lastSortedUpdate, ipv4s) {\n\t\t\tcontinue\n\t\t}\n\n\t\tlog.WithField(\"update\", ipv4s).Debug(\"Interface addresses updated.\")\n\t\tc <- ipv4s\n\t}\n}", "func (m *MockUDPClient) Ping(num uint) {\n\tfor i := 0; i < int(num); i++ {\n\t\tm.ctrl <- true\n\t}\n}", "func ping(cfg configFlags) {\n\t// Dial a remote server and send a stream to that server.\n\tc, err := vsock.Dial(uint32(cfg.contextID), uint32(cfg.port))\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to dial: %v\", err)\n\t}\n\tdefer c.Close()\n\n\tvar p func(i uint) []byte\n\tp = func(i uint) []byte {\n\t\tc := make([]byte, 8)\n\t\tbinary.LittleEndian.PutUint64(c, uint64(i))\n\t\tb := md5.Sum(c)\n\t\treturn b[:]\n\t}\n\n\tif cfg.pattern != \"\" {\n\t\tb, err := hex.DecodeString(cfg.pattern)\n\t\tif err != nil {\n\t\t\tlog.Println(\"pattern must be specified as hex digits\")\n\t\t\tlog.Fatalf(\"failed to decode pattern: %v\", err)\n\t\t}\n\t\tp = func(i uint) []byte { return b }\n\t\tfmt.Printf(\"PATTERN: %s\", cfg.pattern)\n\t}\n\n\tlogf(\"PING %s FROM %s\", c.LocalAddr(), c.RemoteAddr())\n\n\tbuf := make([]byte, 64)\n\ttick := time.NewTicker(cfg.interval)\n\tfor i := uint(0); cfg.count == 0 || i < cfg.count; i++ {\n\t\tn, err := c.Write(p(i))\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"error writing to socket: %v\", err)\n\t\t}\n\t\tn, err = c.Read(buf)\n\t\tfmt.Printf(\"%d bytes from %s: ping_seq=%d\\n\", n, c.RemoteAddr(), i)\n\t\t<-tick.C\n\t}\n}", "func ping(c chan<- string) {\n\tfor {\n\t\tvar str string\n\t\tfmt.Scanln(&str)\n\t\tc <- str\n\t}\n}", "func (serv *Server) pollPinger() {\n\tvar (\n\t\tpingTicker *time.Ticker = time.NewTicker(16 * time.Second)\n\n\t\tall []int\n\t\tconn int\n\t\tnumPinger int32\n\t)\n\n\tfor {\n\t\tselect {\n\t\tcase <-pingTicker.C:\n\t\t\tall = serv.Clients.All()\n\n\t\t\tfor _, conn = range all {\n\t\t\t\tselect {\n\t\t\t\tcase serv.qpinger <- conn:\n\t\t\t\tdefault:\n\t\t\t\t\tnumPinger = serv.numGoPinger.Load()\n\t\t\t\t\tif numPinger < serv.Options.maxGoroutinePinger {\n\t\t\t\t\t\tgo serv.pinger()\n\t\t\t\t\t\tserv.numGoPinger.Add(1)\n\t\t\t\t\t\tserv.qpinger <- conn\n\t\t\t\t\t} else {\n\t\t\t\t\t\tgo serv.delayPinger(conn)\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t}\n\t\tcase <-serv.running:\n\t\t\treturn\n\t\t}\n\t}\n}", "func (c *Coordinator) loop(fn loopFunc, interval time.Duration, reason string) chan struct{} {\n\tdone := make(chan struct{})\n\tgo func() {\n\t\tticker := c.ticker(interval)\n\t\tdefer close(done)\n\n\t\tfor {\n\t\t\tselect 
{\n\t\t\t// taker or renew old leases\n\t\t\tcase <-ticker():\n\t\t\t\tif err := fn(); err != nil {\n\t\t\t\t\tc.Logger.WithError(err).Errorf(\"Worker %s failed to %s\", c.WorkerId, reason)\n\t\t\t\t}\n\t\t\t// someone called stop and we need to exit.\n\t\t\tcase <-done:\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn done\n}", "func ping(url string) {\n\tfor {\n\t\ttime.Sleep(10000 * time.Millisecond)\n\t\thttp.Get(url)\n\t}\n}", "func (m *Mongo) Pinger() {\n\trand.Seed(time.Now().UTC().UnixNano())\n\t// Start pinger on a random schedule\n\ttime.Sleep(time.Duration(rand.Intn(5)) * time.Second)\n\n\tfor {\n\t\tlog.Infof(\"ping hosts:%s\", m.Hosts)\n\t\terr := m.Session.Ping()\n\t\tif err != nil {\n\t\t\tlog.Error(err)\n\t\t\tm.Connect(m.Hosts)\n\t\t\treturn\n\t\t}\n\t\ttime.Sleep(40 * time.Second)\n\t}\n}", "func (t *transport) reportLoop(reportInterval time.Duration, log SomeLogger) {\n\tdefer t.shutdownWg.Done()\n\n\treportTicker := time.NewTicker(reportInterval)\n\tdefer reportTicker.Stop()\n\n\tfor {\n\t\tselect {\n\t\tcase <-t.shutdown:\n\t\t\treturn\n\t\tcase <-reportTicker.C:\n\t\t\tlostPeriod := atomic.SwapInt64(&t.lostPacketsPeriod, 0)\n\t\t\tif lostPeriod > 0 {\n\t\t\t\tlog.Printf(\"[STATSD] %d packets lost (overflow)\", lostPeriod)\n\t\t\t}\n\t\t}\n\t}\n}", "func hostCheckIn() {\n \n fmt.Println(\"Host Check in!\")\n currentTime := int32(time.Now().Unix())\n for index, _ := range hosts { \n go func(i int) {\n fmt.Println(\"Submitting Metrics for \"+hosts[i].name)\n metrics := hostMetrics(&hosts[i],currentTime)\n var jsonStr = []byte(metrics)\n req, err := http.NewRequest(\"POST\", metricApiUrl, bytes.NewBuffer(jsonStr))\n req.Header.Set(\"Content-Type\", \"application/json\")\n\n client := &http.Client{}\n resp, err := client.Do(req)\n if err != nil {\n return\n }\n defer resp.Body.Close()\n\n fmt.Println(\"response Status:\", resp.Status)\n fmt.Println(\"response Headers:\", resp.Header)\n body, _ := ioutil.ReadAll(resp.Body)\n fmt.Println(\"response Body:\", string(body))\n \n }(index)\n if(math.Mod(float64(index), 100) == 0) {\n time.Sleep(100*time.Millisecond)\n }\n }\n\n // Report in every 30 seconds\n time.Sleep(CheckInInterval * time.Second) \n hostCheckIn() \n}", "func (db *DB) doNotifyLoop(ctx context.Context, ch chan struct{}, psc *redis.PubSubConn, eb *backoff.ExponentialBackOff) error {\n\tfor {\n\t\terr, ok := doNotify(ctx, psc, ch).(net.Error)\n\t\tif !ok && err != nil {\n\t\t\tlog.Error().Err(ctx.Err()).Msg(\"failed to notify channel\")\n\t\t\treturn err\n\t\t}\n\t\tif ok && err.Timeout() {\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn ctx.Err()\n\t\t\tcase <-time.After(eb.NextBackOff()):\n\t\t\t}\n\t\t\treturn nil\n\t\t}\n\t}\n}", "func pingServer() error {\n\tfor i := 0; i < 2; i++ {\n\t\t// Ping the server by sending a GET request to `/health`.\n\t\tresp, err := http.Get(\"http://127.0.0.1:8080\" + \"/api/health\")\n\t\tif err == nil && resp.StatusCode == 200 {\n\t\t\treturn nil\n\t\t}\n\n\t\t// Sleep for a second to continue the next ping.\n\t\tlog.Print(\"Waiting for the router, retry in 1 second.\")\n\t\ttime.Sleep(time.Second)\n\t}\n\treturn errors.New(\"Cannot connect to the router.\")\n}", "func connectLoop(snowflakes SnowflakeCollector) {\n\tfor {\n\t\ttimer := time.After(ReconnectTimeout)\n\t\t_, err := snowflakes.Collect()\n\t\tif err != nil {\n\t\t\tlog.Printf(\"WebRTC: %v Retrying...\", err)\n\t\t}\n\t\tselect {\n\t\tcase <-timer:\n\t\t\tcontinue\n\t\tcase <-snowflakes.Melted():\n\t\t\tlog.Println(\"ConnectLoop: 
stopped.\")\n\t\t\treturn\n\t\t}\n\t}\n}", "func Ping(conn net.Conn) {\n\tfor {\n\t\tconn.Write([]byte(\"ping\"))\n\t\ttime.Sleep(20 * time.Second)\n\t}\n}", "func Ping(conn net.Conn) {\n\tfor {\n\t\tconn.Write([]byte(\"ping\"))\n\t\ttime.Sleep(20 * time.Second)\n\t}\n}", "func (a App) checkerLoop() {\n\tticker := time.NewTicker(time.Duration(a.CheckInterval))\n\tdefer ticker.Stop()\n\n\ta.checkIPAndUpdateDNS()\n\n\tfor {\n\t\tselect {\n\t\tcase <-ticker.C:\n\t\t\ta.checkIPAndUpdateDNS()\n\t\tcase <-a.ctx.Done():\n\t\t\treturn\n\t\t}\n\t}\n}", "func pingServer() error {\n\tfor i := 0; i < viper.GetInt(\"max_ping_count\"); i++ {\n\t\t// Ping the server by sending a GET request to `/health`.\n\t\tresp, err := http.Get(viper.GetString(\"url\") + \"/sd/health\")\n\t\tif err == nil && resp.StatusCode == 200 {\n\t\t\treturn nil\n\t\t}\n\n\t\t// Sleep for a second to continue the next ping.\n\t\tlog.Info(\"Waiting for the router, retry in 1 second.\")\n\t\ttime.Sleep(time.Second)\n\t}\n\treturn errors.New(\"Cannot connect to the router.\")\n}", "func pingServer() error {\n\tfor i := 0; i < viper.GetInt(\"max_ping_count\"); i++ {\n\t\t// Ping the server by sending a GET request to `/health`.\n\t\tresp, err := http.Get(viper.GetString(\"url\") + \"/sd/health\")\n\t\tif err == nil && resp.StatusCode == 200 {\n\t\t\treturn nil\n\t\t}\n\n\t\t// Sleep for a second to continue the next ping.\n\t\tlog.Info(\"Waiting for the router, retry in 1 second.\")\n\t\ttime.Sleep(time.Second)\n\t}\n\treturn errors.New(\"Cannot connect to the router.\")\n}", "func (p *pinger) Ping(addr net.Addr) {\n\tdefer close(p.reportChan)\n\tdefer close(p.errChan)\n\n\tconn, err := icmp.ListenPacket(\"ip4:icmp\", \"\")\n\tif err != nil {\n\t\tp.errChan <- fmt.Errorf(\"cannot connect to addr %s: %v\", addr, err)\n\t\treturn\n\t}\n\tdefer conn.Close()\n\n\tseq := 0\n\tfor {\n\t\tselect {\n\t\tcase <-p.stop:\n\t\t\treturn\n\t\tdefault:\n\t\t\tping, err := p.ping(conn, addr, seq)\n\t\t\tif err != nil {\n\t\t\t\tp.errChan <- err\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tp.reportChan <- ping\n\t\t\tseq++\n\n\t\t\tif p.opts.Count != 0 && int(p.opts.Count) == seq {\n\t\t\t\tp.Stop()\n\t\t\t} else {\n\t\t\t\ttime.Sleep(time.Second)\n\t\t\t}\n\t\t}\n\t}\n}", "func handlePongs(done chan bool, outFileName string) {\n\n\tvar liveHosts[] string\n\n\t// CATCH ALL RESPONSES\n\tfor pong := range pongs{\n\n\t\tif pong.Alive == true {\n\t\t\tDebug(\"Host \" + pong.IP + \" is alive!\")\n\t\t\tliveHosts = append(liveHosts, pong.IP)\n\t\t}\n\t\tif pong.Error != nil {\n\t\t} else if pong.Alive == false {\n\t\t\t//Debug(\"Host \" + pong.IP + \" is dead!\")\n\t\t}\n\t}\n\n\t// WRITE TO OUTPUT FILE\n\twriteLiveHosts(liveHosts, outFileName)\n\tdone <- true\n\t\n}", "func (ss *socketSession) writeLoop() {\n\tdefer func() {\n\t\t// Stop the timer\n\t\tss.pingTimer.Stop()\n\t}()\n\n\tfor {\n\t\tselect {\n\t\tcase <-ss.stream.HasData:\n\t\t\t// Create a new token\n\t\t\tt, ok := ss.token.new()\n\t\t\tif !ok {\n\t\t\t\tlog.L.Warning(\"Closing session %s with remote address '%s' due to flooding attack!\", ss.session.SessionID(), ss.socketConn.RemoteAddr())\n\t\t\t\t// Immediately close the session. 
The client tries to flood the server...\n\t\t\t\tss.socketConn.Close()\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// Send the new token and the message\n\t\t\tss.socketConn.Write(t + \"&\" + ss.stream.Read())\n\t\tcase <-ss.pingTimer.C:\n\t\t\t// Check if the client didn't respond since the last ping request.\n\t\t\tif ss.pingCount >= 1 {\n\t\t\t\t// Close the socket\n\t\t\t\tss.socketConn.Close()\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// Increment the ping count\n\t\t\tss.pingCount += 1\n\n\t\t\t// Create a new token\n\t\t\tt, ok := ss.token.new()\n\t\t\tif !ok {\n\t\t\t\tlog.L.Warning(\"Closing session %s with remote address '%s' due to flooding attack!\", ss.session.SessionID(), ss.socketConn.RemoteAddr())\n\t\t\t\t// Immediately close the session. The client tries to flood the server...\n\t\t\t\tss.socketConn.Close()\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// Send the new token and a ping request\n\t\t\tss.socketConn.Write(t + \"&\" + socketKeyPing)\n\n\t\t\t// Reset the timer again\n\t\t\tss.pingTimer.Reset(pingPeriod)\n\t\tcase <-ss.stopWriteLoop:\n\t\t\t// Just exit the loop\n\t\t\treturn\n\t\t}\n\t}\n}", "func (c *Cluster) Ping(ping *ClusterPing, unused *bool) error {\n\tselect {\n\tcase c.fo.leaderPing <- ping:\n\tdefault:\n\t}\n\treturn nil\n}", "func (s *server) loop() {\n\tfor {\n\t\tselect {\n\t\tcase op := <-s.ops:\n\t\t\top()\n\t\t}\n\t}\n}", "func (e *Route53Exporter) CollectLoop() {\n\tclient := awsclient.NewClientFromSession(e.sess)\n\n\tfor {\n\t\tctx, ctxCancelFunc := context.WithTimeout(context.Background(), e.timeout)\n\t\te.Cancel = ctxCancelFunc\n\t\tlevel.Info(e.logger).Log(\"msg\", \"Updating Route53 metrics...\")\n\n\t\thostedZones, err := getAllHostedZones(client, ctx, e.logger)\n\n\t\tlevel.Info(e.logger).Log(\"msg\", \"Got all zones\")\n\t\tif err != nil {\n\t\t\tlevel.Error(e.logger).Log(\"msg\", \"Could not retrieve the list of hosted zones\", \"error\", err.Error())\n\t\t\tawsclient.AwsExporterMetrics.IncrementErrors()\n\t\t}\n\n\t\terr = e.getHostedZonesPerAccountMetrics(client, hostedZones, ctx)\n\t\tif err != nil {\n\t\t\tlevel.Error(e.logger).Log(\"msg\", \"Could not get limits for hosted zone\", \"error\", err.Error())\n\t\t\tawsclient.AwsExporterMetrics.IncrementErrors()\n\t\t}\n\n\t\terrs := e.getRecordsPerHostedZoneMetrics(client, hostedZones, ctx)\n\t\tfor _, err = range errs {\n\t\t\tlevel.Error(e.logger).Log(\"msg\", \"Could not get limits for hosted zone\", \"error\", err.Error())\n\t\t\tawsclient.AwsExporterMetrics.IncrementErrors()\n\t\t}\n\n\t\tlevel.Info(e.logger).Log(\"msg\", \"Route53 metrics Updated\")\n\n\t\tctxCancelFunc() // should never do anything as we don't run stuff in the background\n\n\t\ttime.Sleep(e.interval)\n\t}\n}", "func (e *EndpointsManager) Run() {\n\tticker := time.NewTicker(time.Second * 10)\n\tdefer ticker.Stop()\n\tfor {\n\t\tselect {\n\t\tcase <-ticker.C:\n\t\t\te.watchAliveEndpoint()\n\t\tcase <-e.exit:\n\t\t\tclose(e.closed)\n\t\t\tcommon.Logger.Info(\"service done!!!\")\n\t\t\treturn\n\t\t}\n\t}\n}", "func pingServer() error {\n\tfor i := 0; i < viper.GetInt(\"max_ping_count\"); i++ {\n\t\t// ping the server by sending a GET request to `/health`.\n\t\tresp, err := http.Get(viper.GetString(\"url\") + \"/sd/health\")\n\t\tif err == nil && resp.StatusCode == 200 {\n\t\t\treturn nil\n\t\t}\n\t\tlog.Print(\"Waiting for the router, retry in 1 second.\")\n\t\ttime.Sleep(time.Second)\n\t}\n\treturn errors.New(\"Cannot connect to the router.\")\n}", "func sendingLoop() {\n\tfor {\n\t\ttime.Sleep(time.Second)\n\n\t\tif len(sources) == 0 
{\n\t\t\tcontinue\n\t\t}\n\n\t\tnow := time.Now().Unix()\n\n\t\tfor _, source := range sources {\n\t\t\tperiod := int64(timeutil.DurationToSeconds(source.getPeriod()))\n\t\t\tlastSendTime := source.getLastSendingDate()\n\n\t\t\tif period == 0 || lastSendTime <= 0 {\n\t\t\t\tgo source.Send()\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif period+lastSendTime <= now {\n\t\t\t\tgo source.Send()\n\t\t\t}\n\t\t}\n\t}\n}", "func (c app) sendLoop() {\n\tfor {\n\t\tif b, open := <-c.out; !open {\n\t\t\tInfo(\"Send loop closed\")\n\t\t\tbreak\n\t\t} else {\n\t\t\tc.Connection.Send(b)\n\t\t}\n\t}\n}", "func storePingResults(results chan Host, hostRegistry *HostRegistry) {\n\tfor {\n\t\thost := <-results\n\n\t\tlog.Info(\"Storing results for host: %q\\n\", host)\n\n\t\thostRegistry.UpdateHost(host)\n\t}\n}", "func (k *KubeBoot) RunSyncLoop() {\n\tctx := context.Background()\n\n\tif k.Master {\n\t\tclient, err := k.Kubernetes.KubernetesClient()\n\t\tif err != nil {\n\t\t\tpanic(fmt.Sprintf(\"could not create kubernetes client: %v\", err))\n\t\t}\n\n\t\tklog.Info(\"polling for apiserver readiness\")\n\t\tfor {\n\t\t\t_, err = client.CoreV1().Namespaces().Get(ctx, \"kube-system\", metav1.GetOptions{})\n\t\t\tif err == nil {\n\t\t\t\tklog.Info(\"successfully connected to the apiserver\")\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tklog.Infof(\"failed to connect to the apiserver (will sleep and retry): %v\", err)\n\t\t\ttime.Sleep(5 * time.Second)\n\t\t}\n\t}\n\n\tfor {\n\t\tif err := k.syncOnce(ctx); err != nil {\n\t\t\tklog.Warningf(\"error during attempt to bootstrap (will sleep and retry): %v\", err)\n\t\t}\n\n\t\ttime.Sleep(1 * time.Minute)\n\t}\n}", "func (i *Client) loop() {\n\tfor m := range i.events {\n\t\tgo i.handleEvent(m)\n\t}\n\n\tfmt.Println(\"Done reading events\")\n}", "func (db *DB) watchLoop(ctx context.Context, ch chan struct{}) {\n\tvar psConn redis.Conn\n\teb := backoff.NewExponentialBackOff()\n\tfor {\n\t\tpsConn = db.pool.Get()\n\t\tpsc := redis.PubSubConn{Conn: psConn}\n\t\tif err := psc.PSubscribe(\"__keyspace*__:\" + db.versionSet); err != nil {\n\t\t\tlog.Error().Err(err).Msg(\"failed to subscribe to version set channel\")\n\t\t\tpsConn.Close()\n\t\t\treturn\n\t\t}\n\t\tif err := db.doNotifyLoop(ctx, ch, &psc, eb); err != nil {\n\t\t\tpsConn.Close()\n\t\t\treturn\n\t\t}\n\t}\n}", "func pingDatabase(db *sql.DB) (err error) {\n\tfor i := 0; i < 30; i++ {\n\t\terr = db.Ping()\n\t\tif err == nil {\n\t\t\treturn\n\t\t}\n\t\tlogrus.Infof(\"database ping failed. 
retry in 1s\")\n\t\ttime.Sleep(time.Second)\n\t}\n\treturn\n}", "func (module *ScreensaverModule) Loop(srv *Server) {\n\tfor {\n\t\tselect {\n\t\tcase <-time.After(5 * time.Second):\n\t\t\tmodule.Tick(srv)\n\t\t}\n\t}\n}", "func (c *Curator) updateTsmonLoop() {\n\tscanTicker := time.NewTicker(10 * time.Second)\n\n\tfor {\n\t\tc.blockIfNotLeader()\n\t\tids := c.stateHandler.GetKnownTSIDs()\n\t\tc.tsMon.updateExpected(ids)\n\t\tlog.Infof(\"@@@ tsmon: %s\", c.tsMon)\n\t\t<-scanTicker.C\n\t}\n}", "func (check *HealthCheck) CheckHealth(brokerUpdates chan<- Update, clusterUpdates chan<- Update, stop <-chan struct{}) {\n\tmanageTopic := !check.config.NoTopicCreation\n\terr := check.connect(manageTopic, stop)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer check.close(manageTopic)\n\n\tcheck.randSrc = rand.NewSource(time.Now().UnixNano())\n\n\tlog.Info(\"starting health check loop\")\n\tticker := time.NewTicker(check.config.CheckInterval)\n\tdefer ticker.Stop()\n\tfor {\n\t\tselect {\n\t\tcase <-ticker.C:\n\t\t\tbrokerStatus := check.checkBrokerHealth()\n\n\t\t\tdata, err := json.Marshal(brokerStatus)\n\t\t\tif err != nil {\n\t\t\t\tlog.Warn(\"Error while marshaling broker status: %s\", err.Error())\n\t\t\t\tdata = simpleStatus(brokerStatus.Status)\n\t\t\t}\n\n\t\t\tbrokerUpdates <- Update{brokerStatus.Status, data}\n\n\t\t\tif brokerStatus.Status == unhealthy {\n\t\t\t\tclusterUpdates <- Update{red, simpleStatus(red)}\n\t\t\t\tlog.Info(\"closing connection and reconnecting\")\n\t\t\t\terr := check.reconnect(stop)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Info(\"error while reconnecting:\", err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tlog.Info(\"reconnected\")\n\t\t\t} else {\n\t\t\t\tclusterStatus := check.checkClusterHealth()\n\t\t\t\tdata, err := json.Marshal(clusterStatus)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Warn(\"Error while marshaling cluster status: %s\", err.Error())\n\t\t\t\t\tdata = simpleStatus(clusterStatus.Status)\n\t\t\t\t}\n\n\t\t\t\tclusterUpdates <- Update{clusterStatus.Status, data}\n\t\t\t}\n\t\tcase <-stop:\n\t\t\treturn\n\t\t}\n\t}\n}" ]
[ "0.8008959", "0.73642355", "0.7354216", "0.716509", "0.6779733", "0.67345625", "0.6406868", "0.63669646", "0.6331098", "0.63261425", "0.626655", "0.6236336", "0.621892", "0.61981124", "0.60962933", "0.6069212", "0.6055444", "0.604405", "0.59858143", "0.5978686", "0.59705174", "0.59695345", "0.5967623", "0.59548485", "0.59469837", "0.59454775", "0.5942324", "0.5923562", "0.5898109", "0.58969814", "0.5895752", "0.5877712", "0.5834711", "0.58290654", "0.58159465", "0.5798434", "0.57963544", "0.57910234", "0.5788358", "0.57873267", "0.57870215", "0.5770347", "0.5755688", "0.573172", "0.57001513", "0.5669963", "0.56699306", "0.56652504", "0.56602764", "0.5659498", "0.565529", "0.56540906", "0.5645345", "0.56353015", "0.5631964", "0.56271076", "0.56195474", "0.56195474", "0.56195474", "0.5610308", "0.5597348", "0.55745524", "0.55600286", "0.5555907", "0.5549728", "0.5545648", "0.5545021", "0.55273867", "0.55168444", "0.5511696", "0.5510423", "0.5507645", "0.55061823", "0.5502182", "0.54841286", "0.54550886", "0.5435456", "0.543016", "0.543016", "0.5419392", "0.5406355", "0.5406355", "0.5400181", "0.53947824", "0.5393066", "0.5380725", "0.5371781", "0.53678995", "0.5347716", "0.53443784", "0.53404325", "0.5339705", "0.5321386", "0.5319932", "0.53128475", "0.5305506", "0.5304775", "0.52974993", "0.529366", "0.5289379" ]
0.8445327
0
pingEmitter pulls Remotes from the ping queue (pingChan) and pings them. Pinging a remote cannot take longer than PingTimeoutMillis.
func (rcs *Service) pingEmitter(pingChan <-chan *model.RemoteCluster, done <-chan struct{}) {
	for {
		select {
		case rc := <-pingChan:
			if rc == nil {
				return
			}

			// Remember the connection state before pinging so a transition
			// can be detected afterwards.
			online := rc.IsOnline()

			if err := rcs.pingRemote(rc); err != nil {
				rcs.server.Log().Log(mlog.LvlRemoteClusterServiceWarn, "Remote cluster ping failed",
					mlog.String("remote", rc.DisplayName),
					mlog.String("remoteId", rc.RemoteId),
					mlog.Err(err),
				)
			}

			// If the ping flipped the remote between online and offline,
			// record the change and fire a connection state change event.
			if online != rc.IsOnline() {
				if metrics := rcs.server.GetMetrics(); metrics != nil {
					metrics.IncrementRemoteClusterConnStateChangeCounter(rc.RemoteId, rc.IsOnline())
				}
				rcs.fireConnectionStateChgEvent(rc)
			}
		case <-done:
			return
		}
	}
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (rcs *Service) pingLoop(done <-chan struct{}) {\n\tpingChan := make(chan *model.RemoteCluster, MaxConcurrentSends*2)\n\n\t// create a thread pool to send pings concurrently to remotes.\n\tfor i := 0; i < MaxConcurrentSends; i++ {\n\t\tgo rcs.pingEmitter(pingChan, done)\n\t}\n\n\tgo rcs.pingGenerator(pingChan, done)\n}", "func ping(pings chan <- string, msg string) {\n\tpings <- msg\n}", "func ping(pings chan<- string, msg string) {\n\tpings <- msg\n}", "func ping(pings chan<- string, msg string) {\n\tpings <- msg\n}", "func ping(pings chan<- string, msg string) {\n\tpings <- msg\n}", "func pingLoop(results chan Host, hostRegistry *HostRegistry, interval time.Duration, timeout time.Duration) {\n\tfor {\n\t\thostAddresses := hostRegistry.GetHostAddresses()\n\n\t\tlog.Info(\"Pinging these addresses: %q\\n\", hostAddresses)\n\n\t\tfor _, address := range hostAddresses {\n\t\t\tlog.Debug(\"Pinging: %v\\n\", address)\n\n\t\t\thost, err := hostRegistry.GetHost(address)\n\t\t\tif err != nil {\n\t\t\t\tlog.Warning(\"GetHost() returned error=%v for address=%v\", err, address)\n\t\t\t}\n\n\t\t\tgo pingAddress(results, host, timeout)\n\t\t}\n\n\t\tlog.Debug(\"Started pings for all hosts. Sleeping for: %v\", interval)\n\t\ttime.Sleep(interval)\n\t}\n}", "func (c *MockInfluxClient) Ping(timeout time.Duration) (time.Duration, string, error) {\n\tif !c.Connected {\n\t\treturn time.Millisecond, \"\", errors.New(\"Mock client set to disconnected\")\n\t}\n\n\treturn time.Millisecond, \"\", nil\n}", "func (pinger *PerpetualPinger) pingAsync(self phi.Task) {\n\tresponder := make(chan phi.Message, 1)\n\tok := pinger.ponger.Send(Ping{Responder: responder})\n\tif !ok {\n\t\tpanic(\"failed to send ping\")\n\t}\n\tgo func() {\n\t\tfor m := range responder {\n\t\t\tok := self.Send(m)\n\t\t\tif !ok {\n\t\t\t\tpanic(\"failed to receive pong\")\n\t\t\t}\n\t\t}\n\t}()\n}", "func pingAddress(results chan Host, oldHost Host, timeout time.Duration) {\n\tisUp, rtt, err := pingWithFastping(oldHost.Address, timeout)\n\n\tif err != nil {\n\t\tlog.Error(err.Error())\n\t}\n\n\tnewHost := Host{}\n\n\tnewHost.Address = oldHost.Address\n\tnewHost.Description = oldHost.Description\n\n\tif isUp {\n\t\tnewHost.Status = Online\n\t\tnewHost.Latency = rtt\n\t} else {\n\t\tnewHost.Status = Offline\n\t}\n\tlog.Info(\"Pinged: address=%q status=%s rtt=%s\\n\", newHost.Address, newHost.Status, newHost.Latency)\n\n\tresults <- newHost\n}", "func (p *Ping) Ping(target p2pcrypto.PublicKey, msg string) (string, error) {\n\tvar response string\n\treqid := crypto.NewUUID()\n\tping := &pb.Ping{\n\t\tReqID: reqid[:],\n\t\tReq: true,\n\t\tMessage: msg,\n\t}\n\tpchan, err := p.sendRequest(target, reqid, ping)\n\tif err != nil {\n\t\treturn response, err\n\t}\n\n\ttimer := time.NewTimer(PingTimeout)\n\tselect {\n\tcase res := <-pchan:\n\t\tresponse = res.Message\n\t\tp.pendMuxtex.Lock()\n\t\tdelete(p.pending, reqid)\n\t\tp.pendMuxtex.Unlock()\n\tcase <-timer.C:\n\t\treturn response, errPingTimedOut\n\t}\n\n\treturn response, nil\n}", "func Pinging(memberList *[]MemberID, msgQueue *[]GossipMessage, selfMember *MemberID) {\n\tfor {\n\t\tif len(*msgQueue) == 0 {\n\t\t\tvar msg GossipMessage\n\t\t\tmsg.Status = 3\n\t\t\tGossip(msg, memberList, msgQueue, selfMember)\n\t\t} else {\n\t\t\t// fmt.Println(*msgQueue)\n\t\t\tGossip((*msgQueue)[0], memberList, msgQueue, selfMember)\n\t\t\t*msgQueue = (*msgQueue)[1:]\n\t\t}\n\t\ttime.Sleep(600 * time.Millisecond)\n\t}\n}", "func (p *pinger) Ping(addr net.Addr) {\n\tdefer close(p.reportChan)\n\tdefer 
close(p.errChan)\n\n\tconn, err := icmp.ListenPacket(\"ip4:icmp\", \"\")\n\tif err != nil {\n\t\tp.errChan <- fmt.Errorf(\"cannot connect to addr %s: %v\", addr, err)\n\t\treturn\n\t}\n\tdefer conn.Close()\n\n\tseq := 0\n\tfor {\n\t\tselect {\n\t\tcase <-p.stop:\n\t\t\treturn\n\t\tdefault:\n\t\t\tping, err := p.ping(conn, addr, seq)\n\t\t\tif err != nil {\n\t\t\t\tp.errChan <- err\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tp.reportChan <- ping\n\t\t\tseq++\n\n\t\t\tif p.opts.Count != 0 && int(p.opts.Count) == seq {\n\t\t\t\tp.Stop()\n\t\t\t} else {\n\t\t\t\ttime.Sleep(time.Second)\n\t\t\t}\n\t\t}\n\t}\n}", "func (m *manager) onPing(addr string, rtt time.Duration) error {\n\tv := int32(rtt.Nanoseconds() / 1000)\n\tif v == 0 { // Don't let it be zero, otherwise the update would fail\n\t\tv = 1\n\t}\n\n\tm.monitor.Measure(addr, v)\n\treturn nil\n}", "func (pe *WzPingEvent) Ping(descr string, seconds int) (int64, error) {\n\treturn pe.waitForResponse(pe.ping(wzlib.CHANNEL_CLIENT), descr, seconds)\n}", "func (p *Peer) pingHandler() {\n\tpingTicker := time.NewTicker(pingInterval)\n\tdefer pingTicker.Stop()\n\nout:\n\tfor {\n\t\tselect {\n\t\tcase <-pingTicker.C:\n\t\t\tnonce, err := wire.RandomUint64()\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorf(\"Not sending ping to %s: %v\", p, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tp.QueueMessage(wire.NewMsgPing(nonce), nil)\n\n\t\tcase <-p.quit:\n\t\t\tbreak out\n\t\t}\n\t}\n}", "func (p Pinger) Pong(timeout int) (msgList []*icmp.Message, err error) {\r\n\tcErr := make(chan error, 1)\r\n\tcMsg := make(chan *icmp.Message, 1)\r\n\tfor i := 0; i < p.amt; i++ {\r\n\t\tgo func() {\r\n\t\t\tmsg, err := p.RecvOnePong()\r\n\t\t\tif err != nil {\r\n\t\t\t\tcErr <- err\r\n\t\t\t\treturn\r\n\t\t\t}\r\n\t\t\tcMsg <- msg\r\n\t\t}()\r\n\t}\r\n\tfor i := 0; i < p.amt; i++ {\r\n\t\tselect {\r\n\t\tcase res := <-cErr:\r\n\t\t\terr = res\r\n\t\tcase res := <-cMsg:\r\n\t\t\tmsgList = append(msgList, res)\r\n\t\tcase <-time.After(time.Duration(timeout) * time.Millisecond):\r\n\t\t\terr = errors.New(\"timeout\")\r\n\t\t}\r\n\t}\r\n\treturn\r\n}", "func (m *MockUDPClient) Ping(num uint) {\n\tfor i := 0; i < int(num); i++ {\n\t\tm.ctrl <- true\n\t}\n}", "func (s *Service) Ping(topicID string) error {\n\tt := time.Now()\n\tpf, err := s.client.Publish(topicID, []byte(fmt.Sprintf(\"pinged at %s\", t)), 2, false)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn pf.Wait(5 * time.Second)\n}", "func (connection *Connection) ping() {\n\tfor {\n\t\ttime.Sleep(1 * time.Second)\n\t\tif len(connection.consumers) > 0 {\n\t\t\t//do some ping, if no response then kill it\n\t\t\tfor _, consumer := range connection.consumers {\n\t\t\t\t_, pingError := consumer.connection.Write([]byte(\"hunga\"))\n\t\t\t\tif pingError != nil {\n\t\t\t\t\t// fmt.Print(\"PING ERROR\")\n\t\t\t\t\tconnection.killConsumer(consumer.id)\n\t\t\t\t} else {\n\t\t\t\t\tconnection.getConsumerMessage(consumer.id)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}", "func (p *protocol) Ping(ctx context.Context, peer p2pcrypto.PublicKey) error {\n\tplogger := p.logger.WithFields(log.String(\"type\", \"ping\"), log.String(\"to\", peer.String()))\n\tplogger.Debug(\"send ping request\")\n\n\tdata, err := types.InterfaceToBytes(p.local)\n\tif err != nil {\n\t\treturn err\n\t}\n\tch := make(chan []byte, 1)\n\tfoo := func(msg []byte) {\n\t\tplogger.Debug(\"handle ping response\")\n\t\tsender := &node.Info{}\n\t\terr := types.BytesToInterface(msg, sender)\n\n\t\tif err != nil {\n\t\t\tplogger.With().Warning(\"got unreadable pong\", 
log.Err(err))\n\t\t\treturn\n\t\t}\n\t\t// TODO: if we pinged it we already have id so no need to update,\n\t\t// but what if id or listen address has changed?\n\t\tch <- sender.ID.Bytes()\n\t}\n\n\terr = p.msgServer.SendRequest(ctx, server.PingPong, data, peer, foo, func(err error) {})\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttimeout := time.NewTimer(MessageTimeout) // todo: check whether this is useless because of `requestLifetime`\n\tselect {\n\tcase id := <-ch:\n\t\tif id == nil {\n\t\t\treturn errors.New(\"failed sending message\")\n\t\t}\n\t\tif !bytes.Equal(id, peer.Bytes()) {\n\t\t\treturn errors.New(\"got pong with different public key\")\n\t\t}\n\tcase <-timeout.C:\n\t\treturn errors.New(\"ping timeout\")\n\t}\n\n\treturn nil\n}", "func ping(c chan string) {\n\n\t// on increment de 1 en 1 a chaque tour de boucle\n\tfor i := 1; ; i++ {\n\t\tc <- fmt.Sprintf(\"ping %v\", i)\n\t}\n}", "func PingTimeout(addr string, timeout int) (PingResponse, error) {\n\treturn ping(addr, time.Millisecond * time.Duration(timeout))\n}", "func (m *Memberlist) Ping(node string, addr net.Addr) (time.Duration, error) {\n\t// Prepare a ping message and setup an ack handler.\n\tselfAddr, selfPort := m.getAdvertise()\n\tping := ping{\n\t\tSeqNo: m.nextSeqNo(),\n\t\tNode: node,\n\t\tSourceAddr: selfAddr,\n\t\tSourcePort: selfPort,\n\t\tSourceNode: m.config.Name,\n\t}\n\tackCh := make(chan ackMessage, m.config.IndirectChecks+1)\n\tm.setProbeChannels(ping.SeqNo, ackCh, nil, m.config.ProbeInterval)\n\n\ta := Address{Addr: addr.String(), Name: node}\n\n\t// Send a ping to the node.\n\tif err := m.encodeAndSendMsg(a, pingMsg, &ping); err != nil {\n\t\treturn 0, err\n\t}\n\n\t// Mark the sent time here, which should be after any pre-processing and\n\t// system calls to do the actual send. 
This probably under-reports a bit,\n\t// but it's the best we can do.\n\tsent := time.Now()\n\n\t// Wait for response or timeout.\n\tselect {\n\tcase v := <-ackCh:\n\t\tif v.Complete == true {\n\t\t\treturn v.Timestamp.Sub(sent), nil\n\t\t}\n\tcase <-time.After(m.config.ProbeTimeout):\n\t\t// Timeout, return an error below.\n\t}\n\n\tm.logger.Printf(\"[DEBUG] memberlist: Failed UDP ping: %v (timeout reached)\", node)\n\treturn 0, NoPingResponseError{ping.Node}\n}", "func (me *Mgr) doPing() {\n\tfor !me.stopped {\n\t\tme.workers.Scan(func(id string, w interface{}) {\n\t\t\terr := w.(*Worker).Ping()\n\t\t\tif err != DEADERR {\n\t\t\t\treturn\n\t\t\t}\n\t\t\t// dead\n\t\t\tme.deadChan <- id\n\t\t\tme.deadWorkers.Set(id, []byte(\"OK\"))\n\t\t\tme.workers.Delete(id)\n\t\t})\n\t\ttime.Sleep(15 * time.Second)\n\t}\n}", "func PingTimeout(addr string, timeout int) (PingResponse, error) {\n\tctx := context.Background()\n\tctx, cancel := context.WithTimeout(ctx, time.Duration(timeout)*time.Millisecond)\n\tdefer cancel()\n\treturn PingContext(ctx, addr)\n}", "func (p *peer) Ping() {\n\t// this task is recorded in the waitgroup, so clear waitgroup on return\n\tdefer p.ms.Done()\n\t// This must come after Done and before Reporter (executes in reverse order)\n\tdefer p.ms.Delete(p)\n\n\tif p.ms.Verbose() > 1 {\n\t\tlog.Println(\"ping\", p.Url)\n\t}\n\n\tmaxfail := p.Maxfail // default before thread quits trying\n\tmn := \"TCP RTT\" // CloudWatch metric name\n\tns := \"pingmesh\" // Cloudwatch namespace\n\n\tlimit := p.Limit // number of pings before we quit, \"forever\" if zero\n\tif limit == 0 {\n\t\tlimit = math.MaxInt32\n\t}\n\tif maxfail > limit {\n\t\tmaxfail = limit\n\t}\n\n\t////\n\t// Reporter summarizes ping statistics to stdout at the end of the run\n\tdefer func() { // Reporter\n\t\tif p.Pings == 0 {\n\t\t\tfmt.Printf(\"\\nRecorded 0 valid samples, %d of %d failures\\n\", p.Fails, maxfail)\n\t\t\treturn\n\t\t}\n\n\t\tfc := float64(p.Pings)\n\t\telapsed := Hhmmss_d(p.PingTotals.Start)\n\n\t\tfmt.Printf(\"\\nRecorded %d samples in %s, average values:\\n\"+\"%s\"+\n\t\t\t\"%d %-6s\\t%.03f\\t%.03f\\t%.03f\\t%.03f\\t%.03f\\t%.03f\\t\\t%d\\t%s\\t%s\\n\\n\",\n\t\t\tp.Pings, elapsed, pt.PingTimesHeader(),\n\t\t\tp.Pings, elapsed,\n\t\t\tpt.Msec(p.PingTotals.DnsLk)/fc,\n\t\t\tpt.Msec(p.PingTotals.TcpHs)/fc,\n\t\t\tpt.Msec(p.PingTotals.TlsHs)/fc,\n\t\t\tpt.Msec(p.PingTotals.Reply)/fc,\n\t\t\tpt.Msec(p.PingTotals.Close)/fc,\n\t\t\tpt.Msec(p.PingTotals.RespTime())/fc,\n\t\t\tp.PingTotals.Size/int64(p.Pings),\n\t\t\tpt.LocationOrIp(p.PingTotals.Location),\n\t\t\t*p.PingTotals.DestUrl)\n\t}()\n\n\tp.FirstPing = time.Now().UTC().Truncate(time.Second)\n\tfor {\n\t\tif p.ms.DoneChan() == nil {\n\t\t\t// channel is nil, reading from it will block, return\n\t\t\tif p.ms.Verbose() > 1 {\n\t\t\t\tlog.Println(\"peer.Ping: channel is nil, returning\")\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\t////\n\t\t// Sleep first, allows risk-free continue from error cases below\n\t\tvar sleepTime int\n\t\tif p.Pings == 0 {\n\t\t\tif sleepTime < p.Delay {\n\t\t\t\tsleepTime++\n\t\t\t}\n\t\t} else {\n\t\t\tsleepTime = p.Delay\n\t\t}\n\n\t\tselect {\n\t\tcase <-time.After(JitterPct(sleepTime, 1)):\n\t\t\t// we waited for the delay and got nothing ... 
loop around\n\n\t\tcase newdelay, more := <-p.ms.DoneChan():\n\t\t\tif !more {\n\t\t\t\t// channel is closed, we are done -- goodbye\n\t\t\t\tif p.ms.Verbose() > 1 {\n\t\t\t\t\tlog.Println(\"peer.Ping: channel is closed, returning\")\n\t\t\t\t}\n\t\t\t\treturn\n\t\t\t}\n\t\t\t// else we got a new delay on this channel (0 is signal to stop)\n\t\t\tp.Delay = newdelay\n\t\t\tif p.Delay <= 0 {\n\t\t\t\treturn\n\t\t\t}\n\t\t\t// we did not (finish) our sleep in this case ...\n\t\t}\n\n\t\t////\n\t\t// Try to fetch the URL\n\t\tptResult := client.FetchURL(p.Url, p.PeerIP)\n\n\t\tswitch {\n\t\t// result nil, something totally failed\n\t\tcase nil == ptResult:\n\t\t\tfunc() {\n\t\t\t\tp.mu.Lock()\n\t\t\t\tdefer p.mu.Unlock()\n\t\t\t\tp.Fails++\n\t\t\t}()\n\t\t\tlog.Println(\"fetch failure\", p.Fails, \"of\", maxfail, \"on\", p.Url)\n\t\t\tif p.Fails >= maxfail {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tcontinue\n\n\t\t// HTTP 200 OK and 300 series \"OK\" status codes\n\t\tcase ptResult.RespCode <= 304:\n\t\t\t// Take a write lock on this peer before updating values\n\t\t\t// (make each peer read/write reentrant, also []*peers)\n\t\t\tfunc() {\n\t\t\t\tp.mu.Lock()\n\t\t\t\tdefer p.mu.Unlock()\n\t\t\t\tp.Pings++\n\t\t\t\tnow := time.Now().UTC()\n\t\t\t\tp.LatestPing = now.UTC().Truncate(time.Second)\n\t\t\t\tif p.Pings == 1 {\n\t\t\t\t\t////\n\t\t\t\t\t// first ping -- initialize ptResult\n\t\t\t\t\tp.PingTotals = *ptResult\n\t\t\t\t} else {\n\t\t\t\t\tp.PingTotals.DnsLk += ptResult.DnsLk\n\t\t\t\t\tp.PingTotals.TcpHs += ptResult.TcpHs\n\t\t\t\t\tp.PingTotals.TlsHs += ptResult.TlsHs\n\t\t\t\t\tp.PingTotals.Reply += ptResult.Reply\n\t\t\t\t\tp.PingTotals.Close += ptResult.Close\n\t\t\t\t\tp.PingTotals.Total += ptResult.Total\n\t\t\t\t\tp.PingTotals.Size += ptResult.Size\n\t\t\t\t}\n\n\t\t\t\tif len(p.PeerIP) == 0 && len(ptResult.Remote) > 0 {\n\t\t\t\t\tp.PeerIP = ptResult.Remote\n\t\t\t\t}\n\n\t\t\t\tif p.Location == client.LocUnknown {\n\t\t\t\t\tif *ptResult.Location != client.LocUnknown && len(*ptResult.Location) > 0 {\n\t\t\t\t\t\tp.Location = *ptResult.Location\n\t\t\t\t\t\tp.PingTotals.Location = &p.Location\n\t\t\t\t\t\tif p.ms.Verbose() > 1 {\n\t\t\t\t\t\t\tlog.Println(\"Initialize remote location to\", *ptResult.Location)\n\t\t\t\t\t\t}\n\t\t\t\t\t} else {\n\t\t\t\t\t\t// It's not returning a pingmesh Location response, use hostname\n\t\t\t\t\t\tp.Location = p.Url\n\t\t\t\t\t\tp.PingTotals.Location = &p.Location\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}()\n\n\t\t// HTTP 500 series error\n\t\tcase ptResult.RespCode > 304:\n\t\t\tfunc() {\n\t\t\t\tp.mu.Lock()\n\t\t\t\tdefer p.mu.Unlock()\n\t\t\t\tp.Fails++\n\t\t\t}()\n\t\t\tremote := p.Location\n\t\t\tif len(remote) == 0 || remote == client.LocUnknown {\n\t\t\t\tif len(p.PeerIP) > 0 {\n\t\t\t\t\tremote = p.PeerIP\n\t\t\t\t} else {\n\t\t\t\t\tremote = p.Host\n\t\t\t\t}\n\t\t\t}\n\t\t\tif p.ms.Verbose() > 0 {\n\t\t\t\tfmt.Println(p.Pings, ptResult.MsecTsv())\n\t\t\t}\n\t\t\tif p.Fails >= maxfail {\n\t\t\t\tclient.LogSentry(sentry.LevelWarning, \"%s to %s: HTTP error %d hit failure limit %d on %s, Ping quitting\", p.ms.SrvLocation(), remote, ptResult.RespCode, p.Fails, p.Url)\n\t\t\t\treturn\n\t\t\t} else {\n\t\t\t\tlog.Println(p.ms.SrvLocation(), \"to\", remote, \"HTTP\", ptResult.RespCode, \"failure\", p.Fails, \"of\", maxfail, \"on\", p.Url)\n\t\t\t}\n\t\t\tcontinue\n\n\t\t\t////\n\t\t\t// Other HTTP response codes can be coded here (error, redirect)\n\t\t\t////\n\t\t}\n\n\t\t////\n\t\t// Execution should continue here only in NON-ERROR cases; errors\n\t\t// 
continue the for{} above\n\t\t////\n\n\t\tif p.ms.Verbose() > 0 {\n\t\t\tif p.ms.Verbose() > 1 {\n\t\t\t\tfmt.Println(p.Pings, ptResult.MsecTsv())\n\t\t\t} else {\n\t\t\t\tfmt.Printf(\"%3d %8.03f msec %20s %s\\n\", p.Pings, pt.Msec(ptResult.TcpHs), pt.LocationOrIp(ptResult.Location), ptResult.Remote)\n\t\t\t}\n\t\t}\n\n\t\tif p.ms.CwFlag() {\n\t\t\tmetric := pt.Msec(ptResult.TcpHs)\n\t\t\tmyLocation := p.ms.SrvLocation()\n\t\t\tif p.ms.Verbose() > 2 {\n\t\t\t\tlog.Println(\"publishing TCP RTT\", metric, \"msec to CloudWatch\", ns, \"from\", myLocation)\n\t\t\t}\n\t\t\trespCode := \"0\"\n\t\t\tif ptResult.RespCode >= 0 {\n\t\t\t\t// 000 in cloudwatch indicates it was a zero return code from lower layer\n\t\t\t\t// while single digit 0 indicates an error making the request\n\t\t\t\trespCode = fmt.Sprintf(\"%03d\", ptResult.RespCode)\n\t\t\t}\n\n\t\t\t////\n\t\t\t// Publish my location (IP or REP_LOCATION) and their location\n\t\t\tcw.PublishRespTime(myLocation, p.Location, respCode, metric, mn, ns)\n\t\t\t// NOTE: using network RTT estimate (TcpHs) rather than full page response time\n\t\t\t// TODO: This makes the legends wrong in Cloudwatch. Fix that.\n\t\t}\n\n\t\tif p.Pings >= limit {\n\t\t\t// report stats (see deferred func() above) upon return\n\t\t\treturn\n\t\t}\n\n\t\tif p.Delay <= 0 {\n\t\t\t// we were signaled to stop\n\t\t\treturn\n\t\t}\n\t}\n}", "func (s *ClientState) Ping(pingMsg MsgBody) error {\n\tlogInfo(\"Received ping message\")\n\trs := s.RegisterSystem(pingMsg.System)\n\tif rs.RedeemToken(pingMsg.Token) {\n\t\treturn s.SendPong(rs)\n\t}\n\treturn nil\n}", "func PingScan(outFileName string, workerCount int, targetFileName string, timeoutSeconds int) (error) {\n\n\ttimeout = time.Duration(timeoutSeconds) * time.Second\n\t// GET SUBNET LIST\n\tvar targets[] string\n\tvar err error\n\n\tif targetFileName == \"\" {\n\t\t// GET TARGET LIST\n\t\tDebug(\"Using RFC1918 subnets as targets\")\n\t\ttargets, err = makeHostList(RFC1918Subnets)\n\t}else {\n\t\t// PARSE FROM FILE\n\t\tDebug(\"Reading targets from file \" + targetFileName)\n\t\ttargets, err = getTargetsFromFile(targetFileName)\n\t}\n\t\n\tif err != nil{\n\t\treturn err\n\t}\n\tDebug(fmt.Sprintf(\"Identified %d targets :)\", len(targets)))\n\tnumPings = len(targets)\n\tnumPingsFinished = 0\n\t// ALLOCATE TASKS TO WORKERS\n\tDebug(\"Allocating tasks\")\n\tgo allocate(targets)\n\n\t// HANDLE RESULTS OF WORKER THREADS\n\tdone := make(chan bool)\n\tgo handlePongs(done, outFileName)\n\n\t// START WORKERS\n\tcreateWorkerPool(workerCount)\n\t<- done\n\n\t\n\treturn nil\n}", "func pong(pings <-chan string, pongs chan<- string) {\n\tmsg := <- pings\n\tpongs <- msg\n}", "func PingHosts(ipBase string, ipRange []int) {\n\n\tvar wg sync.WaitGroup\n\tcmd := LibConfig.SysCommands[\"PING\"] + \" -q -W 1 -c 1 \" + ipBase\n\n\tfor i := ipRange[0]; i < ipRange[1]; i++ {\n\t\twg.Add(1)\n\n\t\t// allow threaded system command calls to finish asynchronously\n\t\tgo func(i int, w *sync.WaitGroup) {\n\t\t\tdefer w.Done()\n\t\t\tRunCommand(cmd + strconv.Itoa(i))\n\t\t}(i, &wg)\n\t}\n\n\twg.Wait()\n\n}", "func (p *Pinger) Ping() error {\r\n\tconn, err := p.NewConn()\r\n\tif err != nil {\r\n\t\treturn err\r\n\t}\r\n\tp.conn = conn\r\n\tfor i := 0; i < p.amt; i++ {\r\n\t\terr = p.SendOnePing(i, conn)\r\n\t}\r\n\treturn err\r\n}", "func (e *Hosts) PingPong(ctx context.Context, stream hosts.Hosts_PingPongStream) error {\n\tfor {\n\t\treq, err := stream.Recv()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tlog.Logf(\"Got ping %v\", 
req.Stroke)\n\t\tif err := stream.Send(&hosts.Pong{Stroke: req.Stroke}); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n}", "func (p *peerAddr) ping(curTime time.Time) {\n\n\t// Advance forward and record the current time.\n\tp.lastPing = p.lastPing.Next()\n\tp.lastPing.Value = curTime\n}", "func ping(c chan<- string) {\n\tfor {\n\t\tvar str string\n\t\tfmt.Scanln(&str)\n\t\tc <- str\n\t}\n}", "func pinger(c chan<- string) {\n\tfor i := 0; ; i++ {\n\t\tc <- \"Ping\"\n\t}\n}", "func (a API) Ping(cmd *None) (e error) {\n\tRPCHandlers[\"ping\"].Call <-API{a.Ch, cmd, nil}\n\treturn\n}", "func periodicPing() {\n\tfor {\n\t\t// Shuffle membership list and get a member\n\t\t// Only executed when the membership list is not empty\n\t\tif CurrentList.Size() > 0 {\n\t\t\tmember := CurrentList.Shuffle()\n\t\t\t// Do not pick itself as the ping target\n\t\t\tif (member.TimeStamp == CurrentMember.TimeStamp) && (member.IP == CurrentMember.IP) {\n\t\t\t\ttime.Sleep(PingSendingPeriod)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tLogger.Info(\"Member (%d, %d) is selected by shuffling\\n\", member.TimeStamp, member.IP)\n\t\t\t// Get update entry from TTL Cache\n\t\t\tupdate, flag, err := getUpdate()\n\t\t\t// if no update there, do pure ping\n\t\t\tif err != nil {\n\t\t\t\tping(member)\n\t\t\t} else {\n\t\t\t\t// Send update as payload of ping\n\t\t\t\tpingWithPayload(member, update, flag)\n\t\t\t}\n\t\t}\n\t\ttime.Sleep(PingSendingPeriod)\n\t}\n}", "func pong(pings <-chan string, pongs chan<- string) {\n\tmsg := <-pings\n\tpongs <- msg\n}", "func pong(pings <-chan string, pongs chan<- string) {\n\tmsg := <-pings\n\tpongs <- msg\n}", "func (conn *Conn) ping() {\n\ttick := time.NewTicker(conn.PingFreq)\n\tfor {\n\t\tselect {\n\t\tcase <-tick.C:\n\t\t\tconn.Raw(fmt.Sprintf(\"PING :%d\", time.Now().UnixNano()))\n\t\tcase <-conn.cPing:\n\t\t\ttick.Stop()\n\t\t\treturn\n\t\t}\n\t}\n}", "func ping(cfg configFlags) {\n\t// Dial a remote server and send a stream to that server.\n\tc, err := vsock.Dial(uint32(cfg.contextID), uint32(cfg.port))\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to dial: %v\", err)\n\t}\n\tdefer c.Close()\n\n\tvar p func(i uint) []byte\n\tp = func(i uint) []byte {\n\t\tc := make([]byte, 8)\n\t\tbinary.LittleEndian.PutUint64(c, uint64(i))\n\t\tb := md5.Sum(c)\n\t\treturn b[:]\n\t}\n\n\tif cfg.pattern != \"\" {\n\t\tb, err := hex.DecodeString(cfg.pattern)\n\t\tif err != nil {\n\t\t\tlog.Println(\"pattern must be specified as hex digits\")\n\t\t\tlog.Fatalf(\"failed to decode pattern: %v\", err)\n\t\t}\n\t\tp = func(i uint) []byte { return b }\n\t\tfmt.Printf(\"PATTERN: %s\", cfg.pattern)\n\t}\n\n\tlogf(\"PING %s FROM %s\", c.LocalAddr(), c.RemoteAddr())\n\n\tbuf := make([]byte, 64)\n\ttick := time.NewTicker(cfg.interval)\n\tfor i := uint(0); cfg.count == 0 || i < cfg.count; i++ {\n\t\tn, err := c.Write(p(i))\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"error writing to socket: %v\", err)\n\t\t}\n\t\tn, err = c.Read(buf)\n\t\tfmt.Printf(\"%d bytes from %s: ping_seq=%d\\n\", n, c.RemoteAddr(), i)\n\t\t<-tick.C\n\t}\n}", "func (ghost *Ghost) Ping() error {\n\tpingHandler := func(res *Response) error {\n\t\tif res == nil {\n\t\t\tghost.reset = true\n\t\t\treturn errors.New(\"Ping timeout\")\n\t\t}\n\t\treturn nil\n\t}\n\treq := NewRequest(\"ping\", nil)\n\treq.SetTimeout(pingTimeout)\n\treturn ghost.SendRequest(req, pingHandler)\n}", "func (a *AniDBUDP) Ping() <-chan *PingReply {\n\tch := make(chan *PingReply, 2)\n\tgo func() {\n\t\treply := <-a.SendRecv(\"PING\", ParamMap{\"nat\": 1})\n\n\t\tr := 
&PingReply{APIReply: reply}\n\t\tif r.Error() == nil {\n\t\t\tport, _ := strconv.ParseUint(reply.Lines()[1], 10, 16)\n\t\t\tr.Port = uint16(port)\n\t\t}\n\t\tch <- r\n\t\tclose(ch)\n\t}()\n\treturn ch\n}", "func sleepMonitor() {\n\tc, err := icmp.ListenPacket(\"ip4:icmp\", \"0.0.0.0\")\n\tif err != nil {\n\t\tlog.Printf(\"error listening for udp - sending data to all ports for all connected clients. err: %s\", err)\n\t\treturn\n\t}\n\tgo icmpEchoSender(c)\n\tdefer c.Close()\n\tfor {\n\t\tbuf := make([]byte, 1500)\n\t\tn, peer, err := c.ReadFrom(buf)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"%s\\n\", err.Error())\n\t\t\tcontinue\n\t\t}\n\t\tmsg, err := icmp.ParseMessage(1, buf[:n])\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tip := peer.String()\n\n\t\t// Look for echo replies, mark it as received.\n\t\tif msg.Type == ipv4.ICMPTypeEchoReply {\n\t\t\tnetMutex.Lock()\n\t\t\tpingResponse[ip] = stratuxClock.Time\n\t\t\tnetMutex.Unlock()\n\t\t\tcontinue // No further processing needed.\n\t\t}\n\n\t\t// Only deal with ICMP Unreachable packets (since that's what iOS and Android seem to be sending whenever the apps are not available).\n\t\tif msg.Type != ipv4.ICMPTypeDestinationUnreachable {\n\t\t\tcontinue\n\t\t}\n\t\t// Packet parsing.\n\t\tmb, err := msg.Body.Marshal(1)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tif len(mb) < 28 {\n\t\t\tcontinue\n\t\t}\n\n\t\t// The unreachable port.\n\t\tport := (uint16(mb[26]) << 8) | uint16(mb[27])\n\t\tipAndPort := ip + \":\" + strconv.Itoa(int(port))\n\n\t\tnetMutex.Lock()\n\t\tp, ok := outSockets[ipAndPort]\n\t\tif !ok {\n\t\t\t// Can't do anything, the client isn't even technically connected.\n\t\t\tnetMutex.Unlock()\n\t\t\tcontinue\n\t\t}\n\t\tp.LastUnreachable = stratuxClock.Time\n\t\toutSockets[ipAndPort] = p\n\t\tnetMutex.Unlock()\n\t}\n}", "func pinger(c chan string) {\n\tfor i := 0; ; i++ {\n\t\tc <- \"ping\"\n\t}\n}", "func (cc *ClientConn) Ping(ctx context.Context) error {\n\tc := make(chan struct{})\n\t// Generate a random payload\n\tvar p [8]byte\n\tfor {\n\t\tif _, err := rand.Read(p[:]); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tcc.mu.Lock()\n\t\t// check for dup before insert\n\t\tif _, found := cc.pings[p]; !found {\n\t\t\tcc.pings[p] = c\n\t\t\tcc.mu.Unlock()\n\t\t\tbreak\n\t\t}\n\t\tcc.mu.Unlock()\n\t}\n\terrc := make(chan error, 1)\n\tgo func() {\n\t\tcc.wmu.Lock()\n\t\tdefer cc.wmu.Unlock()\n\t\tif err := cc.fr.WritePing(false, p); err != nil {\n\t\t\terrc <- err\n\t\t\treturn\n\t\t}\n\t\tif err := cc.bw.Flush(); err != nil {\n\t\t\terrc <- err\n\t\t\treturn\n\t\t}\n\t}()\n\tselect {\n\tcase <-c:\n\t\treturn nil\n\tcase err := <-errc:\n\t\treturn err\n\tcase <-ctx.Done():\n\t\treturn ctx.Err()\n\tcase <-cc.readerDone:\n\t\t// connection closed\n\t\treturn cc.readerErr\n\t}\n}", "func AddPingTimeout() {}", "func (s *SWIM) indirectProbe(target *Member) error {\n\twg := &sync.WaitGroup{}\n\twg.Add(s.config.K)\n\n\t// with cancel we can send the signal to goroutines which share\n\t// this @ctx context\n\tctx, cancel := context.WithCancel(context.Background())\n\n\tdone := make(chan IndProbeResponse, s.config.K)\n\n\tdefer func() {\n\t\tcancel()\n\t\tclose(done)\n\t}()\n\n\tkMembers := s.memberMap.SelectKRandomMemberID(s.config.K)\n\tfor _, m := range kMembers {\n\t\tgo func(mediator Member) {\n\t\t\tdefer wg.Done()\n\n\t\t\ttask := func() (interface{}, error) {\n\t\t\t\treturn s.indirectPing(mediator, *target)\n\t\t\t}\n\n\t\t\tresp := NewTaskRunner(task, ctx).Start()\n\t\t\tif resp.err != nil {\n\t\t\t\tdone <- 
IndProbeResponse{\n\t\t\t\t\terr: resp.err,\n\t\t\t\t\tmsg: pb.Message{},\n\t\t\t\t}\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tmsg, ok := resp.payload.(pb.Message)\n\t\t\tif !ok {\n\t\t\t\tdone <- IndProbeResponse{\n\t\t\t\t\terr: ErrInvalidPayloadType,\n\t\t\t\t\tmsg: pb.Message{},\n\t\t\t\t}\n\t\t\t\treturn\n\t\t\t}\n\t\t\tdone <- IndProbeResponse{\n\t\t\t\terr: nil,\n\t\t\t\tmsg: msg,\n\t\t\t}\n\t\t}(m)\n\t}\n\n\t// wait until k-random member sends back response, if response message\n\t// is Ack message, then indirectProbe success because one of k-member\n\t// success UDP communication, if Nack message or Invalid message, increase\n\t// @unexpectedRespCounter then wait other member's response\n\n\tunexpectedResp := make([]IndProbeResponse, 0)\n\n\tfor {\n\t\tresp := <-done\n\n\t\tif !resp.Ok() {\n\t\t\tunexpectedResp = append(unexpectedResp, resp)\n\t\t\tif len(unexpectedResp) >= s.config.K {\n\t\t\t\tiLogger.Infof(nil, \"unexpected responses [%v]\", unexpectedResp)\n\t\t\t\treturn ErrIndProbeFailed\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\tcancel()\n\t\twg.Wait()\n\t\treturn nil\n\t}\n}", "func (ip *icmpProxy) Serve(ctx context.Context) error {\n\tgo func() {\n\t\t<-ctx.Done()\n\t\tip.conn.Close()\n\t}()\n\tgo func() {\n\t\tip.srcFunnelTracker.ScheduleCleanup(ctx, ip.idleTimeout)\n\t}()\n\tbuf := make([]byte, mtu)\n\ticmpDecoder := packet.NewICMPDecoder()\n\tfor {\n\t\tn, from, err := ip.conn.ReadFrom(buf)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treply, err := parseReply(from, buf[:n])\n\t\tif err != nil {\n\t\t\tip.logger.Debug().Err(err).Str(\"dst\", from.String()).Msg(\"Failed to parse ICMP reply, continue to parse as full packet\")\n\t\t\t// In unit test, we found out when the listener listens on 0.0.0.0, the socket reads the full packet after\n\t\t\t// the second reply\n\t\t\tif err := ip.handleFullPacket(ctx, icmpDecoder, buf[:n]); err != nil {\n\t\t\t\tip.logger.Debug().Err(err).Str(\"dst\", from.String()).Msg(\"Failed to parse ICMP reply as full packet\")\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tif !isEchoReply(reply.msg) {\n\t\t\tip.logger.Debug().Str(\"dst\", from.String()).Msgf(\"Drop ICMP %s from reply\", reply.msg.Type)\n\t\t\tcontinue\n\t\t}\n\t\tif err := ip.sendReply(ctx, reply); err != nil {\n\t\t\tip.logger.Debug().Err(err).Str(\"dst\", from.String()).Msg(\"Failed to send ICMP reply\")\n\t\t\tcontinue\n\t\t}\n\t}\n}", "func (e *MyTest) PingPong(ctx context.Context, stream myTest.MyTest_PingPongStream) error {\n\tfor {\n\t\treq, err := stream.Recv()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tlog.Logf(\"Got ping %v\", req.Stroke)\n\t\tif err := stream.Send(&myTest.Pong{Stroke: req.Stroke}); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n}", "func (f *Sink) TimeoutChan() <-chan time.Time {\n\treturn f.timeoutTimer.C\n}", "func pong(pings <- chan string, pongs chan <- string) {\n\tmsg := <- pings\n\tpongs <- msg\n}", "func pingCommand(m discord.Message, parameters []string, received time.Time) (discord.APIPayload, error) {\n\tvar payload discord.APIPayload\n\tsent, _ := m.SentAt.Parse()\n\n\telapsed := received.Sub(sent)\n\tval := fmt.Sprintf(\"Pong! 
`%vms`\", int64(elapsed/time.Millisecond))\n\terr := payload.Prepare(val, m.ChannelID)\n\treturn payload, err\n}", "func (r *mutationResolver) Ping(ctx context.Context) (*string, error) {\n\tpanic(fmt.Errorf(\"not implemented\"))\n}", "func (db *DB) Ping() <-chan SQLResult {\n\tresult := make(chan SQLResult, 10)\n\tgo func() {\n\t\tdefer close(result)\n\n\t\tticker := time.NewTicker(db.conf.GetFrequency())\n\t\tfor range ticker.C {\n\t\t\tgo func() {\n\t\t\t\tresult <- executeQuery(db.db, db.conf.GetQuery())\n\t\t\t}()\n\t\t}\n\t}()\n\treturn result\n}", "func MockEmitter(consTimeout time.Duration) *Emitter {\n\teb := eventbus.New()\n\trpc := rpcbus.New()\n\tkeys := key.NewRandKeys()\n\n\treturn &Emitter{\n\t\tEventBus: eb,\n\t\tRPCBus: rpc,\n\t\tKeys: keys,\n\t\tTimerLength: consTimeout,\n\t}\n}", "func pingLoop(client *Client) {\n\t// Create ticker to send pings every two minutes.\n\tticker := time.NewTicker(time.Minute * 2)\n\tfor {\n\t\tselect {\n\t\t// If the client is done, stop the time and goroutine.\n\t\tcase <-client.Done:\n\t\t\tticker.Stop()\n\t\t\treturn\n\t\t// Loop pings to keep connection alive.\n\t\tcase <-ticker.C:\n\t\t\tSendPing(client, strconv.FormatInt(time.Now().UnixNano(), 10))\n\t\t}\n\t}\n}", "func (conn *Conn) ping(ctx context.Context) {\n\tdefer conn.wg.Done()\n\ttick := time.NewTicker(conn.cfg.PingFreq)\n\tfor {\n\t\tselect {\n\t\tcase <-tick.C:\n\t\t\tconn.Ping(fmt.Sprintf(\"%d\", time.Now().UnixNano()))\n\t\tcase <-ctx.Done():\n\t\t\t// control channel closed, bail out\n\t\t\ttick.Stop()\n\t\t\treturn\n\t\t}\n\t}\n}", "func PingWithTimeout(addr string, timeout time.Duration) (PingResponse, error) {\n\treturn ping(addr, timeout)\n}", "func (rn *RemoteNode) Ping() error {\n\tmsg, err := NewPingMessage()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = rn.SendMessageSync(msg)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func PingMessageHandler(px *Proxy) daemon.Handler {\n\treturn func(w daemon.ResponseWriteCloser, msg daemon.Messager) {\n\t\tif msg.Type() != daemon.PingMsgType {\n\t\t\tpx.Printf(\"Mux error, dispatch %s message to ping message handler\\n\", msg.Type())\n\t\t\treturn\n\t\t}\n\n\t\tpx.ResetPingTimer()\n\n\t\tw.Write(&daemon.PongMessage{Value: \"PONG\"})\n\t\tpx.Debugln(\"Send pong message\")\n\t}\n}", "func ping(s *discordgo.Session, m *discordgo.MessageCreate, message []string) {\n\tarrivalTime, err := m.Timestamp.Parse()\n\tif err != nil {\n\t\tlog.Println(err.Error())\n\t\treturn\n\t}\n\tif message[0] == \"ping\" {\n\t\ts.ChannelMessageSend(m.ChannelID, fmt.Sprintf(\"Pong! %dms\",\n\t\t\ttime.Since(arrivalTime).Nanoseconds()/1000000))\n\t} else {\n\t\ts.ChannelMessageSend(m.ChannelID, fmt.Sprintf(\"Ping! 
%dms\",\n\t\t\ttime.Since(arrivalTime).Nanoseconds()/1000000))\n\t}\n}", "func (c *Control) Ping(ctx context.Context) (time.Duration, error) {\n\tstart := time.Now()\n\n\tif _, err := c.conn.Write([]byte{byte(PingType)}); err != nil {\n\t\treturn 0, err\n\t}\n\n\tselect {\n\tcase <-ctx.Done():\n\t\treturn 0, ctx.Err()\n\n\tcase t, ok := <-c.pongCh:\n\t\tif !ok {\n\t\t\treturn 0, c.err\n\t\t}\n\t\treturn t.Sub(start), nil\n\t}\n}", "func (conn *Conn) Ping(message string) { conn.Raw(PING + \" :\" + message) }", "func PingEveryone(self *State) {\n\tfor i := 0; i < len(self.AllPorts); i ++ {\n\t\tif self.AllPorts[i] != self.ListenPort {\n\t\t\tgo ping(self, self.AllPorts[i])\n\t\t}\n\t}\n}", "func ping(hosts []string, returnUnavailable bool) []string {\n\tvar toReturn []string\n\tvar cmds []*exec.Cmd\n\n\t// Start pinging:\n\tfor _, host := range hosts {\n\t\tlog.Println(\"Pinging\", host)\n\t\t// cmd := exec.Command(\"ssh\", \"-o ConnectTimeout=1\", host, \"echo\")\n\t\tcmd := exec.Command(\"nc\", \"-z\", \"-w 1\", host, \"22\")\n\t\tcmd.Start()\n\t\tcmds = append(cmds, cmd)\n\t}\n\n\t// Read result of the pings:\n\tfor i, cmd := range cmds {\n\t\tif err := cmd.Wait(); err != nil {\n\t\t\tlog.Println(\"Unavailable host:\", hosts[i], \"ping error:\", err)\n\t\t\tif returnUnavailable {\n\t\t\t\ttoReturn = append(toReturn, hosts[i])\n\t\t\t}\n\t\t} else {\n\t\t\tif !returnUnavailable {\n\t\t\t\ttoReturn = append(toReturn, hosts[i])\n\t\t\t}\n\t\t}\n\t}\n\n\treturn toReturn\n}", "func runEmitter() {\n\temitter, err := goka.NewEmitter(brokers, topic, new(codec.String))\n\tif err != nil {\n\t\tlog.Fatalf(\"error creating emitter: %v\", err)\n\t}\n\tdefer emitter.Finish()\n\n\terr = emitter.EmitSync(\"hi\", \"hello\")\n\tif err != nil {\n\t\tlog.Fatalf(\"error emitting message: %v\", err)\n\t}\n\tfmt.Println(\"message emitted\")\n}", "func (conn *Conn) Ping() (time.Duration, error) {\n\tstart := time.Now()\n\tconn.ping = make(chan bool, 1)\n\tif err := conn.WriteMessage(Ping, nil); err != nil {\n\t\treturn 0, err\n\t}\n\ttimer := time.NewTimer(time.Second*5 + conn.latency)\n\tdefer timer.Stop()\n\tselect {\n\tcase <-conn.ping:\n\tcase <-timer.C:\n\t\treturn 0, ErrPingTimeout\n\t}\n\tconn.latency = time.Since(start)\n\tfor _, f := range conn.onPong {\n\t\tgo f(conn.latency)\n\t}\n\treturn conn.latency, nil\n}", "func (p Pinger) PingPong(args ...interface{}) ([]*icmp.Message, error) {\r\n\ttimeout := 1000\r\n\tfor _, arg := range args {\r\n\t\ttemp, ok := arg.(int)\r\n\t\tif ok {\r\n\t\t\ttimeout = temp\r\n\t\t\tcontinue\r\n\t\t}\r\n\t}\r\n\terr := p.Ping()\r\n\tif err != nil {\r\n\t\treturn nil, err\r\n\t}\r\n\treturn p.Pong(timeout)\r\n}", "func (bc *BotConnection) pingpong() {\n\tdefer func() {\n\t\tlog.Println(\" pingpong: dying\")\n\t\tbc.waitGroup.Done()\n\t}()\n\n\t// Send first ping to avoid early EOF:\n\tping := struct {\n\t\tType string `json:\"type\"`\n\t}{\n\t\tType: \"ping\",\n\t}\n\terr := websocket.JSON.Send(bc.ws, &ping)\n\tif err != nil {\n\t\tlog.Printf(\" pingpong: JSON send error: %s\\n\", err)\n\t}\n\n\t// Start a timer to tick every 15 seconds:\n\tticker := time.Tick(time.Second * 15)\n\n\talive := true\n\tfor alive {\n\t\t// Wait on either the timer tick or the `die` channel:\n\t\tselect {\n\t\tcase _ = <-ticker:\n\t\t\t//log.Println(\" pingpong: ping\")\n\t\t\terr = websocket.JSON.Send(bc.ws, &ping)\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\" pingpong: JSON send error: %s\\n\", err)\n\t\t\t\tbreak\n\t\t\t}\n\t\t\t// NOTE: `readIncomingMessages` will read the \"pong\" 
response.\n\t\t\t// Cannot issue a read here because a read is already blocking there.\n\t\t\tbreak\n\t\tcase _ = <-bc.die:\n\t\t\talive = false\n\t\t\tbreak\n\t\t}\n\t}\n}", "func (ps PubSub) Ping(data string) error {\n\tps.conn.Send(\"PING\", data)\n\treturn ps.conn.Flush()\n}", "func (l *PoolListener) Spawn(shutdown <-chan interface{}, poolID string) {\n\tlogger := plog.WithField(\"poolid\", poolID)\n\n\tlogger.Debug(\"Spawning pool listener\")\n\n\tstop := make(chan struct{})\n\tdefer func() { close(stop) }()\n\n\ttimeout := time.NewTimer(1 * time.Second)\n\ttimeout.Stop()\n\n\tfor {\n\t\tpoolPath := Base().Pools().ID(poolID)\n\t\tnode := &PoolNode{ResourcePool: &pool.ResourcePool{}}\n\n\t\tvar poolEvent, ipsEvent, poolExistsEvent, ipsExistsEvent <-chan client.Event\n\n\t\tpoolExists, poolExistsEvent, err := l.connection.ExistsW(poolPath.Path(), stop)\n\t\tif poolExists && err == nil {\n\t\t\tpoolEvent, err = l.connection.GetW(poolPath.Path(), node, stop)\n\t\t\tif err == client.ErrNoNode {\n\t\t\t\tclose(stop)\n\t\t\t\tstop = make(chan struct{})\n\t\t\t\tcontinue\n\t\t\t} else if err != nil {\n\t\t\t\tlogger.WithError(err).Error(\"Unable to watch pool\")\n\t\t\t\treturn\n\t\t\t}\n\t\t} else if err != nil {\n\t\t\tlogger.WithError(err).Error(\"Unable to check if pool exists\")\n\t\t\treturn\n\t\t}\n\n\t\tchildren := []string{}\n\t\tif poolExists {\n\t\t\tvar ipsExists bool\n\t\t\tipsExists, ipsExistsEvent, err = l.connection.ExistsW(poolPath.IPs().Path(), stop)\n\t\t\tif ipsExists && err == nil {\n\t\t\t\tchildren, ipsEvent, err = l.connection.ChildrenW(poolPath.IPs().Path(), stop)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlogger.WithError(err).Error(\"Unable to watch IPs\")\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t} else if err != nil {\n\t\t\t\tlogger.WithError(err).Error(\"Unable to watch IPs node\")\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tassignments, err := l.getAssignmentMap(children)\n\t\t\tif err != nil {\n\t\t\t\tlogger.WithError(err).Error(\"Unable to get assignments\")\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// The sync will add nodes to the ips path which will trigger an ipsEvent\n\t\t\t// causing the loop to occur twice.\n\t\t\terr = l.synchronizer.Sync(*node.ResourcePool, assignments)\n\t\t\tif syncError, ok := err.(SyncError); ok {\n\t\t\t\tlogger.WithError(syncError).WithField(\"count\", len(syncError)).\n\t\t\t\t\tWarn(\"Errors encountered while syncing virtual IPs\")\n\n\t\t\t\tfor _, e := range syncError {\n\t\t\t\t\tlogger.WithError(e).Debug(\"Sync error\")\n\t\t\t\t}\n\n\t\t\t\ttimeout.Reset(l.Timeout)\n\t\t\t} else if err != nil {\n\t\t\t\tlogger.WithError(err).Warn(\"Error Syncing\")\n\t\t\t\ttimeout.Reset(l.Timeout)\n\t\t\t}\n\t\t}\n\n\t\tselect {\n\t\tcase <-ipsEvent:\n\t\tcase <-poolEvent:\n\t\tcase <-poolExistsEvent:\n\t\tcase <-ipsExistsEvent:\n\t\tcase <-timeout.C:\n\t\tcase <-shutdown:\n\t\t\treturn\n\t\t}\n\n\t\tif !timeout.Stop() {\n\t\t\tselect {\n\t\t\tcase <-timeout.C:\n\t\t\tdefault:\n\t\t\t}\n\t\t}\n\n\t\tclose(stop)\n\t\tstop = make(chan struct{})\n\t}\n}", "func (c *RedisCacher) Ping() error {\n\treturn c.c.Ping(graceful.GetManager().HammerContext()).Err()\n}", "func main() {\n\n\t// Create a new channel\n\tmessages := make(chan string)\n\n\t// Send a value into a channel using the channel <- syntax\n\tgo func() { messages <- \"ping\" }()\n\n\tmsg := <-messages\n\tfmt.Println(msg)\n\n}", "func (it *messageIterator) pingStream() {\n\tspr := &pb.StreamingPullRequest{}\n\tit.eoMu.RLock()\n\tif it.sendNewAckDeadline {\n\t\tspr.StreamAckDeadlineSeconds = 
int32(it.ackDeadline())\n\t\tit.sendNewAckDeadline = false\n\t}\n\tit.eoMu.RUnlock()\n\tit.ps.Send(spr)\n}", "func (tm *ServiceTracerouteManager) SetICMPInChan(icmpChan chan gopacket.Packet) {\n\ttm.ICMPChan = icmpChan\n}", "func Ping(conn net.Conn) {\n\tfor {\n\t\tconn.Write([]byte(\"ping\"))\n\t\ttime.Sleep(20 * time.Second)\n\t}\n}", "func Ping(conn net.Conn) {\n\tfor {\n\t\tconn.Write([]byte(\"ping\"))\n\t\ttime.Sleep(20 * time.Second)\n\t}\n}", "func (e *Account) PingPong(ctx context.Context, stream account.Account_PingPongStream) error {\n\tfor {\n\t\treq, err := stream.Recv()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tlog.Infof(\"Got ping %v\", req.Stroke)\n\t\tif err := stream.Send(&account.Pong{Stroke: req.Stroke}); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n}", "func ping(addrs ...string) ([]string, error) {\n\tres := make([]string, 0)\n\tmu := sync.Mutex{}\n\n\tadditionRes := func(addr string) {\n\t\tmu.Lock()\n\t\tdefer mu.Unlock()\n\t\tres = append(res, addr)\n\t}\n\n\tdiag := func(addr string) bool {\n\t\tconn, err := net.Dial(\"tcp\", addr)\n\t\tif err != nil {\n\t\t\treturn false\n\t\t}\n\t\tdefer conn.Close()\n\t\treturn true\n\t}\n\n\tping := func(ctx context.Context, addr string, f func(addr string)) {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn\n\t\tdefault:\n\t\t\tif diag(addr) {\n\t\t\t\tf(addr)\n\t\t\t}\n\t\t}\n\t}\n\n\twg := sync.WaitGroup{}\n\tfor _, addr := range addrs {\n\t\twg.Add(1)\n\t\tgo func(addr string) {\n\t\t\tdefer wg.Done()\n\t\t\tctx, cancel := context.WithTimeout(context.Background(), time.Second*1)\n\t\t\tdefer cancel()\n\t\t\tping(ctx, addr, additionRes)\n\t\t}(addr)\n\t}\n\twg.Wait()\n\n\treturn res, nil\n}", "func (e *Example) PingPong(ctx context.Context, stream example.Example_PingPongStream) error {\n\tfor {\n\t\treq, err := stream.Recv()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tlog.Logf(\"Got ping %v\", req.Stroke)\n\t\tif err := stream.Send(&example.Pong{Stroke: req.Stroke}); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n}", "func (e *Example) PingPong(ctx context.Context, stream example.Example_PingPongStream) error {\n\tfor {\n\t\treq, err := stream.Recv()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tlog.Logf(\"Got ping %v\", req.Stroke)\n\t\tif err := stream.Send(&example.Pong{Stroke: req.Stroke}); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n}", "func (e *Example) PingPong(ctx context.Context, stream example.Example_PingPongStream) error {\n\tfor {\n\t\treq, err := stream.Recv()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tlog.Logf(\"Got ping %v\", req.Stroke)\n\t\tif err := stream.Send(&example.Pong{Stroke: req.Stroke}); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n}", "func (listener *Listener) HijackPong(address string) error {\n\tif _, err := net.ResolveUDPAddr(\"udp\", address); err != nil {\n\t\treturn fmt.Errorf(\"error resolving UDP address: %v\", err)\n\t}\n\tgo func() {\n\t\tticker := time.NewTicker(time.Second)\n\t\tdefer ticker.Stop()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-ticker.C:\n\t\t\t\tdata, err := Ping(address)\n\t\t\t\tif err != nil {\n\t\t\t\t\t// It's okay if these packets are lost sometimes. 
There's no need to log this.\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\t//noinspection SpellCheckingInspection\n\t\t\t\tif string(data[:4]) == \"MCPE\" {\n\t\t\t\t\tfragments := bytes.Split(data, []byte{';'})\n\t\t\t\t\tfor len(fragments) < 9 {\n\t\t\t\t\t\t// Append to the fragments if it's not at least 9 elements long.\n\t\t\t\t\t\tfragments = append(fragments, nil)\n\t\t\t\t\t}\n\n\t\t\t\t\tfragments = fragments[:9]\n\t\t\t\t\tfragments[6] = []byte(strconv.Itoa(int(listener.id)))\n\t\t\t\t\tfragments[7] = []byte(\"Proxy\")\n\t\t\t\t\tfragments[8] = []byte{}\n\n\t\t\t\t\tlistener.PongData(bytes.Join(fragments, []byte{';'}))\n\t\t\t\t} else {\n\t\t\t\t\tlistener.PongData(data)\n\t\t\t\t}\n\t\t\tcase <-listener.closeCtx.Done():\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\treturn nil\n}", "func pingNodesLoop() {\n\tstop := false\n\tfor !stop {\n\t\tpingNodes()\n\t\t// TODO longer ping interval\n\t\ttime.Sleep(300 * time.Second)\n\t}\n}", "func notifyChanLimiter(maxSpeed time.Duration, inChan chan struct{}, outChan chan struct{}) {\n\n\t// we wait for an initial inChan message and then watch for spam to stop.\n\t// when inChan closes, the func exits\n\tfor range inChan {\n\t\tlog.Infoln(\"channel notify limiter witnessed an upstream message on inChan\")\n\n\t\t// Label for following for-select loop\n\t\tnotifyChannel:\n\t\t\tfor {\n\t\t\t\tlog.Debugln(\"channel notify limiter waiting to receive another inChan or notify after\", maxSpeed)\n\t\t\t\tselect {\n\t\t\t\tcase <-time.After(maxSpeed):\n\t\t\t\t\tlog.Debugln(\"channel notify limiter reached\", maxSpeed, \". Sending output\")\n\t\t\t\t\toutChan <- struct{}{}\n\t\t\t\t\t// break out of the for-select loop and go through next inChan loop iteration if any.\n\t\t\t\t\tbreak notifyChannel\n\t\t\t\tcase <-inChan:\n\t\t\t\t\tlog.Debugln(\"channel notify limiter witnessed an upstream message on inChan and is waiting an additional\", maxSpeed, \"before sending output\")\n\t\t\t\t}\n\t\t\t}\n\n\t\tlog.Debugln(\"channel notify limiter finished going through notifications\")\n\t}\n}", "func pinger(host string, c chan ping.Statistics) {\n\tpinger, _ := ping.NewPinger(host)\n\tpinger.Count = 2\n\tpinger.Timeout = time.Second * 2\n\tpinger.SetPrivileged(true) // Windows requirement\n\terr := pinger.Run()\n\tif err != nil {\n\t\tlog.Printf(\"Failed to ping target host: %s\", err)\n\t}\n\t// Returns ping.Statistics struct: https://github.com/go-ping/ping/blob/master/ping.go#L230\n\tstats := *pinger.Statistics()\n\tc <- stats\n}", "func Ping(host string, timeout time.Duration) bool {\n\treturn (&instance{server: host}).ping(timeout)\n}", "func (c *Client) OnPing(cb PingFunc) {\n\tc.onPingListeners = append(c.onPingListeners, cb)\n}", "func runEmitter() {\n\temitter, err := goka.NewEmitter(brokers, topic, new(codec.String))\n\tif err != nil {\n\t\tlog.Fatalf(\"error creating emitter: %v\", err)\n\t}\n\tdefer emitter.Finish()\n\terr = emitter.EmitSync(\"some-key\", \"some-value\")\n\tif err != nil {\n\t\tlog.Fatalf(\"error emitting message: %v\", err)\n\t}\n\tlog.Println(\"message emitted\")\n}", "func senderTunnel(pings chan<- string, pingString string) {\n\tpings <- pingString\n}", "func (t *Tracker) Ping() {\n\t// acquire mutex\n\tt.mutex.Lock()\n\tdefer t.mutex.Unlock()\n\n\t// increment\n\tt.pings++\n}", "func pinger(wg *sync.WaitGroup, configuration *config) {\n\tfor {\n\t\tfor i := 0; i < len(configuration.Address); i++ {\n\n\t\t\t//Ping syscall, -c ping count, -i interval, -w timeout\n\t\t\tout, _ := exec.Command(\"ping\", configuration.Address[i], \"-c 
5\", \"-i 3\", \"-w 10\").Output()\n\t\t\tif (strings.Contains(string(out), \"Destination Host Unreachable\")) || (strings.Contains(string(out), \"100% packet loss\")) {\n\t\t\t\tfmt.Println(\"Server down\")\n\t\t\t\tvar (\n\t\t\t\t\thost = \"xxx\"\n\t\t\t\t\tuser = \"xxx\"\n\t\t\t\t\tpass = \"xxx\"\n\t\t\t\t\trecipent = \"xxx\"\n\t\t\t\t)\n\t\t\t\t//recipent := configuration.Recipient[\"Recipinet\"+strconv.Itoa(i+1)]\n\n\t\t\t\tconfig := mailer.Config{\n\t\t\t\t\tHost: host,\n\t\t\t\t\tPort: 465,\n\t\t\t\t\tUser: user,\n\t\t\t\t\tPass: pass,\n\t\t\t\t}\n\n\t\t\t\tMailer := mailer.NewMailer(config, true)\n\n\t\t\t\tmail := mailer.NewMail()\n\t\t\t\tmail.FromName = \"Go Mailer\"\n\t\t\t\tmail.From = user\n\t\t\t\tmail.SetTo(recipent)\n\t\t\t\tmail.Subject = \"Server \"\n\t\t\t\tmail.Body = \"Your server is down\"\n\n\t\t\t\tif err := Mailer.Send(mail); err != nil {\n\t\t\t\t\tfmt.Println(err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tfmt.Println(\"Server is running\")\n\n\t\t\t}\n\t\t}\n\t\ttime.Sleep(2 * time.Second)\n\t}\n\twg.Done() // need to fix\n}", "func (e *API) PingPong(ctx context.Context, stream business.API_PingPongStream) error {\n\tfor {\n\t\treq, err := stream.Recv()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tlog.Logf(\"Got ping %v\", req.Stroke)\n\t\tif err := stream.Send(&business.Pong{Stroke: req.Stroke}); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n}", "func (tm *ServiceTracerouteManager) GetICMPInChan() chan gopacket.Packet {\n\treturn tm.ICMPChan\n}", "func (e *Payments) PingPong(ctx context.Context, stream payments.Payments_PingPongStream) error {\n\tfor {\n\t\treq, err := stream.Recv()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tlog.Logf(\"Got ping %v\", req.Stroke)\n\t\tif err := stream.Send(&payments.Pong{Stroke: req.Stroke}); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n}", "func (pp *Pinger) Ping() (bool, time.Duration) {\n\t// get the target address\n\tpp.l.RLock()\n\ttarget := pp.address\n\tpp.l.RUnlock()\n\t\n\t// Reset the stats\n\tpp.l.Lock()\n\tpp.iGotAResponse = false\n\tpp.l.Unlock()\n\t\n\t// Add a callback to note when/whether we got a response, and defer\n\t// removing it again\n\tcallback := func (p *lt.LLAPPacket) {\n\t\tif p.LLAPType == 0x82 && p.Src == target {\n\t\t\tpp.markAsResponded()\n\t\t}\n\t}\n\tpp.p.LLAPControlCallbacks.Add(&callback)\n\tdefer pp.p.LLAPControlCallbacks.Remove(&callback)\n\t\n\t// Now fire off some ENQs\n\tstartTime := time.Now()\n\tfor i := 0; i < numberOfEnqs; i++ {\n\t\tpp.p.SendLLAP(lt.LLAPPacket{target, target, 0x81, nil})\n\t\ttime.Sleep(200 * time.Microsecond)\n\t}\n\t\n\t// Give it a second to respond.\n\ttime.Sleep(1 * time.Second)\n\t\n\tresponded, when := pp.Response()\n\treturn responded, when.Sub(startTime)\n}", "func (e *Emitter) Emit(topic string, value interface{}) (done chan struct{}) {\n\te.mu.RLock()\n\tdefer e.mu.RUnlock()\n\n\tdone = make(chan struct{})\n\n\tif e.topicListeners == nil {\n\t\tclose(done)\n\t\treturn done\n\t}\n\tlns, ok := e.topicListeners[topic]\n\tif !ok || len(lns) == 0 {\n\t\tclose(done)\n\t\treturn done\n\t}\n\n\tgo func() {\n\t\tdefer close(done)\n\t\tfor _, lnch := range lns {\n\t\t\tlnch <- value\n\t\t}\n\t}()\n\treturn done\n}", "func setupPurgeReceiver(incomingAddress *string, publisher *Publisher) {\n\treceiver, err := net.Listen(\"tcp\", *incomingAddress)\n\tutils.CheckError(err, logger)\n\n\tgo func() {\n\t\tping := []byte(\"ping\")\n\t\tfor {\n\t\t\ttime.Sleep(5 * time.Second)\n\t\t\tpublisher.Pub(ping)\n\t\t}\n\t}()\n\tfor {\n\t\tconn, err := 
receiver.Accept()\n\t\tutils.CheckError(err, logger)\n\t\tgo func(c net.Conn) {\n\t\t\tdefer conn.Close()\n\t\t\tb, err := ioutil.ReadAll(conn)\n\t\t\tif err != nil {\n\t\t\t\tlogger.Info(fmt.Sprintln(\"Client connection error:\", err))\n\t\t\t} else {\n\t\t\t\tclean_purge := bytes.TrimSpace(b)\n\t\t\t\tlogger.Info(fmt.Sprintln(\"<-\", utils.ReverseName(conn), string(clean_purge)))\n\t\t\t\tpublisher.Pub(clean_purge)\n\t\t\t\tconn.Write([]byte(\"OK\\n\"))\n\t\t\t}\n\t\t}(conn)\n\t}\n}", "func doPingScan(IP string) {\n\tpinger, err := ping.NewPinger(IP)\n\tif err != nil {\n\t\tpongs <- &pong {\n\t\t\tIP: IP,\n\t\t\tAlive: false,\n\t\t\tError: err,\n\t\t}\n\t}\n\tpinger.Count = 1 // ONLY PING ONCE\n\tpinger.Timeout = timeout\n\t// WHEN PING IS DONE\n\tpinger.OnFinish = func(stats *ping.Statistics) {\n\t\tvar alive bool;\n\t\talive = stats.PacketsRecv > 0\n\t\tpongs <- &pong {\n\t\t\tIP: IP,\n\t\t\tAlive: alive,\n\t\t\tError: nil,\n\t\t}\n\t\tnumPingsFinished++\n\t\tif numPingsFinished % 170000 == 0 {\n\t\t\tDebug(fmt.Sprintf(\"Finished %f percent of %d pings\", (float64(numPingsFinished)/float64(numPings))*100.0, numPings))\n\t\t}\n\t}\n\tpinger.Run()\n}" ]
[ "0.5540986", "0.5505406", "0.5427127", "0.5427127", "0.5427127", "0.5284718", "0.5107711", "0.50829035", "0.5076376", "0.50560343", "0.4966862", "0.49452683", "0.49294308", "0.4921673", "0.49036264", "0.48855567", "0.48285797", "0.47991693", "0.47748682", "0.47645676", "0.47484863", "0.47460136", "0.47277144", "0.46966943", "0.46952093", "0.4677835", "0.46753457", "0.46549135", "0.46512383", "0.46456683", "0.46403807", "0.4637702", "0.46344995", "0.46247455", "0.46114683", "0.4607959", "0.4588968", "0.4587188", "0.4587188", "0.45856178", "0.4573507", "0.4539396", "0.45275292", "0.45266077", "0.45225272", "0.45200562", "0.4496128", "0.4492597", "0.44922164", "0.44922057", "0.44903976", "0.44701874", "0.4458162", "0.44501826", "0.4447827", "0.44293624", "0.44291347", "0.43943867", "0.4385729", "0.43778488", "0.43753687", "0.43753168", "0.43749917", "0.43686414", "0.43645352", "0.43458742", "0.43431795", "0.4318906", "0.431689", "0.4316047", "0.43108612", "0.43071324", "0.43041235", "0.43017873", "0.43006775", "0.4297285", "0.42962432", "0.42962432", "0.428494", "0.42814583", "0.42712674", "0.42712674", "0.42712674", "0.42694342", "0.4267207", "0.4253923", "0.42473683", "0.42433265", "0.42404768", "0.42263576", "0.4224501", "0.42201445", "0.4217319", "0.42145622", "0.42138767", "0.42125627", "0.42116764", "0.42072767", "0.4196172", "0.41934735" ]
0.74014896
0
pingRemote makes a synchronous ping to a remote cluster. It returns an error if the ping is unsuccessful and nil on success.
func (rcs *Service) pingRemote(rc *model.RemoteCluster) error { frame, err := makePingFrame(rc) if err != nil { return err } url := fmt.Sprintf("%s/%s", rc.SiteURL, PingURL) resp, err := rcs.sendFrameToRemote(PingTimeout, rc, frame, url) if err != nil { return err } ping := model.RemoteClusterPing{} err = json.Unmarshal(resp, &ping) if err != nil { return err } if err := rcs.server.GetStore().RemoteCluster().SetLastPingAt(rc.RemoteId); err != nil { rcs.server.Log().Log(mlog.LvlRemoteClusterServiceError, "Failed to update LastPingAt for remote cluster", mlog.String("remote", rc.DisplayName), mlog.String("remoteId", rc.RemoteId), mlog.Err(err), ) } rc.LastPingAt = model.GetMillis() if metrics := rcs.server.GetMetrics(); metrics != nil { sentAt := time.Unix(0, ping.SentAt*int64(time.Millisecond)) elapsed := time.Since(sentAt).Seconds() metrics.ObserveRemoteClusterPingDuration(rc.RemoteId, elapsed) // we approximate clock skew between remotes. skew := elapsed/2 - float64(ping.RecvAt-ping.SentAt)/1000 metrics.ObserveRemoteClusterClockSkew(rc.RemoteId, skew) } rcs.server.Log().Log(mlog.LvlRemoteClusterServiceDebug, "Remote cluster ping", mlog.String("remote", rc.DisplayName), mlog.String("remoteId", rc.RemoteId), mlog.Int64("SentAt", ping.SentAt), mlog.Int64("RecvAt", ping.RecvAt), mlog.Int64("Diff", ping.RecvAt-ping.SentAt), ) return nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (rn *RemoteNode) Ping() error {\n\tmsg, err := NewPingMessage()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = rn.SendMessageSync(msg)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (a *Client) Ping(params *PingParams, authInfo runtime.ClientAuthInfoWriter) (*PingOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewPingParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"ping\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\", \"https\"},\n\t\tParams: params,\n\t\tReader: &PingReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsuccess, ok := result.(*PingOK)\n\tif ok {\n\t\treturn success, nil\n\t}\n\t// unexpected success response\n\t// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue\n\tmsg := fmt.Sprintf(\"unexpected success response for ping: API contract not enforced by server. Client expected to get an error, but got: %T\", result)\n\tpanic(msg)\n}", "func ping(hosts []string, returnUnavailable bool) []string {\n\tvar toReturn []string\n\tvar cmds []*exec.Cmd\n\n\t// Start pinging:\n\tfor _, host := range hosts {\n\t\tlog.Println(\"Pinging\", host)\n\t\t// cmd := exec.Command(\"ssh\", \"-o ConnectTimeout=1\", host, \"echo\")\n\t\tcmd := exec.Command(\"nc\", \"-z\", \"-w 1\", host, \"22\")\n\t\tcmd.Start()\n\t\tcmds = append(cmds, cmd)\n\t}\n\n\t// Read result of the pings:\n\tfor i, cmd := range cmds {\n\t\tif err := cmd.Wait(); err != nil {\n\t\t\tlog.Println(\"Unavailable host:\", hosts[i], \"ping error:\", err)\n\t\t\tif returnUnavailable {\n\t\t\t\ttoReturn = append(toReturn, hosts[i])\n\t\t\t}\n\t\t} else {\n\t\t\tif !returnUnavailable {\n\t\t\t\ttoReturn = append(toReturn, hosts[i])\n\t\t\t}\n\t\t}\n\t}\n\n\treturn toReturn\n}", "func (my *MySQL) Ping() (err os.Error) {\n defer my.unlock()\n defer catchOsError(&err)\n my.lock()\n\n if my.conn == nil {\n return NOT_CONN_ERROR\n }\n if my.unreaded_rows {\n return UNREADED_ROWS_ERROR\n }\n\n // Send command\n my.sendCmd(_COM_PING)\n // Get server response\n my.getResult(nil)\n\n return\n}", "func (c *Connector) Ping() (err error) {\n\turl, err := c.getURL(\"\")\n\tif err != nil {\n\t\treturn err\n\t}\n\treq, _ := http.NewRequest(\"GET\", url, nil)\n\treq.Header.Add(\"content-type\", \"application/json\")\n\treq.Header.Add(\"cache-control\", \"no-cache\")\n\n\tres, err := c.getHTTPClient().Do(req)\n\tif err != nil {\n\t\treturn err\n\t} else if res.StatusCode != http.StatusOK {\n\t\tdefer res.Body.Close()\n\t\tbody, _ := ioutil.ReadAll(res.Body)\n\t\terr = fmt.Errorf(\"%s\", string(body))\n\t}\n\treturn err\n}", "func (session *pureSession) ping() error {\n\tif session.connection == nil {\n\t\treturn fmt.Errorf(\"failed to ping: Session has been released\")\n\t}\n\t// send ping request\n\trs, err := session.execute(`RETURN \"NEBULA GO PING\"`)\n\t// check connection level error\n\tif err != nil {\n\t\treturn fmt.Errorf(\"session ping failed, %s\" + err.Error())\n\t}\n\t// check session level error\n\tif !rs.IsSucceed() {\n\t\treturn fmt.Errorf(\"session ping failed, %s\" + rs.GetErrorMsg())\n\t}\n\treturn nil\n}", "func (c Client) Ping() (err error) {\n\tvar pr PingResponse\n\terr = 
c.decodeResponse(\"ping\", \"GET\", nil, &pr)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tif pr.Result != \"success\" {\n\t\treturn errors.New(pr.Message) // there will be a message, if it failed\n\t}\n\n\treturn\n}", "func (c *Client) Ping(checkAllMetaServers bool) error {\n\tc.mu.RLock()\n\tserver := c.metaServers[0]\n\tc.mu.RUnlock()\n\turl := c.url(server) + \"/ping\"\n\tif checkAllMetaServers {\n\t\turl = url + \"?all=true\"\n\t}\n\n\tresp, err := http.Get(url)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif resp.StatusCode == http.StatusOK {\n\t\treturn nil\n\t}\n\n\tb, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn fmt.Errorf(string(b))\n}", "func (a *Client) Ping(params *PingParams) (*PingOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewPingParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"Ping\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/ping\",\n\t\tProducesMediaTypes: []string{\"text/plain\"},\n\t\tConsumesMediaTypes: []string{\"text/plain\"},\n\t\tSchemes: []string{\"http\", \"https\"},\n\t\tParams: params,\n\t\tReader: &PingReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*PingOK), nil\n\n}", "func (a *Client) Ping(params *PingParams) (*PingOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewPingParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"ping\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/ping\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\"},\n\t\tParams: params,\n\t\tReader: &PingReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsuccess, ok := result.(*PingOK)\n\tif ok {\n\t\treturn success, nil\n\t}\n\t// unexpected success response\n\t// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue\n\tmsg := fmt.Sprintf(\"unexpected success response for ping: API contract not enforced by server. 
Client expected to get an error, but got: %T\", result)\n\tpanic(msg)\n}", "func (c *Cluster) Ping() bool {\n\ttr := &http.Transport{\n\t\tTLSClientConfig: &tls.Config{InsecureSkipVerify: true},\n\t\tDialContext: (&net.Dialer{\n\t\t\tTimeout: time.Second,\n\t\t}).DialContext,\n\t}\n\tclient := &http.Client{Transport: tr}\n\treq, err := http.NewRequest(\"GET\", c.Server, nil)\n\tif err != nil {\n\t\treturn false\n\t}\n\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\treturn false\n\t}\n\tdefer resp.Body.Close()\n\n\treturn true\n}", "func (a *Client) Ping(params *PingParams) (*PingOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewPingParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"Ping\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/ping\",\n\t\tProducesMediaTypes: []string{\"text/plain\"},\n\t\tConsumesMediaTypes: []string{\"text/plain\"},\n\t\tSchemes: []string{\"http\", \"https\"},\n\t\tParams: params,\n\t\tReader: &PingReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsuccess, ok := result.(*PingOK)\n\tif ok {\n\t\treturn success, nil\n\t}\n\t// unexpected success response\n\tunexpectedSuccess := result.(*PingDefault)\n\treturn nil, runtime.NewAPIError(\"unexpected success response: content available as default response in error\", unexpectedSuccess, unexpectedSuccess.Code())\n}", "func (s *Syncthing) Ping(ctx context.Context, local bool) bool {\n\t_, err := s.APICall(ctx, \"rest/system/ping\", \"GET\", 200, nil, local, nil, false, 0)\n\tif err == nil {\n\t\treturn true\n\t}\n\tif strings.Contains(err.Error(), \"Client.Timeout\") {\n\t\treturn true\n\t}\n\toktetoLog.Infof(\"error pinging syncthing: %s\", err.Error())\n\treturn false\n}", "func (s *RPC) Ping(ctx context.Context) (bool, error) {\n\treturn true, nil\n}", "func (ghost *Ghost) Ping() error {\n\tpingHandler := func(res *Response) error {\n\t\tif res == nil {\n\t\t\tghost.reset = true\n\t\t\treturn errors.New(\"Ping timeout\")\n\t\t}\n\t\treturn nil\n\t}\n\treq := NewRequest(\"ping\", nil)\n\treq.SetTimeout(pingTimeout)\n\treturn ghost.SendRequest(req, pingHandler)\n}", "func (r *vtmClient) Ping() (bool, error) {\n\tif err := r.apiGet(vtmAPIPing, nil, nil); err != nil {\n\t\treturn false, err\n\t}\n\treturn true, nil\n}", "func (r *RPC) Ping(c context.Context, arg *struct{}, res *struct{}) (err error) {\n\treturn\n}", "func (r *RPC) Ping(c context.Context, arg *struct{}, res *struct{}) (err error) {\n\treturn\n}", "func ping(c redis.Conn) (string, error) {\n\t// Send PING command to Redis\n\t// PING command returns a Redis \"Simple String\"\n\t// Use redis.String to convert the interface type to string\n\treturn redis.String(c.Do(\"PING\"))\n}", "func (c *client) Ping() (*Status, error) {\n\tvar (\n\t\tstatus Status\n\n\t\terr = c.Get(\"ping\").Json(&status)\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &status, nil\n}", "func (v *vcsCmd) ping(scheme, repo string) error {\n\treturn v.runVerboseOnly(\".\", v.pingCmd, \"scheme\", scheme, \"repo\", repo)\n}", "func (c *MockInfluxClient) Ping(timeout time.Duration) (time.Duration, string, error) {\n\tif !c.Connected {\n\t\treturn time.Millisecond, \"\", errors.New(\"Mock client set to disconnected\")\n\t}\n\n\treturn time.Millisecond, \"\", nil\n}", "func (c *sqlmock) Ping(ctx context.Context) error {\n\tif !c.monitorPings {\n\t\treturn nil\n\t}\n\n\tex, err := c.ping()\n\tif ex 
!= nil {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn ErrCancelled\n\t\tcase <-time.After(ex.delay):\n\t\t}\n\t}\n\n\treturn err\n}", "func (a *API) Ping(ctx context.Context) (*PingResult, error) {\n\tres := &PingResult{}\n\t_, err := a.get(ctx, ping, nil, res)\n\n\treturn res, err\n}", "func (m *Manager) Ping() string {\n\tnodes := m.Nodes()\n\tcommittedNodesLen := len(nodes)\n\n\tif committedNodesLen > 0 {\n\t\tnode := nodes[0]\n\n\t\tres, err := http.Get(node.LocalIP)\n\n\t\tif err != nil {\n\t\t\tlog.Print(\"Target horde node is either unhealthy or down!\", err)\n\t\t}\n\n\t\tdefer res.Body.Close()\n\n\t\tif res.StatusCode == http.StatusOK {\n\t\t\t_, err := ioutil.ReadAll(res.Body)\n\n\t\t\tif err != nil {\n\t\t\t\tlog.Print(\"Failed to read body\", err)\n\n\t\t\t\treturn \"pang\"\n\t\t\t}\n\n\t\t\treturn \"pong\"\n\t\t}\n\t}\n\n\treturn \"pang\"\n}", "func (c Connector) Ping() string {\n\tpong, err := c.Client.Ping().Result()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn pong\n}", "func (c *Client) Ping() error {\n\treturn c.request(\"GET\", \"/ping\", nil)\n}", "func (rc *RedisClient) Ping() (string, error) {\n\tconn := rc.pool.Get()\n\tdefer conn.Close()\n\tval, err := redis.String(conn.Do(\"PING\"))\n\treturn val, err\n}", "func (ins *instance) ping(timeout time.Duration) bool {\n\tsession, err := mgo.DialWithTimeout(ins.url(), timeout)\n\tif err != nil {\n\t\treturn false\n\t}\n\tdefer session.Close()\n\tsession.SetSyncTimeout(timeout)\n\tif err = session.Ping(); err != nil {\n\t\treturn false\n\t}\n\treturn true\n}", "func ping(c redis.Conn) error {\n\t// Send PING command to Redis\n\t// PING command returns a Redis \"Simple String\"\n\t// Use redis.String to convert the interface type to string\n\ts, err := redis.String(c.Do(\"PING\"))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfmt.Printf(\"PING Response = %s\\n\", s)\n\t// Output: PONG\n\n\treturn nil\n}", "func (c *Conn) Ping() error {\n\tresponse := c.client.Cmd(cmdPing)\n\tif !isOK(response) {\n\t\treturn errx.Errorf(\"ping command failed\")\n\t}\n\treturn nil\n}", "func Ping() error {\n\tc, err := Connect()\n\tdefer c.Close()\n\n\treturn err\n}", "func (c *Client) DeepPing() error {\n\tctx, cancel := context.WithTimeout(context.Background(), time.Second)\n\tdefer cancel()\n\treturn c.DeepPingWithContext(ctx)\n}", "func (c *Client) Ping() error {\n\t_, err := c.Exec(\"ping\")\n\treturn err\n}", "func (s *Status) Ping(args struct{}, reply *struct{}) error {\n\treturn nil\n}", "func (p *MySQLPinger) Ping(ctx context.Context, params PingParams) error {\n\tif err := params.CheckAndSetDefaults(defaults.ProtocolMySQL); err != nil {\n\t\treturn trace.Wrap(err)\n\t}\n\n\tvar nd net.Dialer\n\taddr := fmt.Sprintf(\"%s:%d\", params.Host, params.Port)\n\tconn, err := client.ConnectWithDialer(ctx, \"tcp\", addr,\n\t\tparams.Username,\n\t\t\"\", // no password, we're dialing into a tunnel.\n\t\tparams.DatabaseName,\n\t\tnd.DialContext,\n\t)\n\tif err != nil {\n\t\treturn convertError(err)\n\t}\n\n\tdefer func() {\n\t\tif err := conn.Close(); err != nil {\n\t\t\tlogrus.WithError(err).Info(\"Failed to close connection in MySQLPinger.Ping\")\n\t\t}\n\t}()\n\n\tif err := conn.Ping(); err != nil {\n\t\treturn convertError(err)\n\t}\n\n\treturn nil\n}", "func Ping(host string, timeout time.Duration) bool {\n\treturn (&instance{server: host}).ping(timeout)\n}", "func (reb *rebManager) pingTarget(tsi *cluster.Snode, config *cmn.Config, ver int64, _ *xactGlobalReb) (ok bool) {\n\tvar (\n\t\ttname = reb.t.si.Name()\n\t\tmaxwt = 
config.Rebalance.DestRetryTime\n\t\tsleep = config.Timeout.CplaneOperation\n\t\tsleepRetry = keepaliveRetryDuration(config)\n\t\tcurwt time.Duration\n\t\targs = callArgs{\n\t\t\tsi: tsi,\n\t\t\treq: cmn.ReqArgs{\n\t\t\t\tMethod: http.MethodGet,\n\t\t\t\tBase: tsi.IntraControlNet.DirectURL,\n\t\t\t\tPath: cmn.URLPath(cmn.Version, cmn.Health),\n\t\t\t},\n\t\t\ttimeout: config.Timeout.CplaneOperation,\n\t\t}\n\t)\n\tfor curwt < maxwt {\n\t\tres := reb.t.call(args)\n\t\tif res.err == nil {\n\t\t\tif curwt > 0 {\n\t\t\t\tglog.Infof(\"%s: %s is online\", tname, tsi.Name())\n\t\t\t}\n\t\t\treturn true\n\t\t}\n\t\targs.timeout = sleepRetry\n\t\tglog.Warningf(\"%s: waiting for %s, err %v\", tname, tsi.Name(), res.err)\n\t\ttime.Sleep(sleep)\n\t\tcurwt += sleep\n\t\tnver := reb.t.smapowner.get().version()\n\t\tif nver > ver {\n\t\t\treturn\n\t\t}\n\t}\n\tglog.Errorf(\"%s: timed-out waiting for %s\", tname, tsi.Name())\n\treturn\n}", "func (client *activeClient) Ping(c *ishell.Context) error {\n\treturn client.RPC.Call(\"API.Ping\", void, &void)\n}", "func (c *JSONRPCSignalClient) Ping() error {\n\tif c.jc == nil {\n\t\treturn errNotConnected\n\t}\n\n\treturn c.jc.Call(c.context, \"ping\", nil, nil)\n}", "func (c *Client) Ping(ping string) {\n\tvar (\n\t\targ = ReqKeepAlive{}\n\t\treply = RespKeepAlive{}\n\t\terr error\n\t)\n\n\tfor {\n\t\tselect {\n\t\tcase <-c.quit:\n\t\t\tgoto closed\n\t\tdefault:\n\t\t}\n\n\t\tif c.Client != nil && c.err == nil {\n\t\t\tif err = c.Call(ping, &arg, &reply); err != nil {\n\t\t\t\tc.err = err\n\t\t\t\tif err != rpc.ErrShutdown {\n\t\t\t\t\tc.Client.Close()\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tif err = c.dial(); err == nil {\n\t\t\t\tc.err = nil\n\t\t\t}\n\t\t}\n\t\ttime.Sleep(pingDuration)\n\t}\n\nclosed:\n\tif c.Client != nil {\n\t\tc.Client.Close()\n\t}\n}", "func (cloud *K8SCloud) Ping() error {\n\t_, err := cloud.client.CoreV1().Pods(cloud.namespace).List(meta_v1.ListOptions{})\n\treturn err\n}", "func (a API) Ping(cmd *None) (e error) {\n\tRPCHandlers[\"ping\"].Call <-API{a.Ch, cmd, nil}\n\treturn\n}", "func (client *TestClient) Ping() (pong string, err error) {\n\tif client.OK {\n\t\treturn \"pong\", nil\n\t}\n\treturn \"\", errors.New(\"Ping failed\")\n}", "func (a API) PingChk() (isNew bool) {\n\tselect {\n\tcase o := <-a.Ch.(chan PingRes):\n\t\tif o.Err != nil {\n\t\t\ta.Result = o.Err\n\t\t} else {\n\t\t\ta.Result = o.Res\n\t\t}\n\t\tisNew = true\n\tdefault:\n\t}\n\treturn\n}", "func (p *KiteHTTPPinger) Ping() Status {\n\tres, err := p.Client.Get(p.Address)\n\tif err != nil {\n\t\treturn Failure\n\t}\n\tdefer res.Body.Close()\n\n\tresData, err := ioutil.ReadAll(res.Body)\n\tif err != nil {\n\t\treturn Failure\n\t}\n\n\tif string(resData) != kiteHTTPResponse {\n\t\treturn Failure\n\t}\n\n\treturn Success\n}", "func (c *Client) Ping(ctx context.Context) error {\n\treturn c.conn.Ping(ctx)\n}", "func (t *Transport) Ping(ctx context.Context, token string) error {\n\tscheme := \"http\"\n\tif t.sslEnabled {\n\t\tscheme = \"https\"\n\t}\n\n\t// Make a http request\n\tr, err := http.NewRequestWithContext(ctx, http.MethodGet, fmt.Sprintf(\"%s://%s/v1/config/env\", scheme, t.addr), nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Add appropriate headers\n\tr.Header.Add(\"Authorization\", \"Bearer \"+token)\n\tr.Header.Add(\"Content-Type\", contentTypeJSON)\n\n\t// Fire the request\n\tres, err := t.httpClient.Do(r)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer utils.CloseTheCloser(res.Body)\n\n\tif res.StatusCode >= 200 && res.StatusCode < 300 {\n\t\treturn 
nil\n\t}\n\n\t// Unmarshal the response\n\tresult := types.M{}\n\tif err := json.NewDecoder(res.Body).Decode(&result); err != nil {\n\t\treturn err\n\t}\n\n\treturn fmt.Errorf(\"Service responded with status code (%v) with error message - (%v) \", res.StatusCode, result[\"error\"].(string))\n}", "func (c *Conn) Ping(ctx context.Context) error {\n\treturn nil // TODO(TimSatke): implement\n}", "func FastPing(port string, host string) (string, error) {\n\n\tvar err error\n\n\tvar cmd *exec.Cmd\n\tcmd = exec.Command(\"ping-wrapper.sh\", host, port)\n\tvar out bytes.Buffer\n\tvar stderr bytes.Buffer\n\tcmd.Stdout = &out\n\tcmd.Stderr = &stderr\n\terr = cmd.Run()\n\tif err != nil {\n\t\treturn \"OFFLINE\", err\n\t}\n\tvar rc = out.String()\n\tif rc != \"connected\" {\n\t\treturn \"OFFLINE\", nil\n\t}\n\n\treturn \"RUNNING\", nil\n}", "func (c *Client) Ping() error {\n\treturn c.Client.Ping(ctx, nil)\n}", "func (this *Protocol) ping(peerId PeerId) bool {\n\targs := &PingArgs{Me: this.GetMe()}\n\tvar reply PingReply\n\tsuccess := this.call(peerId, \"Ping\", args, &reply)\n\treturn success\n}", "func (c *HTTPClient) Ping() (bool, error) {\n\tres, err := utils.HTTPRequest(\"GET\",\n\t\tfmt.Sprintf(\"%v/health\", c.serverEndpoint),\n\t\tnil,\n\t\tnil,\n\t\tc.ticket,\n\t)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tdefer res.Body.Close()\n\n\tif res.StatusCode == http.StatusOK {\n\t\treturn true, nil\n\t} else {\n\t\treturn false, fmt.Errorf(ErrorMsg(res))\n\t}\n}", "func (c *Client) Ping() error {\n\tu := c.endpoint\n\tu.Path = `/`\n\treq, err := http.NewRequest(\"GET\", u.String(), nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\tresp, err := c.Do(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer resp.Body.Close()\n\tio.Copy(os.Stdout, resp.Body)\n\treturn nil\n}", "func (p *peer) Ping() {\n\t// this task is recorded in the waitgroup, so clear waitgroup on return\n\tdefer p.ms.Done()\n\t// This must come after Done and before Reporter (executes in reverse order)\n\tdefer p.ms.Delete(p)\n\n\tif p.ms.Verbose() > 1 {\n\t\tlog.Println(\"ping\", p.Url)\n\t}\n\n\tmaxfail := p.Maxfail // default before thread quits trying\n\tmn := \"TCP RTT\" // CloudWatch metric name\n\tns := \"pingmesh\" // Cloudwatch namespace\n\n\tlimit := p.Limit // number of pings before we quit, \"forever\" if zero\n\tif limit == 0 {\n\t\tlimit = math.MaxInt32\n\t}\n\tif maxfail > limit {\n\t\tmaxfail = limit\n\t}\n\n\t////\n\t// Reporter summarizes ping statistics to stdout at the end of the run\n\tdefer func() { // Reporter\n\t\tif p.Pings == 0 {\n\t\t\tfmt.Printf(\"\\nRecorded 0 valid samples, %d of %d failures\\n\", p.Fails, maxfail)\n\t\t\treturn\n\t\t}\n\n\t\tfc := float64(p.Pings)\n\t\telapsed := Hhmmss_d(p.PingTotals.Start)\n\n\t\tfmt.Printf(\"\\nRecorded %d samples in %s, average values:\\n\"+\"%s\"+\n\t\t\t\"%d %-6s\\t%.03f\\t%.03f\\t%.03f\\t%.03f\\t%.03f\\t%.03f\\t\\t%d\\t%s\\t%s\\n\\n\",\n\t\t\tp.Pings, elapsed, pt.PingTimesHeader(),\n\t\t\tp.Pings, elapsed,\n\t\t\tpt.Msec(p.PingTotals.DnsLk)/fc,\n\t\t\tpt.Msec(p.PingTotals.TcpHs)/fc,\n\t\t\tpt.Msec(p.PingTotals.TlsHs)/fc,\n\t\t\tpt.Msec(p.PingTotals.Reply)/fc,\n\t\t\tpt.Msec(p.PingTotals.Close)/fc,\n\t\t\tpt.Msec(p.PingTotals.RespTime())/fc,\n\t\t\tp.PingTotals.Size/int64(p.Pings),\n\t\t\tpt.LocationOrIp(p.PingTotals.Location),\n\t\t\t*p.PingTotals.DestUrl)\n\t}()\n\n\tp.FirstPing = time.Now().UTC().Truncate(time.Second)\n\tfor {\n\t\tif p.ms.DoneChan() == nil {\n\t\t\t// channel is nil, reading from it will block, return\n\t\t\tif p.ms.Verbose() > 1 
{\n\t\t\t\tlog.Println(\"peer.Ping: channel is nil, returning\")\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\t////\n\t\t// Sleep first, allows risk-free continue from error cases below\n\t\tvar sleepTime int\n\t\tif p.Pings == 0 {\n\t\t\tif sleepTime < p.Delay {\n\t\t\t\tsleepTime++\n\t\t\t}\n\t\t} else {\n\t\t\tsleepTime = p.Delay\n\t\t}\n\n\t\tselect {\n\t\tcase <-time.After(JitterPct(sleepTime, 1)):\n\t\t\t// we waited for the delay and got nothing ... loop around\n\n\t\tcase newdelay, more := <-p.ms.DoneChan():\n\t\t\tif !more {\n\t\t\t\t// channel is closed, we are done -- goodbye\n\t\t\t\tif p.ms.Verbose() > 1 {\n\t\t\t\t\tlog.Println(\"peer.Ping: channel is closed, returning\")\n\t\t\t\t}\n\t\t\t\treturn\n\t\t\t}\n\t\t\t// else we got a new delay on this channel (0 is signal to stop)\n\t\t\tp.Delay = newdelay\n\t\t\tif p.Delay <= 0 {\n\t\t\t\treturn\n\t\t\t}\n\t\t\t// we did not (finish) our sleep in this case ...\n\t\t}\n\n\t\t////\n\t\t// Try to fetch the URL\n\t\tptResult := client.FetchURL(p.Url, p.PeerIP)\n\n\t\tswitch {\n\t\t// result nil, something totally failed\n\t\tcase nil == ptResult:\n\t\t\tfunc() {\n\t\t\t\tp.mu.Lock()\n\t\t\t\tdefer p.mu.Unlock()\n\t\t\t\tp.Fails++\n\t\t\t}()\n\t\t\tlog.Println(\"fetch failure\", p.Fails, \"of\", maxfail, \"on\", p.Url)\n\t\t\tif p.Fails >= maxfail {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tcontinue\n\n\t\t// HTTP 200 OK and 300 series \"OK\" status codes\n\t\tcase ptResult.RespCode <= 304:\n\t\t\t// Take a write lock on this peer before updating values\n\t\t\t// (make each peer read/write reentrant, also []*peers)\n\t\t\tfunc() {\n\t\t\t\tp.mu.Lock()\n\t\t\t\tdefer p.mu.Unlock()\n\t\t\t\tp.Pings++\n\t\t\t\tnow := time.Now().UTC()\n\t\t\t\tp.LatestPing = now.UTC().Truncate(time.Second)\n\t\t\t\tif p.Pings == 1 {\n\t\t\t\t\t////\n\t\t\t\t\t// first ping -- initialize ptResult\n\t\t\t\t\tp.PingTotals = *ptResult\n\t\t\t\t} else {\n\t\t\t\t\tp.PingTotals.DnsLk += ptResult.DnsLk\n\t\t\t\t\tp.PingTotals.TcpHs += ptResult.TcpHs\n\t\t\t\t\tp.PingTotals.TlsHs += ptResult.TlsHs\n\t\t\t\t\tp.PingTotals.Reply += ptResult.Reply\n\t\t\t\t\tp.PingTotals.Close += ptResult.Close\n\t\t\t\t\tp.PingTotals.Total += ptResult.Total\n\t\t\t\t\tp.PingTotals.Size += ptResult.Size\n\t\t\t\t}\n\n\t\t\t\tif len(p.PeerIP) == 0 && len(ptResult.Remote) > 0 {\n\t\t\t\t\tp.PeerIP = ptResult.Remote\n\t\t\t\t}\n\n\t\t\t\tif p.Location == client.LocUnknown {\n\t\t\t\t\tif *ptResult.Location != client.LocUnknown && len(*ptResult.Location) > 0 {\n\t\t\t\t\t\tp.Location = *ptResult.Location\n\t\t\t\t\t\tp.PingTotals.Location = &p.Location\n\t\t\t\t\t\tif p.ms.Verbose() > 1 {\n\t\t\t\t\t\t\tlog.Println(\"Initialize remote location to\", *ptResult.Location)\n\t\t\t\t\t\t}\n\t\t\t\t\t} else {\n\t\t\t\t\t\t// It's not returning a pingmesh Location response, use hostname\n\t\t\t\t\t\tp.Location = p.Url\n\t\t\t\t\t\tp.PingTotals.Location = &p.Location\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}()\n\n\t\t// HTTP 500 series error\n\t\tcase ptResult.RespCode > 304:\n\t\t\tfunc() {\n\t\t\t\tp.mu.Lock()\n\t\t\t\tdefer p.mu.Unlock()\n\t\t\t\tp.Fails++\n\t\t\t}()\n\t\t\tremote := p.Location\n\t\t\tif len(remote) == 0 || remote == client.LocUnknown {\n\t\t\t\tif len(p.PeerIP) > 0 {\n\t\t\t\t\tremote = p.PeerIP\n\t\t\t\t} else {\n\t\t\t\t\tremote = p.Host\n\t\t\t\t}\n\t\t\t}\n\t\t\tif p.ms.Verbose() > 0 {\n\t\t\t\tfmt.Println(p.Pings, ptResult.MsecTsv())\n\t\t\t}\n\t\t\tif p.Fails >= maxfail {\n\t\t\t\tclient.LogSentry(sentry.LevelWarning, \"%s to %s: HTTP error %d hit failure limit %d on %s, Ping quitting\", 
p.ms.SrvLocation(), remote, ptResult.RespCode, p.Fails, p.Url)\n\t\t\t\treturn\n\t\t\t} else {\n\t\t\t\tlog.Println(p.ms.SrvLocation(), \"to\", remote, \"HTTP\", ptResult.RespCode, \"failure\", p.Fails, \"of\", maxfail, \"on\", p.Url)\n\t\t\t}\n\t\t\tcontinue\n\n\t\t\t////\n\t\t\t// Other HTTP response codes can be coded here (error, redirect)\n\t\t\t////\n\t\t}\n\n\t\t////\n\t\t// Execution should continue here only in NON-ERROR cases; errors\n\t\t// continue the for{} above\n\t\t////\n\n\t\tif p.ms.Verbose() > 0 {\n\t\t\tif p.ms.Verbose() > 1 {\n\t\t\t\tfmt.Println(p.Pings, ptResult.MsecTsv())\n\t\t\t} else {\n\t\t\t\tfmt.Printf(\"%3d %8.03f msec %20s %s\\n\", p.Pings, pt.Msec(ptResult.TcpHs), pt.LocationOrIp(ptResult.Location), ptResult.Remote)\n\t\t\t}\n\t\t}\n\n\t\tif p.ms.CwFlag() {\n\t\t\tmetric := pt.Msec(ptResult.TcpHs)\n\t\t\tmyLocation := p.ms.SrvLocation()\n\t\t\tif p.ms.Verbose() > 2 {\n\t\t\t\tlog.Println(\"publishing TCP RTT\", metric, \"msec to CloudWatch\", ns, \"from\", myLocation)\n\t\t\t}\n\t\t\trespCode := \"0\"\n\t\t\tif ptResult.RespCode >= 0 {\n\t\t\t\t// 000 in cloudwatch indicates it was a zero return code from lower layer\n\t\t\t\t// while single digit 0 indicates an error making the request\n\t\t\t\trespCode = fmt.Sprintf(\"%03d\", ptResult.RespCode)\n\t\t\t}\n\n\t\t\t////\n\t\t\t// Publish my location (IP or REP_LOCATION) and their location\n\t\t\tcw.PublishRespTime(myLocation, p.Location, respCode, metric, mn, ns)\n\t\t\t// NOTE: using network RTT estimate (TcpHs) rather than full page response time\n\t\t\t// TODO: This makes the legends wrong in Cloudwatch. Fix that.\n\t\t}\n\n\t\tif p.Pings >= limit {\n\t\t\t// report stats (see deferred func() above) upon return\n\t\t\treturn\n\t\t}\n\n\t\tif p.Delay <= 0 {\n\t\t\t// we were signaled to stop\n\t\t\treturn\n\t\t}\n\t}\n}", "func (p *protocol) Ping() error {\n\tlog.Debugf(\"[T %s > %s] Sending ping\", p.conn.LocalAddr().String(), p.conn.RemoteAddr().String())\n\treturn p.conn.SendMessage(&protocolPING{})\n}", "func (tc *TokenCache) Ping() error {\n\t_, err := tc.Client.Ping().Result() // testing the cache connection\n\tif err != nil {\n\t\t// when the cache connection fails.\n\t\treturn ex.NewErr(&ex.ErrCacheQuery{}, err, \"Failed to connect to cache\", \"TokenCache.Ping\")\n\t}\n\treturn nil\n}", "func (ng Ngrok) Ping() error {\n\tresp, err := http.Get(fmt.Sprintf(\"http://%s:%d/api/tunnels\", ng.host, ng.port))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif resp.StatusCode == 200 {\n\t\treturn nil\n\t}\n\n\treturn fmt.Errorf(\"unknown error\")\n}", "func Ping(host net.IP) (ok bool, err error) {\n\treq := &rmcp_ping{\n\t\tver: 0x06,\n\t\tseq: 0xFF,\n\t\tclass: 0x06,\n\t\tiana: 4542,\n\t\tmsg_type: PRESENCE_PING,\n\t\tmsg_tag: 'J',\n\t}\n\n\tconn, err := net.Dial(\"udp\", net.JoinHostPort(host.String(), PRI_RMCP_PORT))\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer conn.Close()\n\t// SetDeadline takes an absolute time; PING_TIMEOUT is assumed to be a time.Duration\n\tconn.SetDeadline(time.Now().Add(PING_TIMEOUT))\n\n\terr = binary.Write(conn, binary.BigEndian, req)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tres := &rmcp_pong{}\n\n\t// binary.Read fills res completely or returns an error (io.ErrUnexpectedEOF on a short read)\n\terr = binary.Read(conn, binary.BigEndian, res)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tif res.asf.msg_type == PRESENCE_PONG &&\n\t\tres.asf.msg_tag == 'J' &&\n\t\tres.sup_entities == 0x81 {\n\t\tok = true\n\t}\n\treturn\n}", "func (s *RedisStore) ping() (bool, error) {\n\tdata, err := s.Client.Ping().Result()\n\treturn data == \"PONG\", err\n}", "func (replayer *replayer) ping(ctx context.Context) 
error {\n\tif replayer.rpcClient == nil {\n\t\treturn log.Errf(ctx, nil, \"cannot ping without gapir connection\")\n\t}\n\n\tctx = attachAuthToken(ctx, replayer.deviceConnectionInfo.authToken)\n\tr, err := replayer.rpcClient.Ping(ctx, &replaysrv.PingRequest{})\n\tif err != nil {\n\t\treturn log.Err(ctx, err, \"Sending ping\")\n\t}\n\tif r == nil {\n\t\treturn log.Err(ctx, nil, \"No response for ping\")\n\t}\n\n\treturn nil\n}", "func (n *Node) Ping() (string, error) {\n\tres, err := n.Talk(\"/ping\", false, nil)\n\tif err != nil {\n\t\tlog.Println(\"/ping\", n.Nodestr, err)\n\t\treturn \"\", err\n\t}\n\tif len(res) == 2 && res[0] == \"PONG\" {\n\t\tlog.Println(\"ponged,i am\", res[1])\n\t\tn.Myself.setIP(res[1])\n\t\treturn res[1], nil\n\t}\n\tlog.Println(\"/ping\", n.Nodestr, \"error\")\n\treturn \"\", errors.New(\"connected,but not ponged\")\n}", "func ping(c *bm.Context) {\n\tif err := Svc.Ping(c); err != nil {\n\t\tlog.Error(\"svr.Ping error(%v)\", err)\n\t\tc.AbortWithStatus(http.StatusServiceUnavailable)\n\t}\n}", "func (s *SWIM) ping(target *Member) error {\n\tstats, err := s.mbrStatsMsgStore.Get()\n\tif err != nil {\n\t\tiLogger.Error(nil, err.Error())\n\t}\n\n\t// send ping message\n\taddr := target.Address()\n\tpingId := xid.New().String()\n\tping := createPingMessage(pingId, s.member.Address(), &stats)\n\n\tres, err := s.messageEndpoint.SyncSend(addr, ping)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// update piggyback data to store\n\ts.handlePbk(res.PiggyBack)\n\n\treturn nil\n}", "func Ping(hostname string, tube string) {\n\tc := connect(hostname)\n\tc.Tube = beanstalk.Tube{c, tube}\n\tbody := []byte(\"check_beanstalk_ping\")\n\n\tputid, err := c.Put(body, 1, 0, 5*time.Second)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"Put failed: %s\\n\", err)\n\t\tos.Exit(2)\n\t}\n\n\tp, err := c.Peek(putid)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"Peek failed: %s\\n\", err)\n\t\tos.Exit(2)\n\t}\n\n\tc.Delete(putid)\n\n\tif bytes.Equal(p, body) != true {\n\t\tfmt.Fprintf(os.Stderr, \"Unknown jobs in test tube\\n\")\n\t\tos.Exit(2)\n\t}\n\n\tfmt.Fprintf(os.Stderr, \"PUT->Peek OK\\n\")\n\tos.Exit(0)\n}", "func (sc *ServerConn) Ping(ctx context.Context) error {\n\treturn sc.Request(ctx, \"server.ping\", nil, nil)\n}", "func (p *Ping) Ping(target p2pcrypto.PublicKey, msg string) (string, error) {\n\tvar response string\n\treqid := crypto.NewUUID()\n\tping := &pb.Ping{\n\t\tReqID: reqid[:],\n\t\tReq: true,\n\t\tMessage: msg,\n\t}\n\tpchan, err := p.sendRequest(target, reqid, ping)\n\tif err != nil {\n\t\treturn response, err\n\t}\n\n\ttimer := time.NewTimer(PingTimeout)\n\tselect {\n\tcase res := <-pchan:\n\t\tresponse = res.Message\n\t\tp.pendMuxtex.Lock()\n\t\tdelete(p.pending, reqid)\n\t\tp.pendMuxtex.Unlock()\n\tcase <-timer.C:\n\t\treturn response, errPingTimedOut\n\t}\n\n\treturn response, nil\n}", "func (c *Client) Ping() (string, error) {\n\tresp, err := c.Get(c.Endpoint + \"/ping\")\n\n\tif err != nil {\n\t\tfmt.Println(\"[RIAK DEBUG] \" + err.Error())\n\t\treturn \"Ping Error!\", err\n\t}\n\n\tdefer resp.Body.Close()\n\n\tbody, err := ioutil.ReadAll(resp.Body)\n\n\tif debug {\n\t\tfmt.Println(\"[RIAK DEBUG] GET: \" + c.Endpoint + \"/ping => \" + string(body))\n\t}\n\treturn string(body), nil\n}", "func (c *arbiterClient) Ping() bool {\n\tclient, err := rpc.Dial(\"tcp\", c.remoteAddr.ToStr())\n\tif err != nil {\n\t\t*c.netErr = err\n\t\treturn false\n\t}\n\tdefer client.Close()\n\n\tvar resp bool\n\t*c.netErr = client.Call(\"ArbiterServer.Ping\", 0, &resp)\n\treturn 
resp\n}", "func (r *RedisStorage) Ping() (string, error) {\n\tconn := r.connectionPool.Get()\n\tdefer conn.Close()\n\n\tres, err := redis.String(conn.Do(\"PING\"))\n\treturn res, err\n}", "func (sdk *SDK) Ping() error {\n\treturn sdk.Verify()\n}", "func (p *protocol) Ping(ctx context.Context, peer p2pcrypto.PublicKey) error {\n\tplogger := p.logger.WithFields(log.String(\"type\", \"ping\"), log.String(\"to\", peer.String()))\n\tplogger.Debug(\"send ping request\")\n\n\tdata, err := types.InterfaceToBytes(p.local)\n\tif err != nil {\n\t\treturn err\n\t}\n\tch := make(chan []byte, 1)\n\tfoo := func(msg []byte) {\n\t\tplogger.Debug(\"handle ping response\")\n\t\tsender := &node.Info{}\n\t\terr := types.BytesToInterface(msg, sender)\n\n\t\tif err != nil {\n\t\t\tplogger.With().Warning(\"got unreadable pong\", log.Err(err))\n\t\t\treturn\n\t\t}\n\t\t// TODO: if we pinged it we already have id so no need to update,\n\t\t// but what if id or listen address has changed?\n\t\tch <- sender.ID.Bytes()\n\t}\n\n\terr = p.msgServer.SendRequest(ctx, server.PingPong, data, peer, foo, func(err error) {})\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttimeout := time.NewTimer(MessageTimeout) // todo: check whether this is useless because of `requestLifetime`\n\tselect {\n\tcase id := <-ch:\n\t\tif id == nil {\n\t\t\treturn errors.New(\"failed sending message\")\n\t\t}\n\t\tif !bytes.Equal(id, peer.Bytes()) {\n\t\t\treturn errors.New(\"got pong with different public key\")\n\t\t}\n\tcase <-timeout.C:\n\t\treturn errors.New(\"ping timeout\")\n\t}\n\n\treturn nil\n}", "func (registry *Registry) Ping(ctx context.Context) error {\n\turl := registry.url(\"/v2/\")\n\tregistry.Logf(\"registry.ping url=%s\", url)\n\treq, err := http.NewRequest(\"GET\", url, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\tresp, err := registry.Client.Do(req.WithContext(ctx))\n\tif resp != nil {\n\t\tdefer resp.Body.Close()\n\t}\n\treturn err\n}", "func (a *Client) V1Ping(params *V1PingParams) (*V1PingOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewV1PingParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"V1Ping\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/v1/ping\",\n\t\tProducesMediaTypes: []string{\"text/plain\"},\n\t\tConsumesMediaTypes: []string{\"text/plain\"},\n\t\tSchemes: []string{\"http\", \"https\"},\n\t\tParams: params,\n\t\tReader: &V1PingReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsuccess, ok := result.(*V1PingOK)\n\tif ok {\n\t\treturn success, nil\n\t}\n\t// unexpected success response\n\tunexpectedSuccess := result.(*V1PingDefault)\n\treturn nil, runtime.NewAPIError(\"unexpected success response: content available as default response in error\", unexpectedSuccess, unexpectedSuccess.Code())\n}", "func (p *Endpoint) Ping() error {\n\treturn p.send(p.session, p.encodePingCmd())\n}", "func (r *mutationResolver) Ping(ctx context.Context) (*string, error) {\n\tpanic(fmt.Errorf(\"not implemented\"))\n}", "func (r *RedisCache) Ping() error {\n\tif _, err := r.client.Ping().Result(); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (s *Syncthing) WaitForPing(ctx context.Context, local bool) error {\n\tticker := time.NewTicker(300 * time.Millisecond)\n\tto := time.Now().Add(s.timeout)\n\n\toktetoLog.Infof(\"waiting for syncthing local=%t to be ready\", local)\n\tfor retries := 0; ; retries++ {\n\t\tselect {\n\t\tcase 
<-ticker.C:\n\t\t\tif s.Ping(ctx, local) {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tif retries%5 == 0 {\n\t\t\t\toktetoLog.Infof(\"syncthing local=%t is not ready yet\", local)\n\t\t\t}\n\n\t\t\tif time.Now().After(to) && retries > 10 {\n\t\t\t\treturn fmt.Errorf(\"syncthing local=%t didn't respond after %s\", local, s.timeout.String())\n\t\t\t}\n\n\t\tcase <-ctx.Done():\n\t\t\toktetoLog.Infof(\"syncthing.WaitForPing cancelled local=%t\", local)\n\t\t\treturn ctx.Err()\n\t\t}\n\t}\n}", "func (a *Client) V1Ping(params *V1PingParams) (*V1PingOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewV1PingParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"V1Ping\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/v1/ping\",\n\t\tProducesMediaTypes: []string{\"text/plain\"},\n\t\tConsumesMediaTypes: []string{\"text/plain\"},\n\t\tSchemes: []string{\"http\", \"https\"},\n\t\tParams: params,\n\t\tReader: &V1PingReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*V1PingOK), nil\n\n}", "func (p *McPinger) Ping() (*ServerInfo, error) {\n\tres, err := mcpinger.New(p.Host, p.Port).Ping()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &ServerInfo{\n\t\tProtocol: res.Version.Protocol,\n\t\tVersion: res.Version.Name,\n\t\tMaxPlayers: res.Players.Max,\n\t\tOnlinePlayers: res.Players.Online,\n\t}, nil\n}", "func (v *connection) Ping(ctx context.Context) error {\n\tstmt, err := v.PrepareContext(ctx, \"select 1 as test\")\n\tif err != nil {\n\t\treturn driver.ErrBadConn\n\t}\n\tdefer stmt.Close()\n\t// If we are preparing statements server side, successfully preparing verifies the connection\n\tif v.usePreparedStmts {\n\t\treturn nil\n\t}\n\tqueryContext := stmt.(driver.StmtQueryContext)\n\trows, err := queryContext.QueryContext(ctx, nil)\n\tif err != nil {\n\t\treturn driver.ErrBadConn\n\t}\n\tvar val interface{}\n\tif err := rows.Next([]driver.Value{val}); err != nil {\n\t\treturn driver.ErrBadConn\n\t}\n\trows.Close()\n\treturn nil\n}", "func (mc *mysqlConn) Ping() (e error) {\n\t// Send command\n\te = mc.writeCommandPacket(COM_PING)\n\tif e != nil {\n\t\treturn\n\t}\n\n\t// Read Result\n\te = mc.readResultOK()\n\treturn\n}", "func (a *Server) updateRemoteClusterStatus(ctx context.Context, netConfig types.ClusterNetworkingConfig, remoteCluster types.RemoteCluster) (updated bool, err error) {\n\tkeepAliveCountMax := netConfig.GetKeepAliveCountMax()\n\tkeepAliveInterval := netConfig.GetKeepAliveInterval()\n\n\t// fetch tunnel connections for the cluster to update runtime status\n\tconnections, err := a.GetTunnelConnections(remoteCluster.GetName())\n\tif err != nil {\n\t\treturn false, trace.Wrap(err)\n\t}\n\tlastConn, err := services.LatestTunnelConnection(connections)\n\tif err != nil {\n\t\tif !trace.IsNotFound(err) {\n\t\t\treturn false, trace.Wrap(err)\n\t\t}\n\t\t// No tunnel connections are known, mark the cluster offline (if it\n\t\t// wasn't already).\n\t\tif remoteCluster.GetConnectionStatus() != teleport.RemoteClusterStatusOffline {\n\t\t\tremoteCluster.SetConnectionStatus(teleport.RemoteClusterStatusOffline)\n\t\t\tif err := a.UpdateRemoteCluster(ctx, remoteCluster); err != nil {\n\t\t\t\t// if the cluster was concurrently updated, ignore the update. 
either\n\t\t\t\t// the update was consistent with our view of the world, in which case\n\t\t\t\t// retrying would be pointless, or the update was not consistent, in which\n\t\t\t\t// case we should prioritize presenting our view in an internally-consistent\n\t\t\t\t// manner rather than competing with another task.\n\t\t\t\tif !trace.IsCompareFailed(err) {\n\t\t\t\t\treturn false, trace.Wrap(err)\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn true, nil\n\t\t}\n\t\treturn false, nil\n\t}\n\n\tofflineThreshold := time.Duration(keepAliveCountMax) * keepAliveInterval\n\ttunnelStatus := services.TunnelConnectionStatus(a.clock, lastConn, offlineThreshold)\n\n\t// Update remoteCluster based on lastConn. If anything changed, update it\n\t// in the backend too.\n\tprevConnectionStatus := remoteCluster.GetConnectionStatus()\n\tprevLastHeartbeat := remoteCluster.GetLastHeartbeat()\n\tremoteCluster.SetConnectionStatus(tunnelStatus)\n\t// Only bump LastHeartbeat if it's newer.\n\tif lastConn.GetLastHeartbeat().After(prevLastHeartbeat) {\n\t\tremoteCluster.SetLastHeartbeat(lastConn.GetLastHeartbeat().UTC())\n\t}\n\tif prevConnectionStatus != remoteCluster.GetConnectionStatus() || !prevLastHeartbeat.Equal(remoteCluster.GetLastHeartbeat()) {\n\t\tif err := a.UpdateRemoteCluster(ctx, remoteCluster); err != nil {\n\t\t\t// if the cluster was concurrently updated, ignore the update. either\n\t\t\t// the update was consistent with our view of the world, in which case\n\t\t\t// retrying would be pointless, or the update was not consistent, in which\n\t\t\t// case we should prioritize presenting our view in an internally-consistent\n\t\t\t// manner rather than competing with another task.\n\t\t\tif !trace.IsCompareFailed(err) {\n\t\t\t\treturn false, trace.Wrap(err)\n\t\t\t}\n\t\t}\n\t\treturn true, nil\n\t}\n\n\treturn false, nil\n}", "func (b *Backend) Ping(ctx context.Context, req *dashboard.Hello) (*dashboard.Pong, error) {\n\tif req.GetMessage() == \"Expect-Error\" {\n\t\treturn nil, newStatus(codes.Canceled, \"operation canceled because client sent Expect-Error\").err()\n\t}\n\treturn &dashboard.Pong{\n\t\tReply: req.GetMessage(),\n\t}, nil\n}", "func (s *RedisStore) ping() (bool, error) {\r\n\tconn := s.Pool.Get()\r\n\tdefer conn.Close()\r\n\tdata, err := conn.Do(\"PING\")\r\n\tif err != nil || data == nil {\r\n\t\treturn false, err\r\n\t}\r\n\treturn (data == \"PONG\"), nil\r\n}", "func (k *Kademlia) InternalPing(host net.IP, port uint16) (*Contact, error) {\n\tping := PingMessage{k.SelfContact, NewRandomID()}\n\tvar pong PongMessage\n\taddress := host.String() + \":\" + strconv.Itoa(int(port))\n\tpath := rpc.DefaultRPCPath + strconv.Itoa(int(port))\n\tclient, err := rpc.DialHTTPPath(\"tcp\", address, path)\n\tif err != nil {\n\t\treturn nil, &CommandFailed{\"HTTP Connect Error\"}\n\t}\n\t// defer Close only after a successful dial; deferring before the error\n\t// check would panic on a nil client when the dial fails\n\tdefer client.Close()\n\t/*\n\t\tUse channel to decide time out\n\t*/\n\terrorChannel := make(chan error, 1)\n\tgo func() {\n\t\terrorChannel <- client.Call(\"KademliaRPC.Ping\", ping, &pong)\n\t}()\n\tselect {\n\tcase err := <-errorChannel:\n\t\tif err != nil {\n\t\t\tlog.Fatal(\"CallDoPing:\", err)\n\t\t\treturn nil, err\n\t\t}\n\tcase <-time.After(10 * time.Second):\n\t\treturn nil, &CommandFailed{\"Time Out\"}\n\t}\n\tif !ping.MsgID.Equals(pong.MsgID) {\n\t\treturn nil, &CommandFailed{\"Wrong MsgID\"}\n\t}\n\tresult := pong.Sender\n\treturn &result, nil\n}", "func (finux *Finux) Ping() (err error) {\n\terr = finux.do(\"ping\", http.MethodGet, nil, nil)\n\treturn\n}", "func (c *Client) Ping() bool {\n\turl := 
fmt.Sprintf(\"%sping\", c.URL)\n\t_, code, err := c.query(url)\n\tif err != nil {\n\t\treturn false\n\t}\n\tif code == 200 {\n\t\treturn true\n\t}\n\treturn false\n}", "func (conn *Conn) Ping(message string) { conn.Raw(PING + \" :\" + message) }", "func (pinger *PerpetualPinger) pingAsync(self phi.Task) {\n\tresponder := make(chan phi.Message, 1)\n\tok := pinger.ponger.Send(Ping{Responder: responder})\n\tif !ok {\n\t\tpanic(\"failed to send ping\")\n\t}\n\tgo func() {\n\t\tfor m := range responder {\n\t\t\tok := self.Send(m)\n\t\t\tif !ok {\n\t\t\t\tpanic(\"failed to receive pong\")\n\t\t\t}\n\t\t}\n\t}()\n}", "func (rcs *Service) pingLoop(done <-chan struct{}) {\n\tpingChan := make(chan *model.RemoteCluster, MaxConcurrentSends*2)\n\n\t// create a thread pool to send pings concurrently to remotes.\n\tfor i := 0; i < MaxConcurrentSends; i++ {\n\t\tgo rcs.pingEmitter(pingChan, done)\n\t}\n\n\tgo rcs.pingGenerator(pingChan, done)\n}", "func (s *SystemService) Ping() (*Ping, *Response, error) {\n\tu := \"/api/v1/system/ping\"\n\tv := new(Ping)\n\n\tresp, err := s.client.Call(\"GET\", u, nil, v)\n\treturn v, resp, err\n}", "func (s *Session) Ping() (rtt time.Duration, err error) {\n\tnow := time.Now()\n\tdeadline := time.NewTimer(s.config.KeepAliveTimeout)\n\tdefer deadline.Stop()\n\tsid, done := s.pings.new()\n\tdefer s.pings.close(sid)\n\tif err = s.sendFrame(typePing, flagSYN, sid, 0); err != nil {\n\t\treturn\n\t}\n\n\tselect {\n\tcase <-done:\n\t\trtt = time.Now().Sub(now)\n\tcase <-deadline.C:\n\t\terr = ErrTimeout\n\tcase <-s.done:\n\t\terr = ErrSessionClosed\n\t}\n\treturn\n}", "func connectRemote() {\n\taddress := *server + \":\" + *port\n\tfmt.Println(\"Attempting to connect to remote @\" + address)\n\tconn, err := net.Dial(\"tcp\", address)\n\tif err != nil {\n\t\tremoteError(err)\n\t}\n\tfmt.Println(\"Connected to remote @\" + address)\n\tRemoteServer = conn\n\n\t//TODO reply to the keep alive messages?\n\n}", "func (c *Connection) Ping(ctx context.Context) (time.Duration, error) {\n\tresp, err := c.Request(ctx).\n\t\tGet(\"/status\")\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn resp.Time(), nil\n}", "func (c *Connection) ExecuteRemoteWithRetry(node, command string, printStdout bool, sleep, timeout time.Duration) error {\n\tctx, cancel := context.WithTimeout(context.Background(), timeout)\n\tdefer cancel()\n\tch := make(chan error)\n\tvar mostRecentExecuteRemoteWithRetryError error\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn\n\t\t\tdefault:\n\t\t\t\tch <- c.ExecuteRemote(node, command, printStdout)\n\t\t\t\ttime.Sleep(sleep)\n\t\t\t}\n\t\t}\n\t}()\n\tfor {\n\t\tselect {\n\t\tcase result := <-ch:\n\t\t\tmostRecentExecuteRemoteWithRetryError = result\n\t\t\tif mostRecentExecuteRemoteWithRetryError == nil {\n\t\t\t\treturn nil\n\t\t\t}\n\t\tcase <-ctx.Done():\n\t\t\treturn errors.Errorf(\"ExecuteRemoteWithRetry timed out: %s\\n\", mostRecentExecuteRemoteWithRetryError)\n\t\t}\n\t}\n}", "func (s *Server) Ping(context.Context, *empty.Empty) (*empty.Empty, error) {\n\treturn &empty.Empty{}, nil\n}", "func (client *RedisClient) Ping() (string, error) {\n\treturn client.redisdb.Ping().Result()\n\n}", "func (db *DB) Ping() (string, error) {\n\n\tvar response string\n\ttimeout := time.Second * 3\n\tconn, err := net.DialTimeout(\"tcp\", net.JoinHostPort(db.host, \"8093\"), timeout)\n\tif err != nil {\n\t\tresponse = fmt.Sprintf(\"Connection error %v\", err.Error())\n\t\treturn response, err\n\t}\n\tif conn != nil {\n\t\tdefer 
conn.Close()\n\t\tresponse = fmt.Sprintf(\"Connection successful to %v\", net.JoinHostPort(db.host, \"8093\"))\n\t}\n\n\treturn response, nil\n}", "func Remote(s *scen.Scenario, stdout, stderr io.Writer, addr string) (float64, error) {\n\treturn RemoteTimeout(s, stdout, stderr, addr, DefaultTimeout)\n}" ]
[ "0.5757455", "0.54401505", "0.54391694", "0.53251994", "0.532496", "0.53082156", "0.5287487", "0.523717", "0.52331144", "0.52265793", "0.5207792", "0.51742476", "0.5154612", "0.51186514", "0.5075052", "0.5067131", "0.5061404", "0.5061404", "0.5060334", "0.5044561", "0.5040924", "0.5012752", "0.4977211", "0.49763006", "0.49604002", "0.49545842", "0.49523723", "0.49487513", "0.494454", "0.49081075", "0.49066123", "0.49051332", "0.49046248", "0.4893044", "0.4870188", "0.48503295", "0.4834985", "0.48029485", "0.4802162", "0.4797744", "0.4762578", "0.47473636", "0.4736198", "0.47323424", "0.47290373", "0.47275618", "0.47163028", "0.47118956", "0.4703176", "0.4699182", "0.46937668", "0.4690463", "0.46898094", "0.4683221", "0.46807817", "0.46765766", "0.4670229", "0.46647322", "0.46589154", "0.4658597", "0.46570653", "0.46538764", "0.46485823", "0.4642321", "0.46398175", "0.46381566", "0.46368602", "0.46354073", "0.46345538", "0.46202245", "0.46058056", "0.45990893", "0.4596896", "0.45839512", "0.45827538", "0.45773202", "0.45760766", "0.45546854", "0.45384708", "0.45224774", "0.45071536", "0.44988784", "0.4491571", "0.44793984", "0.44783333", "0.44778267", "0.44750863", "0.44701833", "0.4467094", "0.44646034", "0.44613147", "0.44593832", "0.44564632", "0.44542238", "0.44504482", "0.44442722", "0.444365", "0.44432765", "0.44412458", "0.4441074" ]
0.7208622
0
IsUserInProtectBranchWhitelist returns true if the given user is in the whitelist of a branch in a repository.
func IsUserInProtectBranchWhitelist(repoID, userID int64, branch string) bool {
	has, err := x.Where("repo_id = ?", repoID).And("user_id = ?", userID).And("name = ?", branch).Get(new(ProtectBranchWhitelist))
	return has && err == nil
}
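A minimal caller sketch (illustrative only: the hook function, its parameter names, and the use of fmt are hypothetical and not part of the dataset; only IsUserInProtectBranchWhitelist comes from the document above):

// handlePrePush is a hypothetical pre-push hook: a push to a protected
// branch is allowed only when the pusher is in that branch's whitelist.
func handlePrePush(repoID, pusherID int64, branch string) error {
	if IsUserInProtectBranchWhitelist(repoID, pusherID, branch) {
		return nil // whitelisted: allow the push
	}
	return fmt.Errorf("user %d may not push to protected branch %q", pusherID, branch)
}

Note that the lookup fails closed: "return has && err == nil" maps any query error to not-whitelisted, which is the safer default for an authorization check.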
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (cfg *Config) inWhitelist(addr net.Addr) bool {\n\tif len(cfg.Whitelists) == 0 {\n\t\treturn false\n\t}\n\n\thost, _, err := net.SplitHostPort(addr.String())\n\tif err != nil {\n\t\tlog.Warnf(\"Unable to SplitHostPort on '%s': %v\", addr, err)\n\t\treturn false\n\t}\n\tip := net.ParseIP(host)\n\tif ip == nil {\n\t\tlog.Warnf(\"Unable to parse IP '%s'\", addr)\n\t\treturn false\n\t}\n\n\tfor _, ipnet := range cfg.Whitelists {\n\t\tif ipnet.Contains(ip) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func RefIsBranch(dir string, ref string, gitter Gitter) (bool, error) {\n\tremoteBranches, err := gitter.RemoteBranches(dir)\n\tif err != nil {\n\t\treturn false, errors.Wrapf(err, \"error getting remote branches to find provided ref %s\", ref)\n\t}\n\tfor _, b := range remoteBranches {\n\t\tif strings.Contains(b, ref) {\n\t\t\treturn true, nil\n\t\t}\n\t}\n\treturn false, nil\n}", "func isAllowedUser(request admissionctl.Request) bool {\n\tif utils.SliceContains(request.UserInfo.Username, allowedUsers) {\n\t\treturn true\n\t}\n\n\tfor _, group := range sreAdminGroups {\n\t\tif utils.SliceContains(group, request.UserInfo.Groups) {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}", "func (_Consents *consentsCaller) IsAllowed(ctx context.Context, userId [8]byte, appName string, action uint8, dataType string) (bool, error) {\n\tvar (\n\t\tret0 = new(bool)\n\t)\n\tout := ret0\n\n\terr := _Consents.contract.Call(&bind.CallOpts{Context: ctx}, out, \"isAllowed\", userId, appName, action, dataType)\n\treturn *ret0, err\n}", "func (r ReferenceName) IsBranch() bool {\n\treturn strings.HasPrefix(string(r), refHeadPrefix)\n}", "func (_Token *TokenCaller) IsWhitelisted(opts *bind.CallOpts, account common.Address) (bool, error) {\n\tvar (\n\t\tret0 = new(bool)\n\t)\n\tout := ret0\n\terr := _Token.contract.Call(opts, out, \"isWhitelisted\", account)\n\treturn *ret0, err\n}", "func IsBranch(name string) bool {\n\treturn plumbing.ReferenceName(name).IsBranch()\n}", "func IsBranchInSync(branchName string) bool {\n\tif HasTrackingBranch(branchName) {\n\t\tlocalSha := GetBranchSha(branchName)\n\t\tremoteSha := GetBranchSha(GetTrackingBranchName(branchName))\n\t\treturn localSha == remoteSha\n\t}\n\treturn true\n}", "func (_Token *TokenCallerSession) IsWhitelisted(account common.Address) (bool, error) {\n\treturn _Token.Contract.IsWhitelisted(&_Token.CallOpts, account)\n}", "func UpdateOrgProtectBranch(repo *Repository, protectBranch *ProtectBranch, whitelistUserIDs, whitelistTeamIDs string) (err error) {\n\tif err = repo.GetOwner(); err != nil {\n\t\treturn fmt.Errorf(\"GetOwner: %v\", err)\n\t} else if !repo.Owner.IsOrganization() {\n\t\treturn fmt.Errorf(\"expect repository owner to be an organization\")\n\t}\n\n\thasUsersChanged := false\n\tvalidUserIDs := tool.StringsToInt64s(strings.Split(protectBranch.WhitelistUserIDs, \",\"))\n\tif protectBranch.WhitelistUserIDs != whitelistUserIDs {\n\t\thasUsersChanged = true\n\t\tuserIDs := tool.StringsToInt64s(strings.Split(whitelistUserIDs, \",\"))\n\t\tvalidUserIDs = make([]int64, 0, len(userIDs))\n\t\tfor _, userID := range userIDs {\n\t\t\tif !Perms.Authorize(context.TODO(), userID, repo.ID, AccessModeWrite,\n\t\t\t\tAccessModeOptions{\n\t\t\t\t\tOwnerID: repo.OwnerID,\n\t\t\t\t\tPrivate: repo.IsPrivate,\n\t\t\t\t},\n\t\t\t) {\n\t\t\t\tcontinue // Drop invalid user ID\n\t\t\t}\n\n\t\t\tvalidUserIDs = append(validUserIDs, userID)\n\t\t}\n\n\t\tprotectBranch.WhitelistUserIDs = strings.Join(tool.Int64sToStrings(validUserIDs), 
\",\")\n\t}\n\n\thasTeamsChanged := false\n\tvalidTeamIDs := tool.StringsToInt64s(strings.Split(protectBranch.WhitelistTeamIDs, \",\"))\n\tif protectBranch.WhitelistTeamIDs != whitelistTeamIDs {\n\t\thasTeamsChanged = true\n\t\tteamIDs := tool.StringsToInt64s(strings.Split(whitelistTeamIDs, \",\"))\n\t\tteams, err := GetTeamsHaveAccessToRepo(repo.OwnerID, repo.ID, AccessModeWrite)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"GetTeamsHaveAccessToRepo [org_id: %d, repo_id: %d]: %v\", repo.OwnerID, repo.ID, err)\n\t\t}\n\t\tvalidTeamIDs = make([]int64, 0, len(teams))\n\t\tfor i := range teams {\n\t\t\tif teams[i].HasWriteAccess() && com.IsSliceContainsInt64(teamIDs, teams[i].ID) {\n\t\t\t\tvalidTeamIDs = append(validTeamIDs, teams[i].ID)\n\t\t\t}\n\t\t}\n\n\t\tprotectBranch.WhitelistTeamIDs = strings.Join(tool.Int64sToStrings(validTeamIDs), \",\")\n\t}\n\n\t// Make sure protectBranch.ID is not 0 for whitelists\n\tif protectBranch.ID == 0 {\n\t\tif _, err = x.Insert(protectBranch); err != nil {\n\t\t\treturn fmt.Errorf(\"Insert: %v\", err)\n\t\t}\n\t}\n\n\t// Merge users and members of teams\n\tvar whitelists []*ProtectBranchWhitelist\n\tif hasUsersChanged || hasTeamsChanged {\n\t\tmergedUserIDs := make(map[int64]bool)\n\t\tfor _, userID := range validUserIDs {\n\t\t\t// Empty whitelist users can cause an ID with 0\n\t\t\tif userID != 0 {\n\t\t\t\tmergedUserIDs[userID] = true\n\t\t\t}\n\t\t}\n\n\t\tfor _, teamID := range validTeamIDs {\n\t\t\tmembers, err := GetTeamMembers(teamID)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"GetTeamMembers [team_id: %d]: %v\", teamID, err)\n\t\t\t}\n\n\t\t\tfor i := range members {\n\t\t\t\tmergedUserIDs[members[i].ID] = true\n\t\t\t}\n\t\t}\n\n\t\twhitelists = make([]*ProtectBranchWhitelist, 0, len(mergedUserIDs))\n\t\tfor userID := range mergedUserIDs {\n\t\t\twhitelists = append(whitelists, &ProtectBranchWhitelist{\n\t\t\t\tProtectBranchID: protectBranch.ID,\n\t\t\t\tRepoID: repo.ID,\n\t\t\t\tName: protectBranch.Name,\n\t\t\t\tUserID: userID,\n\t\t\t})\n\t\t}\n\t}\n\n\tsess := x.NewSession()\n\tdefer sess.Close()\n\tif err = sess.Begin(); err != nil {\n\t\treturn err\n\t}\n\n\tif _, err = sess.ID(protectBranch.ID).AllCols().Update(protectBranch); err != nil {\n\t\treturn fmt.Errorf(\"Update: %v\", err)\n\t}\n\n\t// Refresh whitelists\n\tif hasUsersChanged || hasTeamsChanged {\n\t\tif _, err = sess.Delete(&ProtectBranchWhitelist{ProtectBranchID: protectBranch.ID}); err != nil {\n\t\t\treturn fmt.Errorf(\"delete old protect branch whitelists: %v\", err)\n\t\t} else if _, err = sess.Insert(whitelists); err != nil {\n\t\t\treturn fmt.Errorf(\"insert new protect branch whitelists: %v\", err)\n\t\t}\n\t}\n\n\treturn sess.Commit()\n}", "func IsBranchOfRepoRequirePullRequest(repoID int64, name string) bool {\n\tprotectBranch, err := GetProtectBranchOfRepoByName(repoID, name)\n\tif err != nil {\n\t\treturn false\n\t}\n\treturn protectBranch.Protected && protectBranch.RequirePullRequest\n}", "func IsUserAuthorizedForProjectTree(user *auth.User, projectSFID string) bool {\n\t// Previously, we checked for user.Admin - admins should be in a separate role\n\t// Previously, we checked for user.Allowed, which is currently not used (future flag that is currently not implemented)\n\treturn user.IsUserAuthorized(auth.Project, projectSFID, true)\n}", "func isWhitelisted(ip net.IP, whitelist []*net.IPNet) bool {\n\tfor _, network := range whitelist {\n\t\tif network.Contains(ip) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (a *ACLs) IsAllowed(c 
context.Context, host, project string) (bool, error) {\n\t// Convert Gerrit to Git hosts.\n\tif strings.HasSuffix(host, \"-review.googlesource.com\") {\n\t\thost = strings.TrimSuffix(host, \"-review.googlesource.com\") + \".googlesource.com\"\n\t}\n\thacls, configured := a.hosts[host]\n\tif !configured {\n\t\treturn false, nil\n\t}\n\tif pacls, projKnown := hacls.projects[project]; projKnown {\n\t\treturn a.belongsTo(c, hacls.readers, pacls.readers)\n\t}\n\treturn a.belongsTo(c, hacls.readers)\n}", "func (k Keeper) IsUserBlocked(ctx sdk.Context, blocker, blocked, subspace string) bool {\n\treturn k.rk.HasUserBlocked(ctx, blocker, blocked, subspace)\n}", "func (_Token *TokenSession) IsWhitelisted(account common.Address) (bool, error) {\n\treturn _Token.Contract.IsWhitelisted(&_Token.CallOpts, account)\n}", "func (r *RepoAllowlistChecker) IsAllowlisted(repoFullName string, vcsHostname string) bool {\n\tcandidate := fmt.Sprintf(\"%s/%s\", vcsHostname, repoFullName)\n\tshouldInclude := r.matchesAtLeastOneRule(r.includeRules, candidate)\n\tshouldOmit := r.matchesAtLeastOneRule(r.omitRules, candidate)\n\treturn shouldInclude && !shouldOmit\n}", "func (p GithubRepoHost) AddBranchProtection(repoID string) (BranchProtectionRule, error) {\n\tif isDebug() {\n\t\tfmt.Printf(\"Adding branch protection on %s\\n\", repoID)\n\t}\n\n\trules := fetchBranchProtectionRules()\n\tinput := githubv4.CreateBranchProtectionRuleInput{\n\t\tRepositoryID: repoID,\n\t\tPattern: *githubv4.NewString(githubv4.String(rules.Pattern)),\n\t\tDismissesStaleReviews: githubv4.NewBoolean(githubv4.Boolean(rules.DismissesStaleReviews)),\n\t\tIsAdminEnforced: githubv4.NewBoolean(githubv4.Boolean(rules.IsAdminEnforced)),\n\t\tRequiresApprovingReviews: githubv4.NewBoolean(githubv4.Boolean(rules.RequiresApprovingReviews)),\n\t\tRequiredApprovingReviewCount: githubv4.NewInt(githubv4.Int(rules.RequiredApprovingReviewCount)),\n\t\tRequiresStatusChecks: githubv4.NewBoolean(githubv4.Boolean(rules.RequiresStatusChecks)),\n\t}\n\n\tchecks := make([]githubv4.String, len(rules.RequiredStatusCheckContexts))\n\tfor i, name := range rules.RequiredStatusCheckContexts {\n\t\tchecks[i] = *githubv4.NewString(githubv4.String(name))\n\t}\n\tinput.RequiredStatusCheckContexts = &checks\n\n\tvar m CreateRuleMutation\n\tclient := buildClient()\n\terr := client.Mutate(context.Background(), &m, input, nil)\n\treturn m.CreateBranchProtectionRule.BranchProtectionRule, err\n}", "func CheckWhitelist(ipString string, whitelistedCountry []string) (bool, error) {\n\twhitelisted := false\n\tcountry, err := GetCountryData(ipString)\n\tif err != nil {\n\t\tLog(log.ErrorLevel, err.Error(), flag.Lookup(\"test.v\") == nil)\n\t\treturn whitelisted, err\n\t}\n\tfor _, v := range whitelistedCountry {\n\t\tif strings.ToUpper(v) == strings.ToUpper(country.Name) {\n\t\t\twhitelisted = true\n\t\t}\n\t}\n\treturn whitelisted, nil\n}", "func CheckUseNodesPermForUser(businessID string, user string, nodes []string) bool {\n\tbizID, err := strconv.Atoi(businessID)\n\tif err != nil {\n\t\terrMsg := fmt.Errorf(\"strconv BusinessID to int failed: %v\", err)\n\t\tblog.Errorf(errMsg.Error())\n\t\treturn false\n\t}\n\treturn canUseHosts(bizID, user, nodes)\n}", "func (v *VictimBase) IsWhitelisted() bool {\n\tif config.WhitelistEnabled() {\n\t\twhitelist := config.WhitelistedNamespaces()\n\t\treturn whitelist.Has(v.namespace)\n\t}\n\treturn true\n}", "func (v *VictimBase) IsWhitelisted() bool {\n\tif config.WhitelistEnabled() {\n\t\twhitelist := config.WhitelistedNamespaces()\n\t\treturn 
whitelist.Has(v.namespace)\n\t}\n\treturn true\n}", "func (f fileauth) IsUserAuthorizedForScope(user string, scope string) bool {\n\tscanner := bufio.NewScanner(f.fh)\n\tfor scanner.Scan() {\n\t\tline := scanner.Text()\n\n\t\tif strings.Contains(line, user) {\n\t\t\tuserline := strings.SplitN(line, \":\", 3)\n\t\t\tscopes := userline[2]\n\t\t\tfor _, s := range strings.Split(scopes, \",\") {\n\t\t\t\tif strings.Compare(s, scope) == 0 {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\t}\n\n\t\t}\n\t}\n\n\treturn false\n}", "func HasBranch(branchName string) bool {\n\tfor _, line := range strings.Split(command.New(\"git\", \"branch\", \"-a\").Output(), \"\\n\") {\n\t\tline = strings.Trim(line, \"* \")\n\t\tline = strings.TrimSpace(line)\n\t\tline = strings.Replace(line, \"remotes/origin/\", \"\", 1)\n\t\tif line == branchName {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (bp *Breakpoint) IsUser() bool {\n\treturn bp.Kind&UserBreakpoint != 0\n}", "func (k Keeper) IsUserBlocked(ctx sdk.Context, blocker, blocked sdk.AccAddress) bool {\n\tblockedUsers := k.GetUserBlocks(ctx, blocker)\n\tfor _, user := range blockedUsers {\n\t\tif user.Blocked.Equals(blocked) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (i Instruction) IsBranch() bool {\n\tif i.Opcode == \"CALL\" {\n\t\treturn false\n\t}\n\tfor _, f := range i.Forms {\n\t\tfor _, op := range f.Operands {\n\t\t\tif strings.HasPrefix(op.Type, \"rel\") {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\treturn false\n}", "func DeployWhitelist(auth *bind.TransactOpts, backend bind.ContractBackend, _factory common.Address) (common.Address, *types.Transaction, *Whitelist, error) {\n\tparsed, err := abi.JSON(strings.NewReader(WhitelistABI))\n\tif err != nil {\n\t\treturn common.Address{}, nil, nil, err\n\t}\n\taddress, tx, contract, err := bind.DeployContract(auth, parsed, common.FromHex(WhitelistBin), backend, _factory)\n\tif err != nil {\n\t\treturn common.Address{}, nil, nil, err\n\t}\n\treturn address, tx, &Whitelist{WhitelistCaller: WhitelistCaller{contract: contract}, WhitelistTransactor: WhitelistTransactor{contract: contract}}, nil\n}", "func GetBranchProtection(ctx *context.APIContext) {\n\t// swagger:operation GET /repos/{owner}/{repo}/branch_protections/{name} repository repoGetBranchProtection\n\t// ---\n\t// summary: Get a specific branch protection for the repository\n\t// produces:\n\t// - application/json\n\t// parameters:\n\t// - name: owner\n\t// in: path\n\t// description: owner of the repo\n\t// type: string\n\t// required: true\n\t// - name: repo\n\t// in: path\n\t// description: name of the repo\n\t// type: string\n\t// required: true\n\t// - name: name\n\t// in: path\n\t// description: name of protected branch\n\t// type: string\n\t// required: true\n\t// responses:\n\t// \"200\":\n\t// \"$ref\": \"#/responses/BranchProtection\"\n\t// \"404\":\n\t// \"$ref\": \"#/responses/notFound\"\n\n\trepo := ctx.Repo.Repository\n\tbpName := ctx.Params(\":name\")\n\tbp, err := git_model.GetProtectedBranchRuleByName(ctx, repo.ID, bpName)\n\tif err != nil {\n\t\tctx.Error(http.StatusInternalServerError, \"GetProtectedBranchByID\", err)\n\t\treturn\n\t}\n\tif bp == nil || bp.RepoID != repo.ID {\n\t\tctx.NotFound()\n\t\treturn\n\t}\n\n\tctx.JSON(http.StatusOK, convert.ToBranchProtection(bp))\n}", "func (config *Config) IsVendorAllowed(vendorName string) bool {\n\t// vendor whitelist is empty, allow everything\n\tif len(config.VendorWhitelist) == 0 {\n\t\treturn true\n\t}\n\n\t// vendor whitelist is enabled, only allow specified 
vendors\n\tfor _, name := range config.VendorWhitelist {\n\t\tif name == vendorName {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}", "func IsUserAuthorizedForProjectOrganizationTree(user *auth.User, projectSFID, companySFID string) bool {\n\t// Previously, we checked for user.Admin - admins should be in a separate role\n\t// Previously, we checked for user.Allowed, which is currently not used (future flag that is currently not implemented)\n\treturn user.IsUserAuthorized(auth.ProjectOrganization, projectSFID+\"|\"+companySFID, true)\n}", "func (gt GtwyMgr) IsPermitted(ctx context.Context, appcontext, remoteAddress string) (bool, error) {\n\tif EnvDebugOn {\n\t\tlblog.LogEvent(\"GtwyMgr\", \"IsPermitted\", \"info\", \"start\")\n\t}\n\n\t//check the approval list\n\tq := datastore.NewQuery(gt.bc.GetConfigValue(ctx, \"EnvGtwayDsKind\")).\n\t\tNamespace(gt.bc.GetConfigValue(ctx, \"EnvGtwayDsNamespace\")).\n\t\tFilter(\"appcontext =\", appcontext).\n\t\tFilter(\"remoteaddress =\", remoteAddress).\n\t\tKeysOnly()\n\n\t//get the count\n\tn, err := gt.ds.Count(ctx, q)\n\t//if there was an error return it and false\n\tif err != nil {\n\t\tif err != datastore.ErrNoSuchEntity {\n\t\t\treturn false, err\n\t\t}\n\t\treturn false, nil\n\t}\n\n\t//return false if the count was zero\n\tif n == 0 {\n\t\treturn false, nil\n\t}\n\n\tif EnvDebugOn {\n\t\tlblog.LogEvent(\"GtwyMgr\", \"IsPermitted\", \"info\", strconv.Itoa(n))\n\t\tlblog.LogEvent(\"GtwyMgr\", \"IsPermitted\", \"info\", \"end\")\n\t}\n\n\t//otherwise the address is valid\n\treturn true, nil\n}", "func (e PolicyEntity) IsUser() bool {\n\treturn e.Type == entityTypeUser\n}", "func (p *OAuthProxy) IsWhitelistedRequest(req *http.Request) bool {\n\tif p.upstreamConfig.SkipAuthPreflight && req.Method == \"OPTIONS\" {\n\t\treturn true\n\t}\n\n\tfor _, re := range p.upstreamConfig.SkipAuthCompiledRegex {\n\t\tif re.MatchString(req.URL.Path) {\n\t\t\t// This upstream has a matching skip auth regex\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}", "func (white *Whitelist) Allowed(item string) bool {\n\tif white.Size() == 0 {\n\t\tif white.strict {\n\t\t\t// in strict mode, we disallow everything when the list is empty\n\t\t\treturn false\n\t\t}\n\n\t\t// by default, we allow everything when the list is empty\n\t\treturn true\n\t}\n\n\treturn white.Has(item)\n}", "func ListBranchProtections(ctx *context.APIContext) {\n\t// swagger:operation GET /repos/{owner}/{repo}/branch_protections repository repoListBranchProtection\n\t// ---\n\t// summary: List branch protections for a repository\n\t// produces:\n\t// - application/json\n\t// parameters:\n\t// - name: owner\n\t// in: path\n\t// description: owner of the repo\n\t// type: string\n\t// required: true\n\t// - name: repo\n\t// in: path\n\t// description: name of the repo\n\t// type: string\n\t// required: true\n\t// responses:\n\t// \"200\":\n\t// \"$ref\": \"#/responses/BranchProtectionList\"\n\n\trepo := ctx.Repo.Repository\n\tbps, err := git_model.FindRepoProtectedBranchRules(ctx, repo.ID)\n\tif err != nil {\n\t\tctx.Error(http.StatusInternalServerError, \"GetProtectedBranches\", err)\n\t\treturn\n\t}\n\tapiBps := make([]*api.BranchProtection, len(bps))\n\tfor i := range bps {\n\t\tapiBps[i] = convert.ToBranchProtection(bps[i])\n\t}\n\n\tctx.JSON(http.StatusOK, apiBps)\n}", "func (n *Node) IsAllowed() bool {\n\tif !n.NodeAllow.Check(n.Nodestr) && n.NodeDeny.Check(n.Nodestr) {\n\t\treturn false\n\t}\n\treturn true\n}", "func (ctx *serverRequestContextImpl) CanManageUser(user user.User) error 
{\n\tuserAff := strings.Join(user.GetAffiliationPath(), \".\")\n\terr := ctx.ContainsAffiliation(userAff)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tuserType := user.GetType()\n\terr = ctx.CanActOnType(userType)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (svc *ServiceDefinition) IsRoleWhitelistEnabled() bool {\n\treturn len(svc.DefaultRoleWhitelist) > 0\n}", "func IsAllowed(ctx context.Context, req *http.Request, subject string, aclContext AccessContext) error {\n\twarden := ctx.Value(ladonWardenKey)\n\n\tif warden == nil {\n\t\treturn fmt.Errorf(\"not ACL protected\")\n\t}\n\n\tladonWarden, ok := warden.(ladon.Warden)\n\tif !ok {\n\t\treturn fmt.Errorf(\"warden is not ladon.Warden\")\n\t}\n\n\treturn ladonWarden.IsAllowed(toLadonRequest(req, subject, aclContext))\n}", "func (ctx *Context) IsUserRepoAdmin() bool {\n\treturn ctx.Repo.IsAdmin()\n}", "func isAuthorizedToDeploy(ctx coretypes.Sandbox) bool {\n\tif ctx.Caller() == ctx.ChainOwnerID() {\n\t\t// chain owner is always authorized\n\t\treturn true\n\t}\n\tif !ctx.Caller().IsAddress() {\n\t\t// smart contract from the same chain is always authorize\n\t\treturn ctx.Caller().MustContractID().ChainID() == ctx.ContractID().ChainID()\n\t}\n\treturn collections.NewMap(ctx.State(), VarDeployPermissions).MustHasAt(ctx.Caller().Bytes())\n}", "func (c *client) GetBranchProtection(org, repo, branch string) (*BranchProtection, error) {\n\tdurationLogger := c.log(\"GetBranchProtection\", org, repo, branch)\n\tdefer durationLogger()\n\n\tcode, body, err := c.requestRaw(&request{\n\t\tmethod: http.MethodGet,\n\t\tpath: fmt.Sprintf(\"/repos/%s/%s/branches/%s/protection\", org, repo, branch),\n\t\torg: org,\n\t\t// GitHub returns 404 for this call if either:\n\t\t// - The branch is not protected\n\t\t// - The access token used does not have sufficient privileges\n\t\t// We therefore need to introspect the response body.\n\t\texitCodes: []int{200, 404},\n\t})\n\n\tswitch {\n\tcase err != nil:\n\t\treturn nil, err\n\tcase code == 200:\n\t\tvar bp BranchProtection\n\t\tif err := json.Unmarshal(body, &bp); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn &bp, nil\n\tcase code == 404:\n\t\t// continue\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"unexpected status code: %d\", code)\n\t}\n\n\tvar ge githubError\n\tif err := json.Unmarshal(body, &ge); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// If the error was because the branch is not protected, we return a\n\t// nil pointer to indicate this.\n\tif ge.Message == \"Branch not protected\" {\n\t\treturn nil, nil\n\t}\n\n\t// Otherwise we got some other 404 error.\n\treturn nil, fmt.Errorf(\"getting branch protection 404: %s\", ge.Message)\n}", "func CreateBranchProtection(ctx *context.APIContext) {\n\t// swagger:operation POST /repos/{owner}/{repo}/branch_protections repository repoCreateBranchProtection\n\t// ---\n\t// summary: Create a branch protections for a repository\n\t// consumes:\n\t// - application/json\n\t// produces:\n\t// - application/json\n\t// parameters:\n\t// - name: owner\n\t// in: path\n\t// description: owner of the repo\n\t// type: string\n\t// required: true\n\t// - name: repo\n\t// in: path\n\t// description: name of the repo\n\t// type: string\n\t// required: true\n\t// - name: body\n\t// in: body\n\t// schema:\n\t// \"$ref\": \"#/definitions/CreateBranchProtectionOption\"\n\t// responses:\n\t// \"201\":\n\t// \"$ref\": \"#/responses/BranchProtection\"\n\t// \"403\":\n\t// \"$ref\": \"#/responses/forbidden\"\n\t// \"404\":\n\t// \"$ref\": 
\"#/responses/notFound\"\n\t// \"422\":\n\t// \"$ref\": \"#/responses/validationError\"\n\n\tform := web.GetForm(ctx).(*api.CreateBranchProtectionOption)\n\trepo := ctx.Repo.Repository\n\n\truleName := form.RuleName\n\tif ruleName == \"\" {\n\t\truleName = form.BranchName //nolint\n\t}\n\tif len(ruleName) == 0 {\n\t\tctx.Error(http.StatusBadRequest, \"both rule_name and branch_name are empty\", \"both rule_name and branch_name are empty\")\n\t\treturn\n\t}\n\n\tisPlainRule := !git_model.IsRuleNameSpecial(ruleName)\n\tvar isBranchExist bool\n\tif isPlainRule {\n\t\tisBranchExist = git.IsBranchExist(ctx.Req.Context(), ctx.Repo.Repository.RepoPath(), ruleName)\n\t}\n\n\tprotectBranch, err := git_model.GetProtectedBranchRuleByName(ctx, repo.ID, ruleName)\n\tif err != nil {\n\t\tctx.Error(http.StatusInternalServerError, \"GetProtectBranchOfRepoByName\", err)\n\t\treturn\n\t} else if protectBranch != nil {\n\t\tctx.Error(http.StatusForbidden, \"Create branch protection\", \"Branch protection already exist\")\n\t\treturn\n\t}\n\n\tvar requiredApprovals int64\n\tif form.RequiredApprovals > 0 {\n\t\trequiredApprovals = form.RequiredApprovals\n\t}\n\n\twhitelistUsers, err := user_model.GetUserIDsByNames(ctx, form.PushWhitelistUsernames, false)\n\tif err != nil {\n\t\tif user_model.IsErrUserNotExist(err) {\n\t\t\tctx.Error(http.StatusUnprocessableEntity, \"User does not exist\", err)\n\t\t\treturn\n\t\t}\n\t\tctx.Error(http.StatusInternalServerError, \"GetUserIDsByNames\", err)\n\t\treturn\n\t}\n\tmergeWhitelistUsers, err := user_model.GetUserIDsByNames(ctx, form.MergeWhitelistUsernames, false)\n\tif err != nil {\n\t\tif user_model.IsErrUserNotExist(err) {\n\t\t\tctx.Error(http.StatusUnprocessableEntity, \"User does not exist\", err)\n\t\t\treturn\n\t\t}\n\t\tctx.Error(http.StatusInternalServerError, \"GetUserIDsByNames\", err)\n\t\treturn\n\t}\n\tapprovalsWhitelistUsers, err := user_model.GetUserIDsByNames(ctx, form.ApprovalsWhitelistUsernames, false)\n\tif err != nil {\n\t\tif user_model.IsErrUserNotExist(err) {\n\t\t\tctx.Error(http.StatusUnprocessableEntity, \"User does not exist\", err)\n\t\t\treturn\n\t\t}\n\t\tctx.Error(http.StatusInternalServerError, \"GetUserIDsByNames\", err)\n\t\treturn\n\t}\n\tvar whitelistTeams, mergeWhitelistTeams, approvalsWhitelistTeams []int64\n\tif repo.Owner.IsOrganization() {\n\t\twhitelistTeams, err = organization.GetTeamIDsByNames(repo.OwnerID, form.PushWhitelistTeams, false)\n\t\tif err != nil {\n\t\t\tif organization.IsErrTeamNotExist(err) {\n\t\t\t\tctx.Error(http.StatusUnprocessableEntity, \"Team does not exist\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tctx.Error(http.StatusInternalServerError, \"GetTeamIDsByNames\", err)\n\t\t\treturn\n\t\t}\n\t\tmergeWhitelistTeams, err = organization.GetTeamIDsByNames(repo.OwnerID, form.MergeWhitelistTeams, false)\n\t\tif err != nil {\n\t\t\tif organization.IsErrTeamNotExist(err) {\n\t\t\t\tctx.Error(http.StatusUnprocessableEntity, \"Team does not exist\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tctx.Error(http.StatusInternalServerError, \"GetTeamIDsByNames\", err)\n\t\t\treturn\n\t\t}\n\t\tapprovalsWhitelistTeams, err = organization.GetTeamIDsByNames(repo.OwnerID, form.ApprovalsWhitelistTeams, false)\n\t\tif err != nil {\n\t\t\tif organization.IsErrTeamNotExist(err) {\n\t\t\t\tctx.Error(http.StatusUnprocessableEntity, \"Team does not exist\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tctx.Error(http.StatusInternalServerError, \"GetTeamIDsByNames\", err)\n\t\t\treturn\n\t\t}\n\t}\n\n\tprotectBranch = 
&git_model.ProtectedBranch{\n\t\tRepoID: ctx.Repo.Repository.ID,\n\t\tRuleName: ruleName,\n\t\tCanPush: form.EnablePush,\n\t\tEnableWhitelist: form.EnablePush && form.EnablePushWhitelist,\n\t\tEnableMergeWhitelist: form.EnableMergeWhitelist,\n\t\tWhitelistDeployKeys: form.EnablePush && form.EnablePushWhitelist && form.PushWhitelistDeployKeys,\n\t\tEnableStatusCheck: form.EnableStatusCheck,\n\t\tStatusCheckContexts: form.StatusCheckContexts,\n\t\tEnableApprovalsWhitelist: form.EnableApprovalsWhitelist,\n\t\tRequiredApprovals: requiredApprovals,\n\t\tBlockOnRejectedReviews: form.BlockOnRejectedReviews,\n\t\tBlockOnOfficialReviewRequests: form.BlockOnOfficialReviewRequests,\n\t\tDismissStaleApprovals: form.DismissStaleApprovals,\n\t\tRequireSignedCommits: form.RequireSignedCommits,\n\t\tProtectedFilePatterns: form.ProtectedFilePatterns,\n\t\tUnprotectedFilePatterns: form.UnprotectedFilePatterns,\n\t\tBlockOnOutdatedBranch: form.BlockOnOutdatedBranch,\n\t}\n\n\terr = git_model.UpdateProtectBranch(ctx, ctx.Repo.Repository, protectBranch, git_model.WhitelistOptions{\n\t\tUserIDs: whitelistUsers,\n\t\tTeamIDs: whitelistTeams,\n\t\tMergeUserIDs: mergeWhitelistUsers,\n\t\tMergeTeamIDs: mergeWhitelistTeams,\n\t\tApprovalsUserIDs: approvalsWhitelistUsers,\n\t\tApprovalsTeamIDs: approvalsWhitelistTeams,\n\t})\n\tif err != nil {\n\t\tctx.Error(http.StatusInternalServerError, \"UpdateProtectBranch\", err)\n\t\treturn\n\t}\n\n\tif isBranchExist {\n\t\tif err = pull_service.CheckPRsForBaseBranch(ctx, ctx.Repo.Repository, ruleName); err != nil {\n\t\t\tctx.Error(http.StatusInternalServerError, \"CheckPRsForBaseBranch\", err)\n\t\t\treturn\n\t\t}\n\t} else {\n\t\tif !isPlainRule {\n\t\t\tif ctx.Repo.GitRepo == nil {\n\t\t\t\tctx.Repo.GitRepo, err = git.OpenRepository(ctx, ctx.Repo.Repository.RepoPath())\n\t\t\t\tif err != nil {\n\t\t\t\t\tctx.Error(http.StatusInternalServerError, \"OpenRepository\", err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tdefer func() {\n\t\t\t\t\tctx.Repo.GitRepo.Close()\n\t\t\t\t\tctx.Repo.GitRepo = nil\n\t\t\t\t}()\n\t\t\t}\n\t\t\t// FIXME: since we only need to recheck files protected rules, we could improve this\n\t\t\tmatchedBranches, err := git_model.FindAllMatchedBranches(ctx, ctx.Repo.Repository.ID, ruleName)\n\t\t\tif err != nil {\n\t\t\t\tctx.Error(http.StatusInternalServerError, \"FindAllMatchedBranches\", err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tfor _, branchName := range matchedBranches {\n\t\t\t\tif err = pull_service.CheckPRsForBaseBranch(ctx, ctx.Repo.Repository, branchName); err != nil {\n\t\t\t\t\tctx.Error(http.StatusInternalServerError, \"CheckPRsForBaseBranch\", err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t// Reload from db to get all whitelists\n\tbp, err := git_model.GetProtectedBranchRuleByName(ctx, ctx.Repo.Repository.ID, ruleName)\n\tif err != nil {\n\t\tctx.Error(http.StatusInternalServerError, \"GetProtectedBranchByID\", err)\n\t\treturn\n\t}\n\tif bp == nil || bp.RepoID != ctx.Repo.Repository.ID {\n\t\tctx.Error(http.StatusInternalServerError, \"New branch protection not found\", err)\n\t\treturn\n\t}\n\n\tctx.JSON(http.StatusCreated, convert.ToBranchProtection(bp))\n}", "func NewBranchProtection(ctx *pulumi.Context,\n\tname string, args *BranchProtectionArgs, opts ...pulumi.ResourceOption) (*BranchProtection, error) {\n\tif args == nil {\n\t\treturn nil, errors.New(\"missing one or more required arguments\")\n\t}\n\n\tif args.Branch == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'Branch'\")\n\t}\n\tif args.Project == 
nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'Project'\")\n\t}\n\topts = internal.PkgResourceDefaultOpts(opts)\n\tvar resource BranchProtection\n\terr := ctx.RegisterResource(\"gitlab:index/branchProtection:BranchProtection\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func (s *Setting) InBlacklist(remote string) bool {\n\th, _, err := net.SplitHostPort(remote)\n\tif err == nil {\n\t\tremote = h\n\t}\n\tfor _, ip := range s.Blacklists {\n\t\tif ip == remote {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (db *Database) IsUserBanned(username string, ip string) (bool, error) {\n\trow := db.db.QueryRow(`\n\t\tSELECT banned FROM melodious.accounts WHERE username=$1 OR ip=$2;\n\t`, username, ip)\n\tvar banned bool\n\terr := row.Scan(&banned)\n\tif err == sql.ErrNoRows {\n\t\treturn false, nil\n\t} else if err != nil {\n\t\treturn false, err\n\t}\n\treturn banned, nil\n}", "func IsGroupIsUserGroupOrWhitelisted(name string, whitelist ...string) bool {\n\t// check whitelist of groups\n\tfor _, el := range whitelist {\n\t\tif el == name {\n\t\t\treturn true\n\t\t}\n\t}\n\n\tgroup, err := user.LookupGroup(name)\n\tif err != nil {\n\t\treturn false // fail on lookup error\n\t}\n\n\tgid, err := strconv.ParseUint(group.Gid, 10, 32)\n\tif err != nil {\n\t\treturn false // fail on parse error\n\t}\n\n\tminGID, maxGiD := ReadUserGIDRange(LoginDefsPath)\n\n\tif gid < minGID {\n\t\treturn false // group not in lower range\n\t}\n\n\tif gid > maxGiD {\n\t\treturn false // group not in upper range\n\t}\n\n\treturn true\n}", "func IsBranchOp(op string) bool {\n\tswitch op {\n\tcase JMP, BEQ, BNE, BLT, BLE, BGT, BGE:\n\t\treturn true\n\t}\n\treturn false\n}", "func (_Consents *consentsCaller) IsAllowedAt(ctx context.Context, userId [8]byte, appName string, action uint8, dataType string, blockNumber *big.Int) (bool, error) {\n\tvar (\n\t\tret0 = new(bool)\n\t)\n\tout := ret0\n\n\terr := _Consents.contract.Call(&bind.CallOpts{Context: ctx}, out, \"isAllowedAt\", userId, appName, action, dataType, blockNumber)\n\treturn *ret0, err\n}", "func (u *User) IsUser() bool {\n\treturn u.UserGroupID == USER\n}", "func (r *RBAC) IsPermit(system, uid, permission string) (bool, error) {\n\treturn r.Cache.IsPermit(system, uid, permission)\n}", "func UpdateBranchProtection() error {\n\tvar wg sync.WaitGroup\n\trequests, err := getBranchProtectionRequests()\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\twg.Add(len(requests))\n\towner, repo := getOwnerRepo()\n\n\tfor _, bp := range requests {\n\t\tgo func(bp BranchProtection) {\n\t\t\tdefer wg.Done()\n\t\t\t_, _, err := cli.Repositories.UpdateBranchProtection(ctx, owner, repo, bp.Branch, bp.Protection)\n\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\n\t\t\t_, err = fmt.Fprintln(writer, fmt.Sprintf(\"branch %v has been protected\", bp.Branch))\n\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\t\t}(bp)\n\t}\n\n\twg.Wait()\n\n\treturn nil\n}", "func (sys *IAMSys) IsAllowed(args iampolicy.Args) bool {\n\t// If opa is configured, use OPA always.\n\tif globalPolicyOPA != nil {\n\t\treturn globalPolicyOPA.IsAllowed(args)\n\t}\n\n\t// With claims set, we should do STS related checks and validation.\n\tif len(args.Claims) > 0 {\n\t\treturn sys.IsAllowedSTS(args)\n\t}\n\n\tsys.RLock()\n\tdefer sys.RUnlock()\n\n\t// If policy is available for given user, check the policy.\n\tif mp, found := sys.iamUserPolicyMap[args.AccountName]; found {\n\t\tp, ok := 
sys.iamPolicyDocsMap[mp.Policy]\n\t\treturn ok && p.IsAllowed(args)\n\t}\n\n\t// As policy is not available and OPA is not configured,\n\t// return the owner value.\n\treturn args.IsOwner\n}", "func IsUserAuthorizedForProject(user *auth.User, projectSFID string) bool {\n\t// Previously, we checked for user.Admin - admins should be in a separate role\n\t// Previously, we checked for user.Allowed, which is currently not used (future flag that is currently not implemented)\n\treturn user.IsUserAuthorizedForProjectScope(projectSFID)\n}", "func (l LowPrivilegeRaiseConfiguration) IsAllowedForProjectsIn(domainName string) bool {\n\tif l.ExcludeProjectDomainRx != nil && l.ExcludeProjectDomainRx.MatchString(domainName) {\n\t\treturn false\n\t}\n\tif l.IncludeProjectDomainRx == nil {\n\t\treturn true\n\t}\n\treturn l.IncludeProjectDomainRx.MatchString(domainName)\n}", "func (_FeeCurrencyWhitelist *FeeCurrencyWhitelistCallerSession) Whitelist(arg0 *big.Int) (common.Address, error) {\n\treturn _FeeCurrencyWhitelist.Contract.Whitelist(&_FeeCurrencyWhitelist.CallOpts, arg0)\n}", "func GetProtectBranchesByRepoID(repoID int64) ([]*ProtectBranch, error) {\n\tprotectBranches := make([]*ProtectBranch, 0, 2)\n\treturn protectBranches, x.Where(\"repo_id = ? and protected = ?\", repoID, true).Asc(\"name\").Find(&protectBranches)\n}", "func WhiteListChecker(ctx context.Context,\n\treq interface{},\n\tinfo *grpc.UnaryServerInfo,\n\thandler grpc.UnaryHandler) (interface{}, error) {\n\tpeerinfo, ok := peer.FromContext(ctx)\n\tif !ok {\n\t\treturn nil, status.Errorf(codes.Internal, \"failed to retrieve peer info\")\n\t}\n\n\thost, _, err := net.SplitHostPort(peerinfo.Addr.String())\n\tif err != nil {\n\t\treturn nil, status.Errorf(codes.Internal, err.Error())\n\t}\n\n\tserv := info.Server.(*Server)\n\tif !helpers.Includes(serv.Config.Whitelist, host) {\n\t\treturn nil, status.Errorf(codes.PermissionDenied, \"host %s is not in whitelist\", host)\n\t}\n\n\t// Calls the handler\n\th, err := handler(ctx, req)\n\n\treturn h, err\n}", "func CanAccess(userId, path string) bool {\n\tpath = filepath.FromSlash(path)\n\n\tuserWorkspace := conf.GetUserWorkspace(userId)\n\tworkspaces := filepath.SplitList(userWorkspace)\n\n\tfor _, workspace := range workspaces {\n\t\tif strings.HasPrefix(path, workspace) {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}", "func AuthorizeUserIsBusinessCaseRequester(\n\tctx context.Context,\n\tbizCase *models.BusinessCase,\n) (bool, error) {\n\tlogger := appcontext.ZLogger(ctx)\n\tprincipal := appcontext.Principal(ctx)\n\tif !principal.AllowEASi() {\n\t\tlogger.Info(\"does not have EASi job code\")\n\t\treturn false, nil\n\t}\n\n\t// If business case is owned by user, authorize\n\tif principal.ID() == bizCase.EUAUserID {\n\t\treturn true, nil\n\t}\n\t// Default to failure to authorize and create a quick audit log\n\tlogger.With(zap.Bool(\"Authorized\", false)).\n\t\tInfo(\"user unauthorized as owning the business case\")\n\treturn false, nil\n}", "func (m *MgoUserManager) Can(user *auth.User, do string) bool {\n\tfor _, pri := range user.Privilege {\n\t\tif do == pri {\n\t\t\treturn true\n\t\t}\n\t}\n\n\taid := make([]interface{}, 0, len(user.BriefGroups))\n\tfor _, v := range user.BriefGroups {\n\t\taid = append(aid, v.Id)\n\t}\n\n\tgroups, err := m.GroupMngr.FindSomeGroup(aid...)\n\tif err != nil {\n\t\tlog.Println(\"mgoauth: cannot find user group to determine privilege - \", err)\n\t\treturn false\n\t}\n\n\tfor _, v := range groups {\n\t\tfor _, pri := range v.Privilege {\n\t\t\tif do == 
pri {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\n\treturn false\n}", "func (p *Provider) checkUser(userId bson.ObjectId, users []models.MachineUser) error {\n\t// check if the incoming user is in the list of permitted user list\n\tfor _, u := range users {\n\t\tif userId == u.Id && (u.Owner || (u.Permanent && u.Approved)) {\n\t\t\treturn nil // ok he/she is good to go!\n\t\t}\n\t}\n\n\treturn fmt.Errorf(\"permission denied. user not in the list of permitted users\")\n}", "func isMainBranch(br string) bool {\n\treturn br == \"master\" || strings.HasPrefix(br, \"dev.\")\n}", "func (_Authority *AuthorityCaller) IsBuyerBlocked(opts *bind.CallOpts, buyer common.Address) (bool, error) {\n\tvar (\n\t\tret0 = new(bool)\n\t)\n\tout := ret0\n\terr := _Authority.contract.Call(opts, out, \"isBuyerBlocked\", buyer)\n\treturn *ret0, err\n}", "func (r *Registry) IsPrunable(obj client.Object) error {\n\tisPrunable, ok := r.prunables[obj.GetObjectKind().GroupVersionKind()]\n\tif !ok {\n\t\treturn nil\n\t}\n\n\treturn isPrunable(obj)\n}", "func (_FeeCurrencyWhitelist *FeeCurrencyWhitelistSession) Whitelist(arg0 *big.Int) (common.Address, error) {\n\treturn _FeeCurrencyWhitelist.Contract.Whitelist(&_FeeCurrencyWhitelist.CallOpts, arg0)\n}", "func (p GithubRepoHost) UpdateBranchProtection(repoID string, rule BranchProtectionRule) error {\n\tif isDebug() {\n\t\tfmt.Printf(\"Updating branch protection on %s\\n\", repoID)\n\t}\n\n\trules := fetchBranchProtectionRules()\n\tinput := githubv4.UpdateBranchProtectionRuleInput{\n\t\tBranchProtectionRuleID: rule.ID,\n\t\tPattern: githubv4.NewString(githubv4.String(rules.Pattern)),\n\t\tDismissesStaleReviews: githubv4.NewBoolean(githubv4.Boolean(rules.DismissesStaleReviews)),\n\t\tIsAdminEnforced: githubv4.NewBoolean(githubv4.Boolean(rules.IsAdminEnforced)),\n\t\tRequiresApprovingReviews: githubv4.NewBoolean(githubv4.Boolean(rules.RequiresApprovingReviews)),\n\t\tRequiredApprovingReviewCount: githubv4.NewInt(githubv4.Int(rules.RequiredApprovingReviewCount)),\n\t\tRequiresStatusChecks: githubv4.NewBoolean(githubv4.Boolean(rules.RequiresStatusChecks)),\n\t\tRequiredStatusCheckContexts: &[]githubv4.String{\n\t\t\t*githubv4.NewString(\"build\"),\n\t\t},\n\t}\n\n\tvar m UpdateBranchProtectionRuleMutation\n\tclient := buildClient()\n\terr := client.Mutate(context.Background(), &m, input, nil)\n\treturn err\n}", "func (statement Statement) IsAllowed(args Args) bool {\n\tcheck := func() bool {\n\t\tif !statement.Principal.Match(args.AccountName) {\n\t\t\treturn false\n\t\t}\n\n\t\tif !statement.Actions.Contains(args.Action) {\n\t\t\treturn false\n\t\t}\n\n\t\tresource := args.BucketName\n\t\tif args.ObjectName != \"\" {\n\t\t\tif !strings.HasPrefix(args.ObjectName, \"/\") {\n\t\t\t\tresource += \"/\"\n\t\t\t}\n\n\t\t\tresource += args.ObjectName\n\t\t}\n\n\t\tif !statement.Resources.Match(resource, args.ConditionValues) {\n\t\t\treturn false\n\t\t}\n\n\t\treturn statement.Conditions.Evaluate(args.ConditionValues)\n\t}\n\n\treturn statement.Effect.IsAllowed(check())\n}", "func CheckAllowlist(r *library.Repo, allowlist []string) bool {\n\t// check if all repos are allowed to be enabled\n\tif len(allowlist) == 1 && allowlist[0] == \"*\" {\n\t\treturn true\n\t}\n\n\tfor _, repo := range allowlist {\n\t\t// allow all repos in org\n\t\tif strings.Contains(repo, \"/*\") {\n\t\t\tif strings.HasPrefix(repo, r.GetOrg()) {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\n\t\t// allow specific repo within org\n\t\tif repo == r.GetFullName() {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}", 
"func (policy Policy) IsAllowed(args Args) bool {\n\t// Check all deny statements. If any one statement denies, return false.\n\tfor _, statement := range policy.Statements {\n\t\tif statement.Effect == Deny {\n\t\t\tif !statement.IsAllowed(args) {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t}\n\n\t// For owner, its allowed by default.\n\tif args.IsOwner {\n\t\treturn true\n\t}\n\n\t// Check all allow statements. If any one statement allows, return true.\n\tfor _, statement := range policy.Statements {\n\t\tif statement.Effect == Allow {\n\t\t\tif statement.IsAllowed(args) {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\n\treturn false\n}", "func (a *AllowedVault) IsAccountAllowed(account sdk.AccAddress) bool {\n\t// Anyone can deposit to non-private vaults\n\tif !a.IsPrivateVault {\n\t\treturn true\n\t}\n\n\tfor _, addr := range a.AllowedDepositors {\n\t\tif addr.Equals(account) {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}", "func (_Token *TokenTransactor) ClaimWhitelistAdmin(opts *bind.TransactOpts) (*types.Transaction, error) {\n\treturn _Token.contract.Transact(opts, \"claimWhitelistAdmin\")\n}", "func EditBranchProtection(ctx *context.APIContext) {\n\t// swagger:operation PATCH /repos/{owner}/{repo}/branch_protections/{name} repository repoEditBranchProtection\n\t// ---\n\t// summary: Edit a branch protections for a repository. Only fields that are set will be changed\n\t// consumes:\n\t// - application/json\n\t// produces:\n\t// - application/json\n\t// parameters:\n\t// - name: owner\n\t// in: path\n\t// description: owner of the repo\n\t// type: string\n\t// required: true\n\t// - name: repo\n\t// in: path\n\t// description: name of the repo\n\t// type: string\n\t// required: true\n\t// - name: name\n\t// in: path\n\t// description: name of protected branch\n\t// type: string\n\t// required: true\n\t// - name: body\n\t// in: body\n\t// schema:\n\t// \"$ref\": \"#/definitions/EditBranchProtectionOption\"\n\t// responses:\n\t// \"200\":\n\t// \"$ref\": \"#/responses/BranchProtection\"\n\t// \"404\":\n\t// \"$ref\": \"#/responses/notFound\"\n\t// \"422\":\n\t// \"$ref\": \"#/responses/validationError\"\n\tform := web.GetForm(ctx).(*api.EditBranchProtectionOption)\n\trepo := ctx.Repo.Repository\n\tbpName := ctx.Params(\":name\")\n\tprotectBranch, err := git_model.GetProtectedBranchRuleByName(ctx, repo.ID, bpName)\n\tif err != nil {\n\t\tctx.Error(http.StatusInternalServerError, \"GetProtectedBranchByID\", err)\n\t\treturn\n\t}\n\tif protectBranch == nil || protectBranch.RepoID != repo.ID {\n\t\tctx.NotFound()\n\t\treturn\n\t}\n\n\tif form.EnablePush != nil {\n\t\tif !*form.EnablePush {\n\t\t\tprotectBranch.CanPush = false\n\t\t\tprotectBranch.EnableWhitelist = false\n\t\t\tprotectBranch.WhitelistDeployKeys = false\n\t\t} else {\n\t\t\tprotectBranch.CanPush = true\n\t\t\tif form.EnablePushWhitelist != nil {\n\t\t\t\tif !*form.EnablePushWhitelist {\n\t\t\t\t\tprotectBranch.EnableWhitelist = false\n\t\t\t\t\tprotectBranch.WhitelistDeployKeys = false\n\t\t\t\t} else {\n\t\t\t\t\tprotectBranch.EnableWhitelist = true\n\t\t\t\t\tif form.PushWhitelistDeployKeys != nil {\n\t\t\t\t\t\tprotectBranch.WhitelistDeployKeys = *form.PushWhitelistDeployKeys\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tif form.EnableMergeWhitelist != nil {\n\t\tprotectBranch.EnableMergeWhitelist = *form.EnableMergeWhitelist\n\t}\n\n\tif form.EnableStatusCheck != nil {\n\t\tprotectBranch.EnableStatusCheck = *form.EnableStatusCheck\n\t}\n\n\tif form.StatusCheckContexts != nil 
{\n\t\tprotectBranch.StatusCheckContexts = form.StatusCheckContexts\n\t}\n\n\tif form.RequiredApprovals != nil && *form.RequiredApprovals >= 0 {\n\t\tprotectBranch.RequiredApprovals = *form.RequiredApprovals\n\t}\n\n\tif form.EnableApprovalsWhitelist != nil {\n\t\tprotectBranch.EnableApprovalsWhitelist = *form.EnableApprovalsWhitelist\n\t}\n\n\tif form.BlockOnRejectedReviews != nil {\n\t\tprotectBranch.BlockOnRejectedReviews = *form.BlockOnRejectedReviews\n\t}\n\n\tif form.BlockOnOfficialReviewRequests != nil {\n\t\tprotectBranch.BlockOnOfficialReviewRequests = *form.BlockOnOfficialReviewRequests\n\t}\n\n\tif form.DismissStaleApprovals != nil {\n\t\tprotectBranch.DismissStaleApprovals = *form.DismissStaleApprovals\n\t}\n\n\tif form.RequireSignedCommits != nil {\n\t\tprotectBranch.RequireSignedCommits = *form.RequireSignedCommits\n\t}\n\n\tif form.ProtectedFilePatterns != nil {\n\t\tprotectBranch.ProtectedFilePatterns = *form.ProtectedFilePatterns\n\t}\n\n\tif form.UnprotectedFilePatterns != nil {\n\t\tprotectBranch.UnprotectedFilePatterns = *form.UnprotectedFilePatterns\n\t}\n\n\tif form.BlockOnOutdatedBranch != nil {\n\t\tprotectBranch.BlockOnOutdatedBranch = *form.BlockOnOutdatedBranch\n\t}\n\n\tvar whitelistUsers []int64\n\tif form.PushWhitelistUsernames != nil {\n\t\twhitelistUsers, err = user_model.GetUserIDsByNames(ctx, form.PushWhitelistUsernames, false)\n\t\tif err != nil {\n\t\t\tif user_model.IsErrUserNotExist(err) {\n\t\t\t\tctx.Error(http.StatusUnprocessableEntity, \"User does not exist\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tctx.Error(http.StatusInternalServerError, \"GetUserIDsByNames\", err)\n\t\t\treturn\n\t\t}\n\t} else {\n\t\twhitelistUsers = protectBranch.WhitelistUserIDs\n\t}\n\tvar mergeWhitelistUsers []int64\n\tif form.MergeWhitelistUsernames != nil {\n\t\tmergeWhitelistUsers, err = user_model.GetUserIDsByNames(ctx, form.MergeWhitelistUsernames, false)\n\t\tif err != nil {\n\t\t\tif user_model.IsErrUserNotExist(err) {\n\t\t\t\tctx.Error(http.StatusUnprocessableEntity, \"User does not exist\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tctx.Error(http.StatusInternalServerError, \"GetUserIDsByNames\", err)\n\t\t\treturn\n\t\t}\n\t} else {\n\t\tmergeWhitelistUsers = protectBranch.MergeWhitelistUserIDs\n\t}\n\tvar approvalsWhitelistUsers []int64\n\tif form.ApprovalsWhitelistUsernames != nil {\n\t\tapprovalsWhitelistUsers, err = user_model.GetUserIDsByNames(ctx, form.ApprovalsWhitelistUsernames, false)\n\t\tif err != nil {\n\t\t\tif user_model.IsErrUserNotExist(err) {\n\t\t\t\tctx.Error(http.StatusUnprocessableEntity, \"User does not exist\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tctx.Error(http.StatusInternalServerError, \"GetUserIDsByNames\", err)\n\t\t\treturn\n\t\t}\n\t} else {\n\t\tapprovalsWhitelistUsers = protectBranch.ApprovalsWhitelistUserIDs\n\t}\n\n\tvar whitelistTeams, mergeWhitelistTeams, approvalsWhitelistTeams []int64\n\tif repo.Owner.IsOrganization() {\n\t\tif form.PushWhitelistTeams != nil {\n\t\t\twhitelistTeams, err = organization.GetTeamIDsByNames(repo.OwnerID, form.PushWhitelistTeams, false)\n\t\t\tif err != nil {\n\t\t\t\tif organization.IsErrTeamNotExist(err) {\n\t\t\t\t\tctx.Error(http.StatusUnprocessableEntity, \"Team does not exist\", err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tctx.Error(http.StatusInternalServerError, \"GetTeamIDsByNames\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t} else {\n\t\t\twhitelistTeams = protectBranch.WhitelistTeamIDs\n\t\t}\n\t\tif form.MergeWhitelistTeams != nil {\n\t\t\tmergeWhitelistTeams, err = 
organization.GetTeamIDsByNames(repo.OwnerID, form.MergeWhitelistTeams, false)\n\t\t\tif err != nil {\n\t\t\t\tif organization.IsErrTeamNotExist(err) {\n\t\t\t\t\tctx.Error(http.StatusUnprocessableEntity, \"Team does not exist\", err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tctx.Error(http.StatusInternalServerError, \"GetTeamIDsByNames\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t} else {\n\t\t\tmergeWhitelistTeams = protectBranch.MergeWhitelistTeamIDs\n\t\t}\n\t\tif form.ApprovalsWhitelistTeams != nil {\n\t\t\tapprovalsWhitelistTeams, err = organization.GetTeamIDsByNames(repo.OwnerID, form.ApprovalsWhitelistTeams, false)\n\t\t\tif err != nil {\n\t\t\t\tif organization.IsErrTeamNotExist(err) {\n\t\t\t\t\tctx.Error(http.StatusUnprocessableEntity, \"Team does not exist\", err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tctx.Error(http.StatusInternalServerError, \"GetTeamIDsByNames\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t} else {\n\t\t\tapprovalsWhitelistTeams = protectBranch.ApprovalsWhitelistTeamIDs\n\t\t}\n\t}\n\n\terr = git_model.UpdateProtectBranch(ctx, ctx.Repo.Repository, protectBranch, git_model.WhitelistOptions{\n\t\tUserIDs: whitelistUsers,\n\t\tTeamIDs: whitelistTeams,\n\t\tMergeUserIDs: mergeWhitelistUsers,\n\t\tMergeTeamIDs: mergeWhitelistTeams,\n\t\tApprovalsUserIDs: approvalsWhitelistUsers,\n\t\tApprovalsTeamIDs: approvalsWhitelistTeams,\n\t})\n\tif err != nil {\n\t\tctx.Error(http.StatusInternalServerError, \"UpdateProtectBranch\", err)\n\t\treturn\n\t}\n\n\tisPlainRule := !git_model.IsRuleNameSpecial(bpName)\n\tvar isBranchExist bool\n\tif isPlainRule {\n\t\tisBranchExist = git.IsBranchExist(ctx.Req.Context(), ctx.Repo.Repository.RepoPath(), bpName)\n\t}\n\n\tif isBranchExist {\n\t\tif err = pull_service.CheckPRsForBaseBranch(ctx, ctx.Repo.Repository, bpName); err != nil {\n\t\t\tctx.Error(http.StatusInternalServerError, \"CheckPrsForBaseBranch\", err)\n\t\t\treturn\n\t\t}\n\t} else {\n\t\tif !isPlainRule {\n\t\t\tif ctx.Repo.GitRepo == nil {\n\t\t\t\tctx.Repo.GitRepo, err = git.OpenRepository(ctx, ctx.Repo.Repository.RepoPath())\n\t\t\t\tif err != nil {\n\t\t\t\t\tctx.Error(http.StatusInternalServerError, \"OpenRepository\", err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tdefer func() {\n\t\t\t\t\tctx.Repo.GitRepo.Close()\n\t\t\t\t\tctx.Repo.GitRepo = nil\n\t\t\t\t}()\n\t\t\t}\n\n\t\t\t// FIXME: since we only need to recheck files protected rules, we could improve this\n\t\t\tmatchedBranches, err := git_model.FindAllMatchedBranches(ctx, ctx.Repo.Repository.ID, protectBranch.RuleName)\n\t\t\tif err != nil {\n\t\t\t\tctx.Error(http.StatusInternalServerError, \"FindAllMatchedBranches\", err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tfor _, branchName := range matchedBranches {\n\t\t\t\tif err = pull_service.CheckPRsForBaseBranch(ctx, ctx.Repo.Repository, branchName); err != nil {\n\t\t\t\t\tctx.Error(http.StatusInternalServerError, \"CheckPrsForBaseBranch\", err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t// Reload from db to ensure get all whitelists\n\tbp, err := git_model.GetProtectedBranchRuleByName(ctx, repo.ID, bpName)\n\tif err != nil {\n\t\tctx.Error(http.StatusInternalServerError, \"GetProtectedBranchBy\", err)\n\t\treturn\n\t}\n\tif bp == nil || bp.RepoID != ctx.Repo.Repository.ID {\n\t\tctx.Error(http.StatusInternalServerError, \"New branch protection not found\", err)\n\t\treturn\n\t}\n\n\tctx.JSON(http.StatusOK, convert.ToBranchProtection(bp))\n}", "func (c *AuthorizedApp) IsAllowedRegion(s string) bool {\n\tif len(c.AllowedRegions) == 0 {\n\t\treturn true\n\t}\n\n\t_, ok := 
c.AllowedRegions[s]\n\treturn ok\n}", "func (b *userBiz) Authorize(user web.User, h web.HandlerInfo) bool {\n\tif au, ok := user.(*model.AuthUser); ok {\n\t\treturn au.IsAllowed(h.Name())\n\t}\n\treturn false\n}", "func (c *Credentials) IsAuthorised(token string) bool {\n\t_, ok := c.whitelist[token]\n\treturn ok\n}", "func (r *Router) CanApproveToUser(c *gin.Context) {\n\treqUser := user.ExtractRequestUser(c)\n\tif reqUser == nil {\n\t\tc.AbortWithStatus(http.StatusUnauthorized)\n\t\treturn\n\t}\n\n\tuserID := c.Param(ParamUserID)\n\tif userID == \"\" {\n\t\tc.String(http.StatusBadRequest, fmt.Sprintf(\"%s field is required\", ParamUserID))\n\t\treturn\n\t}\n\n\tapproverID := c.Param(ParamApproverID)\n\tif approverID == \"\" {\n\t\tc.String(http.StatusBadRequest, fmt.Sprintf(\"%s field is required\", ParamApproverID))\n\t\treturn\n\t}\n\n\tdestination := c.GetHeader(HeaderDestionation)\n\tif destination == \"\" {\n\t\tc.String(http.StatusBadRequest, fmt.Sprintf(\"%s header is required\", HeaderDestionation))\n\t\treturn\n\t}\n\tif destination != viper.GetString(ConfigCtsDest) && destination != viper.GetString(ConfigTomcalDest) {\n\t\tc.String(http.StatusBadRequest, fmt.Sprintf(\"destination %s doesnt supported\", destination))\n\t\treturn\n\t}\n\n\tcanApproveToUserRequest := &drp.CanApproveToUserRequest{\n\t\tApproverID: approverID,\n\t\tUserID: userID,\n\t\tDestination: destination,\n\t}\n\n\tcanApproveToUserInfo, err := r.dropboxClient().CanApproveToUser(c.Request.Context(), canApproveToUserRequest)\n\n\tif err != nil {\n\t\thttpStatusCode := gwruntime.HTTPStatusFromCode(status.Code(err))\n\t\tloggermiddleware.LogError(r.logger, c.AbortWithError(httpStatusCode, err))\n\t\treturn\n\t}\n\n\tc.JSON(http.StatusOK, canApproveToUserInfo)\n}", "func (sp *SaneProxy) Banned(backend string) bool {\n\treturn sp.BackendsBan[sp.BackendHostMap[backend]]\n}", "func UserIsAdmin(r *http.Request) bool {\n\tval := r.Context().Value(request.CtxAccess)\n\tif val == nil {\n\t\treturn false\n\t}\n\n\tua := val.(*UserAccess)\n\treturn ua.Admin\n}", "func (s *UserRepository) IsUser(email, password string) bool {\n\treturn false\n}", "func (ctx *serverRequestContextImpl) CanModifyUser(req *api.ModifyIdentityRequest, checkAff bool, checkType bool, checkAttrs bool, userToModify user.User) error {\n\tif checkAff {\n\t\treqAff := req.Affiliation\n\t\tlog.Debugf(\"Checking if caller is authorized to change affiliation to '%s'\", reqAff)\n\t\terr := ctx.ContainsAffiliation(reqAff)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif checkType {\n\t\treqType := req.Type\n\t\tlog.Debugf(\"Checking if caller is authorized to change type to '%s'\", reqType)\n\t\terr := ctx.CanActOnType(reqType)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif checkAttrs {\n\t\treqAttrs := req.Attributes\n\t\tlog.Debugf(\"Checking if caller is authorized to change attributes to %+v\", reqAttrs)\n\t\terr := attr.CanRegisterRequestedAttributes(reqAttrs, userToModify, ctx.caller)\n\t\tif err != nil {\n\t\t\treturn caerrors.NewAuthorizationErr(caerrors.ErrRegAttrAuth, \"Failed to register attributes: %s\", err)\n\t\t}\n\t}\n\n\treturn nil\n}", "func handleRepo(ctx context.Context, client *github.Client, repo *github.Repository) error {\n\topt := &github.ListOptions{\n\t\tPerPage: 100,\n\t}\n\n\tbranches, resp, err := client.Repositories.ListBranches(ctx, *repo.Owner.Login, *repo.Name, opt)\n\tif resp.StatusCode == http.StatusNotFound || resp.StatusCode == http.StatusForbidden {\n\t\treturn nil\n\t}\n\tif err != nil 
{\n\t\treturn err\n\t}\n\n\tfor _, branch := range branches {\n\t\tif branch.GetName() == \"master\" && in(orgs, *repo.Owner.Login) {\n\t\t\t// we must get the individual branch for the branch protection to work\n\t\t\tb, _, err := client.Repositories.GetBranch(ctx, *repo.Owner.Login, *repo.Name, branch.GetName())\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\t// return early if it is already protected\n\t\t\tif b.GetProtected() {\n\t\t\t\tfmt.Printf(\"[OK] %s:%s is already protected\\n\", *repo.FullName, b.GetName())\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\t\tfmt.Printf(\"[UPDATE] %s:%s will be changed to protected\\n\", *repo.FullName, b.GetName())\n\t\t\tif dryrun {\n\t\t\t\t// return early\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\t\t// set the branch to be protected\n\t\t\tif _, _, err := client.Repositories.UpdateBranchProtection(ctx, *repo.Owner.Login, *repo.Name, b.GetName(), &github.ProtectionRequest{\n\t\t\t\tRequiredStatusChecks: &github.RequiredStatusChecks{\n\t\t\t\t\tStrict: false,\n\t\t\t\t\tContexts: []string{},\n\t\t\t\t},\n\t\t\t}); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}", "func (ctx *Context) IsUserSiteAdmin() bool {\n\treturn ctx.IsSigned && ctx.Doer.IsAdmin\n}", "func (b *BranchDAG) BranchIDsContainRejectedBranch(branchIDs BranchIDs) (rejected bool, rejectedBranchID BranchID) {\n\tfor rejectedBranchID = range branchIDs {\n\t\tif !b.Branch(rejectedBranchID).Consume(func(branch Branch) {\n\t\t\trejected = branch.InclusionState() == Rejected\n\t\t}) {\n\t\t\tpanic(fmt.Sprintf(\"failed to load Branch with %s\", rejectedBranchID))\n\t\t}\n\n\t\tif rejected {\n\t\t\treturn\n\t\t}\n\t}\n\n\trejectedBranchID = UndefinedBranchID\n\n\treturn\n}", "func HasLocalBranch(branchName string) bool {\n\treturn util.DoesStringArrayContain(GetLocalBranches(), branchName)\n}", "func (_BREMFactory *BREMFactoryCallerSession) IsSuperuser(_addr common.Address) (bool, error) {\n\treturn _BREMFactory.Contract.IsSuperuser(&_BREMFactory.CallOpts, _addr)\n}", "func IsWhiteListed(number string, allowed *[]string) bool {\n\tfor _, n := range *allowed {\n\t\tif n == number {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (_FeeCurrencyWhitelist *FeeCurrencyWhitelistSession) IsOwner() (bool, error) {\n\treturn _FeeCurrencyWhitelist.Contract.IsOwner(&_FeeCurrencyWhitelist.CallOpts)\n}", "func BranchPushed(project, branch string) bool {\n\tp, err := FindProject(project)\n\tif err != nil {\n\t\treturn false\n\t}\n\n\tb, _, err := lab.Branches.GetBranch(p.ID, branch)\n\tif err != nil {\n\t\treturn false\n\t}\n\treturn b != nil\n}", "func (_BREM *BREMCaller) IsSuperuser(opts *bind.CallOpts, _addr common.Address) (bool, error) {\n\tvar (\n\t\tret0 = new(bool)\n\t)\n\tout := ret0\n\terr := _BREM.contract.Call(opts, out, \"isSuperuser\", _addr)\n\treturn *ret0, err\n}", "func (black *Blacklist) Allowed(item string) bool {\n\tif black.Size() == 0 {\n\t\tif black.strict {\n\t\t\t// in strict mode, we disallow everything when the list is empty\n\t\t\treturn false\n\t\t}\n\n\t\t// by default, we allow everything when the list is empty\n\t\treturn true\n\t}\n\n\treturn !black.Has(item)\n}", "func (srv *Service) InsertUserIntoWhitelist(to string) (*string, error) {\n\t//verify if user exists\n\tuser, err := srv.mongoRepository.GetUserByEmail(to)\n\tif err != nil {\n\t\tif mongoError := err.(*pkg.Error); mongoError.Code != http.StatusNotFound {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tif user != nil {\n\t\t//return a custom error\n\t\treturn nil, &pkg.Error{Err: err, Code: 
http.StatusNotFound, Message: \"A user with this email already exists\"}\n\t}\n\n\t//verify if user is already invited\n\tinvited, err := srv.mongoRepository.GetUserFromWhitelistByEmail(to)\n\tif err != nil {\n\t\tif mongoError := err.(*pkg.Error); mongoError.Code != http.StatusNotFound {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tif invited != nil {\n\t\t//return a custom error\n\t\treturn nil, &pkg.Error{Err: err, Code: http.StatusNotFound, Message: \"User already invited\"}\n\t}\n\n\t//add user to whitelist\n\t_, err = srv.mongoRepository.InsertUserIntoWhitelist(to)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdata := &email.Email{\n\t\tSenderName: \"Pod Chef team\",\n\t\tSubject: \"You have been added to the Pod Chef whitelist!\",\n\t}\n\n\temailBody :=\n\t\t\"Hi,\\n\" +\n\t\t\t\"You are now in the whitelist of the Pod Chef project and can register https://www.podchef.cf\\n\" +\n\t\t\t\"\\nWith The best regards, \" + data.SenderName + \".\"\n\n\t//call driven adapter responsible for sending an email\n\t_ = srv.emailRepository.SendEmailSMTP(to, data.Subject, emailBody)\n\n\tmessage := \"User invited sucessfully\"\n\n\treturn &message, nil\n}", "func (bs BranchInfos) HasLocalBranch(localBranch LocalBranchName) bool {\n\tfor _, branch := range bs {\n\t\tif branch.Name == localBranch {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func IsUserApprovedForClient(\n\tconfig_obj *api_proto.Config,\n\tmd *metadata.MD,\n\tclient_id string) bool {\n\treturn true\n}", "func (b Branch) Check(ctx context.Context, c *github.Client, owner,\n\trepo string) (*policydef.Result, error) {\n\treturn check(ctx, c.Repositories, c, owner, repo)\n}", "func (c *client) RemoveBranchProtection(org, repo, branch string) error {\n\tdurationLogger := c.log(\"RemoveBranchProtection\", org, repo, branch)\n\tdefer durationLogger()\n\n\t_, err := c.request(&request{\n\t\tmethod: http.MethodDelete,\n\t\tpath: fmt.Sprintf(\"/repos/%s/%s/branches/%s/protection\", org, repo, branch),\n\t\torg: org,\n\t\texitCodes: []int{204},\n\t}, nil)\n\treturn err\n}", "func (_Superuserable *SuperuserableCaller) IsSuperuser(opts *bind.CallOpts, _addr common.Address) (bool, error) {\n\tvar (\n\t\tret0 = new(bool)\n\t)\n\tout := ret0\n\terr := _Superuserable.contract.Call(opts, out, \"isSuperuser\", _addr)\n\treturn *ret0, err\n}", "func (bs BranchInfos) HasMatchingRemoteBranchFor(localBranch LocalBranchName) bool {\n\tremoteBranch := localBranch.RemoteBranch()\n\tfor _, branch := range bs {\n\t\tif branch.RemoteName == remoteBranch {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (repo *Repo) UserIsInLobby(userID int) (bool, error) {\n\tlobbyStmt, err := repo.DB.Prepare(\"SELECT UserId FROM LobbyUsers WHERE UserId = ?\")\n\tdefer lobbyStmt.Close()\n\tif err != nil {\n\t\treturn false, err\n\t}\n\t\n\t/*An error is thrown if we try to scan an empty result set, \n\t\tuse this to determine if user is in lobby*/\n\tvar user int\n\terr = lobbyStmt.QueryRow(userID).Scan(&user)\n\treturn err == nil, err\n}" ]
[ "0.56163293", "0.5373006", "0.52983207", "0.5277685", "0.5266368", "0.5255747", "0.51754534", "0.5144733", "0.51208335", "0.50630087", "0.5051098", "0.4973124", "0.4966368", "0.49533182", "0.4888079", "0.4856383", "0.48513022", "0.4842797", "0.48176417", "0.4758936", "0.47584724", "0.47584724", "0.47091612", "0.47003186", "0.46977127", "0.46721143", "0.46610528", "0.46153012", "0.461005", "0.45937172", "0.45930687", "0.4572737", "0.45584598", "0.45081055", "0.45017317", "0.44684076", "0.44656938", "0.44636443", "0.44615355", "0.445897", "0.4452816", "0.44511992", "0.44483337", "0.44444856", "0.4434849", "0.44104254", "0.44094843", "0.44077164", "0.44034293", "0.44022596", "0.43933275", "0.4350923", "0.43495974", "0.4342249", "0.43346557", "0.43324417", "0.43307897", "0.4328186", "0.43280393", "0.4326559", "0.43158376", "0.4311498", "0.43086118", "0.4308161", "0.43077707", "0.43025047", "0.4293117", "0.4287541", "0.4274396", "0.42651471", "0.4263449", "0.42510617", "0.42504817", "0.4243528", "0.42188284", "0.42171204", "0.4216248", "0.42155787", "0.4207252", "0.41991735", "0.41979933", "0.41913155", "0.41867504", "0.41836223", "0.41745695", "0.41722468", "0.41706732", "0.41599774", "0.41591457", "0.41591033", "0.41542983", "0.41460556", "0.4145216", "0.4141806", "0.4131286", "0.41239202", "0.41232568", "0.412301", "0.4122931", "0.4122739" ]
0.89497715
0
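For context, a minimal sketch (not part of the dataset itself) of how the scores just above could yield a document_rank of 0, assuming rank counts the negatives whose score beats the document_score; only the first few negative scores from the record are reproduced here:

```go
package main

import "fmt"

// rankOf counts how many negative scores exceed the document score.
// With document_score 0.89497715 above every negative_score listed
// (max about 0.5616), the record's document_rank of 0 follows.
func rankOf(docScore float64, negScores []float64) int {
	rank := 0
	for _, s := range negScores {
		if s > docScore {
			rank++
		}
	}
	return rank
}

func main() {
	negs := []float64{0.56163293, 0.5373006, 0.52983207} // first few from the record above
	fmt.Println(rankOf(0.89497715, negs))                // prints 0
}
```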
GetProtectBranchOfRepoByName returns a ProtectBranch by branch name in the given repository.
func GetProtectBranchOfRepoByName(repoID int64, name string) (*ProtectBranch, error) { protectBranch := &ProtectBranch{ RepoID: repoID, Name: name, } has, err := x.Get(protectBranch) if err != nil { return nil, err } else if !has { return nil, ErrBranchNotExist{args: map[string]any{"name": name}} } return protectBranch, nil }
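A minimal usage sketch (not part of the record), assuming the package-level xorm engine `x` and the `ErrBranchNotExist` type live alongside this function in a Gogs-style models package; the caller name, IDs, and the `CanPush` field (seen on the related ProtectedBranch struct in the negatives below) are assumptions:

```go
package models

import "log"

// describeProtection is a hypothetical caller illustrating the lookup.
// An error from GetProtectBranchOfRepoByName is either a database
// failure or ErrBranchNotExist, meaning no rule is configured.
func describeProtection(repoID int64, name string) {
	pb, err := GetProtectBranchOfRepoByName(repoID, name)
	if err != nil {
		log.Printf("no protection rule for %q: %v", name, err)
		return
	}
	// CanPush is assumed from the Gogs-style protect-branch model.
	log.Printf("branch %q is protected, direct push allowed: %v", name, pb.CanPush)
}
```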
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func GetBranchProtection(ctx *context.APIContext) {\n\t// swagger:operation GET /repos/{owner}/{repo}/branch_protections/{name} repository repoGetBranchProtection\n\t// ---\n\t// summary: Get a specific branch protection for the repository\n\t// produces:\n\t// - application/json\n\t// parameters:\n\t// - name: owner\n\t// in: path\n\t// description: owner of the repo\n\t// type: string\n\t// required: true\n\t// - name: repo\n\t// in: path\n\t// description: name of the repo\n\t// type: string\n\t// required: true\n\t// - name: name\n\t// in: path\n\t// description: name of protected branch\n\t// type: string\n\t// required: true\n\t// responses:\n\t// \"200\":\n\t// \"$ref\": \"#/responses/BranchProtection\"\n\t// \"404\":\n\t// \"$ref\": \"#/responses/notFound\"\n\n\trepo := ctx.Repo.Repository\n\tbpName := ctx.Params(\":name\")\n\tbp, err := git_model.GetProtectedBranchRuleByName(ctx, repo.ID, bpName)\n\tif err != nil {\n\t\tctx.Error(http.StatusInternalServerError, \"GetProtectedBranchByID\", err)\n\t\treturn\n\t}\n\tif bp == nil || bp.RepoID != repo.ID {\n\t\tctx.NotFound()\n\t\treturn\n\t}\n\n\tctx.JSON(http.StatusOK, convert.ToBranchProtection(bp))\n}", "func (c *client) GetBranchProtection(org, repo, branch string) (*BranchProtection, error) {\n\tdurationLogger := c.log(\"GetBranchProtection\", org, repo, branch)\n\tdefer durationLogger()\n\n\tcode, body, err := c.requestRaw(&request{\n\t\tmethod: http.MethodGet,\n\t\tpath: fmt.Sprintf(\"/repos/%s/%s/branches/%s/protection\", org, repo, branch),\n\t\torg: org,\n\t\t// GitHub returns 404 for this call if either:\n\t\t// - The branch is not protected\n\t\t// - The access token used does not have sufficient privileges\n\t\t// We therefore need to introspect the response body.\n\t\texitCodes: []int{200, 404},\n\t})\n\n\tswitch {\n\tcase err != nil:\n\t\treturn nil, err\n\tcase code == 200:\n\t\tvar bp BranchProtection\n\t\tif err := json.Unmarshal(body, &bp); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn &bp, nil\n\tcase code == 404:\n\t\t// continue\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"unexpected status code: %d\", code)\n\t}\n\n\tvar ge githubError\n\tif err := json.Unmarshal(body, &ge); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// If the error was because the branch is not protected, we return a\n\t// nil pointer to indicate this.\n\tif ge.Message == \"Branch not protected\" {\n\t\treturn nil, nil\n\t}\n\n\t// Otherwise we got some other 404 error.\n\treturn nil, fmt.Errorf(\"getting branch protection 404: %s\", ge.Message)\n}", "func (g *github) GetBranchName() string { return g.branchName }", "func (novis *Novis) GetBranch(name string) *Branch {\n\treturn novis.Get(name)\n}", "func GetProtectBranchesByRepoID(repoID int64) ([]*ProtectBranch, error) {\n\tprotectBranches := make([]*ProtectBranch, 0, 2)\n\treturn protectBranches, x.Where(\"repo_id = ? 
and protected = ?\", repoID, true).Asc(\"name\").Find(&protectBranches)\n}", "func GetBranch(name string) *Branch {\n\treturn novis.Get(name)\n}", "func GetBranchProtection(ctx *pulumi.Context,\n\tname string, id pulumi.IDInput, state *BranchProtectionState, opts ...pulumi.ResourceOption) (*BranchProtection, error) {\n\tvar resource BranchProtection\n\terr := ctx.ReadResource(\"gitlab:index/branchProtection:BranchProtection\", name, id, state, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func handleRepo(ctx context.Context, client *github.Client, repo *github.Repository) error {\n\topt := &github.ListOptions{\n\t\tPerPage: 100,\n\t}\n\n\tbranches, resp, err := client.Repositories.ListBranches(ctx, *repo.Owner.Login, *repo.Name, opt)\n\tif resp.StatusCode == http.StatusNotFound || resp.StatusCode == http.StatusForbidden {\n\t\treturn nil\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, branch := range branches {\n\t\tif branch.GetName() == \"master\" && in(orgs, *repo.Owner.Login) {\n\t\t\t// we must get the individual branch for the branch protection to work\n\t\t\tb, _, err := client.Repositories.GetBranch(ctx, *repo.Owner.Login, *repo.Name, branch.GetName())\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\t// return early if it is already protected\n\t\t\tif b.GetProtected() {\n\t\t\t\tfmt.Printf(\"[OK] %s:%s is already protected\\n\", *repo.FullName, b.GetName())\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\t\tfmt.Printf(\"[UPDATE] %s:%s will be changed to protected\\n\", *repo.FullName, b.GetName())\n\t\t\tif dryrun {\n\t\t\t\t// return early\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\t\t// set the branch to be protected\n\t\t\tif _, _, err := client.Repositories.UpdateBranchProtection(ctx, *repo.Owner.Login, *repo.Name, b.GetName(), &github.ProtectionRequest{\n\t\t\t\tRequiredStatusChecks: &github.RequiredStatusChecks{\n\t\t\t\t\tStrict: false,\n\t\t\t\t\tContexts: []string{},\n\t\t\t\t},\n\t\t\t}); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}", "func (s *splicer) branch(name string) error {\n\treturn s.gitCall(\"checkout\", \"-B\", name, \"master\")\n}", "func (o FunctionBuildConfigSourceRepoSourceOutput) BranchName() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v FunctionBuildConfigSourceRepoSource) *string { return v.BranchName }).(pulumi.StringPtrOutput)\n}", "func checkBranchName(repo *models.Repository, name string) error {\n\tgitRepo, err := git.OpenRepository(repo.RepoPath())\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer gitRepo.Close()\n\n\tbranches, _, err := GetBranches(repo, 0, 0)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, branch := range branches {\n\t\tif branch.Name == name {\n\t\t\treturn models.ErrBranchAlreadyExists{\n\t\t\t\tBranchName: branch.Name,\n\t\t\t}\n\t\t} else if (len(branch.Name) < len(name) && branch.Name+\"/\" == name[0:len(branch.Name)+1]) ||\n\t\t\t(len(branch.Name) > len(name) && name+\"/\" == branch.Name[0:len(name)+1]) {\n\t\t\treturn models.ErrBranchNameConflict{\n\t\t\t\tBranchName: branch.Name,\n\t\t\t}\n\t\t}\n\t}\n\n\tif _, err := gitRepo.GetTag(name); err == nil {\n\t\treturn models.ErrTagAlreadyExists{\n\t\t\tTagName: name,\n\t\t}\n\t}\n\n\treturn nil\n}", "func (r *Repository) GitLabGetRepositoryByName(ctx context.Context, repositoryName string) (*repoModels.RepositoryDBModel, error) {\n\tcondition := expression.Key(repoModels.RepositoryNameColumn).Equal(expression.Value(repositoryName))\n\tfilter := 
expression.Name(repoModels.RepositoryTypeColumn).Equal(expression.Value(utils.GitLabLower))\n\trecord, err := r.getRepositoryWithConditionFilter(ctx, condition, filter, repoModels.RepositoryNameIndex)\n\tif err != nil {\n\t\t// Catch the error - return the same error with the appropriate details\n\t\tif _, ok := err.(*utils.GitLabRepositoryNotFound); ok {\n\t\t\treturn nil, &utils.GitLabRepositoryNotFound{\n\t\t\t\tRepositoryName: repositoryName,\n\t\t\t}\n\t\t}\n\t\t// Catch the error - return the same error with the appropriate details\n\t\tif _, ok := err.(*utils.GitLabDuplicateRepositoriesFound); ok {\n\t\t\treturn nil, &utils.GitLabDuplicateRepositoriesFound{\n\t\t\t\tRepositoryName: repositoryName,\n\t\t\t}\n\t\t}\n\t\t// Some other error\n\t\treturn nil, err\n\t}\n\n\treturn record, nil\n}", "func (g *github) GetRepoName() string { return g.repoName }", "func (o FunctionBuildConfigSourceRepoSourcePtrOutput) BranchName() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *FunctionBuildConfigSourceRepoSource) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.BranchName\n\t}).(pulumi.StringPtrOutput)\n}", "func (s *BucketService) FindBucketByName(ctx context.Context, orgID influxdb.ID, n string) (*influxdb.Bucket, error) {\n\tspan, ctx := tracing.StartSpanFromContext(ctx)\n\tdefer span.Finish()\n\n\tb, err := s.s.FindBucketByName(ctx, orgID, n)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err := authorizeReadBucket(ctx, b.OrgID, b.ID); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn b, nil\n}", "func NewBranchProtection(ctx *pulumi.Context,\n\tname string, args *BranchProtectionArgs, opts ...pulumi.ResourceOption) (*BranchProtection, error) {\n\tif args == nil {\n\t\treturn nil, errors.New(\"missing one or more required arguments\")\n\t}\n\n\tif args.Branch == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'Branch'\")\n\t}\n\tif args.Project == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'Project'\")\n\t}\n\topts = internal.PkgResourceDefaultOpts(opts)\n\tvar resource BranchProtection\n\terr := ctx.RegisterResource(\"gitlab:index/branchProtection:BranchProtection\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func (o TriggerBuildSourceRepoSourceOutput) BranchName() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v TriggerBuildSourceRepoSource) *string { return v.BranchName }).(pulumi.StringPtrOutput)\n}", "func findBranch(brs []Branch, name string) *Branch {\n\tfor i, b := range brs {\n\t\tif b.Name == name {\n\t\t\treturn &(brs[i])\n\t\t}\n\t}\n\treturn nil\n}", "func getModuleBranch(moduleName string, proj *model.Project) (string, error) {\n\t// find the module of the patch\n\tfor _, module := range proj.Modules {\n\t\tif module.Name == moduleName {\n\t\t\treturn module.Branch, nil\n\t\t}\n\t}\n\treturn \"\", errors.Errorf(\"module '%s' unknown or not found\", moduleName)\n}", "func (o TriggerBuildSourceRepoSourcePtrOutput) BranchName() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *TriggerBuildSourceRepoSource) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.BranchName\n\t}).(pulumi.StringPtrOutput)\n}", "func CreateBranchProtection(ctx *context.APIContext) {\n\t// swagger:operation POST /repos/{owner}/{repo}/branch_protections repository repoCreateBranchProtection\n\t// ---\n\t// summary: Create a branch protections for a repository\n\t// consumes:\n\t// - application/json\n\t// produces:\n\t// - 
application/json\n\t// parameters:\n\t// - name: owner\n\t// in: path\n\t// description: owner of the repo\n\t// type: string\n\t// required: true\n\t// - name: repo\n\t// in: path\n\t// description: name of the repo\n\t// type: string\n\t// required: true\n\t// - name: body\n\t// in: body\n\t// schema:\n\t// \"$ref\": \"#/definitions/CreateBranchProtectionOption\"\n\t// responses:\n\t// \"201\":\n\t// \"$ref\": \"#/responses/BranchProtection\"\n\t// \"403\":\n\t// \"$ref\": \"#/responses/forbidden\"\n\t// \"404\":\n\t// \"$ref\": \"#/responses/notFound\"\n\t// \"422\":\n\t// \"$ref\": \"#/responses/validationError\"\n\n\tform := web.GetForm(ctx).(*api.CreateBranchProtectionOption)\n\trepo := ctx.Repo.Repository\n\n\truleName := form.RuleName\n\tif ruleName == \"\" {\n\t\truleName = form.BranchName //nolint\n\t}\n\tif len(ruleName) == 0 {\n\t\tctx.Error(http.StatusBadRequest, \"both rule_name and branch_name are empty\", \"both rule_name and branch_name are empty\")\n\t\treturn\n\t}\n\n\tisPlainRule := !git_model.IsRuleNameSpecial(ruleName)\n\tvar isBranchExist bool\n\tif isPlainRule {\n\t\tisBranchExist = git.IsBranchExist(ctx.Req.Context(), ctx.Repo.Repository.RepoPath(), ruleName)\n\t}\n\n\tprotectBranch, err := git_model.GetProtectedBranchRuleByName(ctx, repo.ID, ruleName)\n\tif err != nil {\n\t\tctx.Error(http.StatusInternalServerError, \"GetProtectBranchOfRepoByName\", err)\n\t\treturn\n\t} else if protectBranch != nil {\n\t\tctx.Error(http.StatusForbidden, \"Create branch protection\", \"Branch protection already exist\")\n\t\treturn\n\t}\n\n\tvar requiredApprovals int64\n\tif form.RequiredApprovals > 0 {\n\t\trequiredApprovals = form.RequiredApprovals\n\t}\n\n\twhitelistUsers, err := user_model.GetUserIDsByNames(ctx, form.PushWhitelistUsernames, false)\n\tif err != nil {\n\t\tif user_model.IsErrUserNotExist(err) {\n\t\t\tctx.Error(http.StatusUnprocessableEntity, \"User does not exist\", err)\n\t\t\treturn\n\t\t}\n\t\tctx.Error(http.StatusInternalServerError, \"GetUserIDsByNames\", err)\n\t\treturn\n\t}\n\tmergeWhitelistUsers, err := user_model.GetUserIDsByNames(ctx, form.MergeWhitelistUsernames, false)\n\tif err != nil {\n\t\tif user_model.IsErrUserNotExist(err) {\n\t\t\tctx.Error(http.StatusUnprocessableEntity, \"User does not exist\", err)\n\t\t\treturn\n\t\t}\n\t\tctx.Error(http.StatusInternalServerError, \"GetUserIDsByNames\", err)\n\t\treturn\n\t}\n\tapprovalsWhitelistUsers, err := user_model.GetUserIDsByNames(ctx, form.ApprovalsWhitelistUsernames, false)\n\tif err != nil {\n\t\tif user_model.IsErrUserNotExist(err) {\n\t\t\tctx.Error(http.StatusUnprocessableEntity, \"User does not exist\", err)\n\t\t\treturn\n\t\t}\n\t\tctx.Error(http.StatusInternalServerError, \"GetUserIDsByNames\", err)\n\t\treturn\n\t}\n\tvar whitelistTeams, mergeWhitelistTeams, approvalsWhitelistTeams []int64\n\tif repo.Owner.IsOrganization() {\n\t\twhitelistTeams, err = organization.GetTeamIDsByNames(repo.OwnerID, form.PushWhitelistTeams, false)\n\t\tif err != nil {\n\t\t\tif organization.IsErrTeamNotExist(err) {\n\t\t\t\tctx.Error(http.StatusUnprocessableEntity, \"Team does not exist\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tctx.Error(http.StatusInternalServerError, \"GetTeamIDsByNames\", err)\n\t\t\treturn\n\t\t}\n\t\tmergeWhitelistTeams, err = organization.GetTeamIDsByNames(repo.OwnerID, form.MergeWhitelistTeams, false)\n\t\tif err != nil {\n\t\t\tif organization.IsErrTeamNotExist(err) {\n\t\t\t\tctx.Error(http.StatusUnprocessableEntity, \"Team does not exist\", 
err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tctx.Error(http.StatusInternalServerError, \"GetTeamIDsByNames\", err)\n\t\t\treturn\n\t\t}\n\t\tapprovalsWhitelistTeams, err = organization.GetTeamIDsByNames(repo.OwnerID, form.ApprovalsWhitelistTeams, false)\n\t\tif err != nil {\n\t\t\tif organization.IsErrTeamNotExist(err) {\n\t\t\t\tctx.Error(http.StatusUnprocessableEntity, \"Team does not exist\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tctx.Error(http.StatusInternalServerError, \"GetTeamIDsByNames\", err)\n\t\t\treturn\n\t\t}\n\t}\n\n\tprotectBranch = &git_model.ProtectedBranch{\n\t\tRepoID: ctx.Repo.Repository.ID,\n\t\tRuleName: ruleName,\n\t\tCanPush: form.EnablePush,\n\t\tEnableWhitelist: form.EnablePush && form.EnablePushWhitelist,\n\t\tEnableMergeWhitelist: form.EnableMergeWhitelist,\n\t\tWhitelistDeployKeys: form.EnablePush && form.EnablePushWhitelist && form.PushWhitelistDeployKeys,\n\t\tEnableStatusCheck: form.EnableStatusCheck,\n\t\tStatusCheckContexts: form.StatusCheckContexts,\n\t\tEnableApprovalsWhitelist: form.EnableApprovalsWhitelist,\n\t\tRequiredApprovals: requiredApprovals,\n\t\tBlockOnRejectedReviews: form.BlockOnRejectedReviews,\n\t\tBlockOnOfficialReviewRequests: form.BlockOnOfficialReviewRequests,\n\t\tDismissStaleApprovals: form.DismissStaleApprovals,\n\t\tRequireSignedCommits: form.RequireSignedCommits,\n\t\tProtectedFilePatterns: form.ProtectedFilePatterns,\n\t\tUnprotectedFilePatterns: form.UnprotectedFilePatterns,\n\t\tBlockOnOutdatedBranch: form.BlockOnOutdatedBranch,\n\t}\n\n\terr = git_model.UpdateProtectBranch(ctx, ctx.Repo.Repository, protectBranch, git_model.WhitelistOptions{\n\t\tUserIDs: whitelistUsers,\n\t\tTeamIDs: whitelistTeams,\n\t\tMergeUserIDs: mergeWhitelistUsers,\n\t\tMergeTeamIDs: mergeWhitelistTeams,\n\t\tApprovalsUserIDs: approvalsWhitelistUsers,\n\t\tApprovalsTeamIDs: approvalsWhitelistTeams,\n\t})\n\tif err != nil {\n\t\tctx.Error(http.StatusInternalServerError, \"UpdateProtectBranch\", err)\n\t\treturn\n\t}\n\n\tif isBranchExist {\n\t\tif err = pull_service.CheckPRsForBaseBranch(ctx, ctx.Repo.Repository, ruleName); err != nil {\n\t\t\tctx.Error(http.StatusInternalServerError, \"CheckPRsForBaseBranch\", err)\n\t\t\treturn\n\t\t}\n\t} else {\n\t\tif !isPlainRule {\n\t\t\tif ctx.Repo.GitRepo == nil {\n\t\t\t\tctx.Repo.GitRepo, err = git.OpenRepository(ctx, ctx.Repo.Repository.RepoPath())\n\t\t\t\tif err != nil {\n\t\t\t\t\tctx.Error(http.StatusInternalServerError, \"OpenRepository\", err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tdefer func() {\n\t\t\t\t\tctx.Repo.GitRepo.Close()\n\t\t\t\t\tctx.Repo.GitRepo = nil\n\t\t\t\t}()\n\t\t\t}\n\t\t\t// FIXME: since we only need to recheck files protected rules, we could improve this\n\t\t\tmatchedBranches, err := git_model.FindAllMatchedBranches(ctx, ctx.Repo.Repository.ID, ruleName)\n\t\t\tif err != nil {\n\t\t\t\tctx.Error(http.StatusInternalServerError, \"FindAllMatchedBranches\", err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tfor _, branchName := range matchedBranches {\n\t\t\t\tif err = pull_service.CheckPRsForBaseBranch(ctx, ctx.Repo.Repository, branchName); err != nil {\n\t\t\t\t\tctx.Error(http.StatusInternalServerError, \"CheckPRsForBaseBranch\", err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t// Reload from db to get all whitelists\n\tbp, err := git_model.GetProtectedBranchRuleByName(ctx, ctx.Repo.Repository.ID, ruleName)\n\tif err != nil {\n\t\tctx.Error(http.StatusInternalServerError, \"GetProtectedBranchByID\", err)\n\t\treturn\n\t}\n\tif bp == nil || bp.RepoID != ctx.Repo.Repository.ID 
{\n\t\tctx.Error(http.StatusInternalServerError, \"New branch protection not found\", err)\n\t\treturn\n\t}\n\n\tctx.JSON(http.StatusCreated, convert.ToBranchProtection(bp))\n}", "func (t TestRepo) GetPolicyByName(org string, name string) (*Policy, error) {\n\tt.ArgsIn[GetPolicyByNameMethod][0] = org\n\tt.ArgsIn[GetPolicyByNameMethod][1] = name\n\tif specialFunc, ok := t.SpecialFuncs[GetPolicyByNameMethod].(func(org string, name string) (*Policy, error)); ok && specialFunc != nil {\n\t\treturn specialFunc(org, name)\n\t}\n\tvar policy *Policy\n\tif t.ArgsOut[GetPolicyByNameMethod][0] != nil {\n\t\tpolicy = t.ArgsOut[GetPolicyByNameMethod][0].(*Policy)\n\t}\n\tvar err error\n\tif t.ArgsOut[GetPolicyByNameMethod][1] != nil {\n\t\terr = t.ArgsOut[GetPolicyByNameMethod][1].(error)\n\t}\n\treturn policy, err\n}", "func (branch *Branch) Get(name string) (b *Branch, ok bool) {\n\tb, ok = branch.branches[name]\n\treturn\n}", "func (m *MockRepositoryClient) GetBranchProtection(org, repo, branch string) (*github.BranchProtection, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"GetBranchProtection\", org, repo, branch)\n\tret0, _ := ret[0].(*github.BranchProtection)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (s FakeStore) GetRepoByName(ctx context.Context, name string) (*Repo, error) {\n\tif s.GetRepoByNameError != nil {\n\t\treturn nil, s.GetRepoByNameError\n\t}\n\n\tfor _, r := range s.repoByID {\n\t\tif r.Name == name {\n\t\t\treturn r, nil\n\t\t}\n\t}\n\n\treturn nil, ErrNoResults\n}", "func GetBranch(ctx *context.APIContext) {\n\t// swagger:operation GET /repos/{owner}/{repo}/branches/{branch} repository repoGetBranch\n\t// ---\n\t// summary: Retrieve a specific branch from a repository, including its effective branch protection\n\t// produces:\n\t// - application/json\n\t// parameters:\n\t// - name: owner\n\t// in: path\n\t// description: owner of the repo\n\t// type: string\n\t// required: true\n\t// - name: repo\n\t// in: path\n\t// description: name of the repo\n\t// type: string\n\t// required: true\n\t// - name: branch\n\t// in: path\n\t// description: branch to get\n\t// type: string\n\t// required: true\n\t// responses:\n\t// \"200\":\n\t// \"$ref\": \"#/responses/Branch\"\n\t// \"404\":\n\t// \"$ref\": \"#/responses/notFound\"\n\n\tbranchName := ctx.Params(\"*\")\n\n\tbranch, err := ctx.Repo.GitRepo.GetBranch(branchName)\n\tif err != nil {\n\t\tif git.IsErrBranchNotExist(err) {\n\t\t\tctx.NotFound(err)\n\t\t} else {\n\t\t\tctx.Error(http.StatusInternalServerError, \"GetBranch\", err)\n\t\t}\n\t\treturn\n\t}\n\n\tc, err := branch.GetCommit()\n\tif err != nil {\n\t\tctx.Error(http.StatusInternalServerError, \"GetCommit\", err)\n\t\treturn\n\t}\n\n\tbranchProtection, err := git_model.GetFirstMatchProtectedBranchRule(ctx, ctx.Repo.Repository.ID, branchName)\n\tif err != nil {\n\t\tctx.Error(http.StatusInternalServerError, \"GetBranchProtection\", err)\n\t\treturn\n\t}\n\n\tbr, err := convert.ToBranch(ctx, ctx.Repo.Repository, branch.Name, c, branchProtection, ctx.Doer, ctx.Repo.IsAdmin())\n\tif err != nil {\n\t\tctx.Error(http.StatusInternalServerError, \"convert.ToBranch\", err)\n\t\treturn\n\t}\n\n\tctx.JSON(http.StatusOK, br)\n}", "func (p *PipelineActivity) BranchName() string {\n\tpipelineName := p.Spec.Pipeline\n\tif pipelineName == \"\" {\n\t\treturn \"\"\n\t}\n\tpaths := strings.Split(pipelineName, \"/\")\n\tbranch := paths[len(paths)-1]\n\tp.Spec.GitBranch = branch\n\treturn branch\n}", "func (m *Manager) GetByName(ctx context.Context, name string) 
(*hub.ChartRepository, error) {\n\tvar r *hub.ChartRepository\n\terr := m.dbQueryUnmarshal(ctx, &r, \"select get_chart_repository_by_name($1::text)\", name)\n\treturn r, err\n}", "func GetGitBranch(dir string) (string, error) {\n\treturn runGit(dir, \"rev-parse\", \"--abbrev-ref\", \"HEAD\")\n}", "func (m *MockClient) GetBranchProtection(org, repo, branch string) (*github.BranchProtection, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"GetBranchProtection\", org, repo, branch)\n\tret0, _ := ret[0].(*github.BranchProtection)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (p *GitProvider) GetName() string { return \"git\" }", "func GetRepository(name string) *Repository {\n\tfor i := range repos {\n\t\tif repos[i].Name == name {\n\t\t\treturn &repos[i]\n\t\t}\n\t}\n\tlog.Fatalln(\"Can't not find repository.\")\n\treturn nil\n}", "func (c *client) GetRepo(org, name string) (*library.Repo, error) {\n\tlogrus.Tracef(\"getting repo %s/%s from the database\", org, name)\n\n\t// variable to store query results\n\tr := new(database.Repo)\n\n\t// send query to the database and store result in variable\n\tresult := c.Postgres.\n\t\tTable(constants.TableRepo).\n\t\tRaw(dml.SelectRepo, org, name).\n\t\tScan(r)\n\n\t// check if the query returned a record not found error or no rows were returned\n\tif errors.Is(result.Error, gorm.ErrRecordNotFound) || result.RowsAffected == 0 {\n\t\treturn nil, gorm.ErrRecordNotFound\n\t}\n\n\t// decrypt the fields for the repo\n\t//\n\t// https://pkg.go.dev/github.com/go-vela/types/database#Repo.Decrypt\n\terr := r.Decrypt(c.config.EncryptionKey)\n\tif err != nil {\n\t\t// ensures that the change is backwards compatible\n\t\t// by logging the error instead of returning it\n\t\t// which allows us to fetch unencrypted repos\n\t\tlogrus.Errorf(\"unable to decrypt repo %s/%s: %v\", org, name, err)\n\n\t\t// return the unencrypted repo\n\t\treturn r.ToLibrary(), result.Error\n\t}\n\n\t// return the decrypted repo\n\treturn r.ToLibrary(), result.Error\n}", "func (r RepositoryName) Repo() (string, error) {\n\tss := strings.Split(string(r), \"/\")\n\tif len(ss) != 2 {\n\t\treturn \"\", fmt.Errorf(\"Invalid repository name: %s \", r)\n\t}\n\treturn ss[1], nil\n}", "func gitBranchName() string {\n\t// branch name variable set by Github Actions\n\tif branch, isset := os.LookupEnv(\"GITHUB_HEAD_REF\"); isset && branch != \"\" {\n\t\treturn \"origin/\" + branch\n\t}\n\tif branch, isset := os.LookupEnv(\"GITHUB_REF\"); isset && branch != \"\" {\n\t\treturn \"origin/\" + strings.TrimPrefix(branch, \"refs/heads/\")\n\t}\n\tbranch := getCmdOutput(\"git rev-parse --abbrev-ref HEAD\")\n\treturn branch\n}", "func GetByName(name string) (*Wireguard, error) {\n\tlink, err := netlink.LinkByName(name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif link.Type() != \"wireguard\" {\n\t\treturn nil, fmt.Errorf(\"link %s is not of type wireguard\", name)\n\t}\n\twg := &Wireguard{\n\t\tattrs: link.Attrs(),\n\t}\n\treturn wg, nil\n}", "func GetGitRepositoryDefaultBranch(url string) (string, error) {\n\terr := C.git_libgit2_init()\n\tif err < 0 {\n\t\treturn \"\", errors.New(\"failed to initialize libgit2\")\n\t}\n\tvar odb *C.git_odb\n\terr = C.git_odb_new(&odb)\n\tif err != 0 {\n\t\treturn \"\", errors.New(\"failed to create git_odb\")\n\t}\n\tvar repo *C.git_repository\n\terr = C.git_repository_wrap_odb(&repo, odb)\n\tif err != 0 {\n\t\treturn \"\", errors.New(\"failed to wrap odb into repository\")\n\t}\n\tvar remote *C.git_remote\n\terr = 
C.git_remote_create_anonymous(&remote, repo, C.CString(url))\n\tif err != 0 {\n\t\treturn \"\", errors.New(\"failed to create anonymous remote\")\n\t}\n\terr = C.git_remote_connect(remote, C.GIT_DIRECTION_FETCH, nil, nil, nil)\n\tif err != 0 {\n\t\treturn \"\", errors.New(\"failed to connect to remote (fetch)\")\n\t}\n\tvar remote_heads **C.git_remote_head\n\tvar remote_heads_size C.ulong\n\terr = C.git_remote_ls(&remote_heads, &remote_heads_size, remote)\n\tif err != 0 {\n\t\treturn \"\", errors.New(\"failed to list remote heads\")\n\t}\n\tvar remote_heads2 []*C.git_remote_head\n\tsh := (*reflect.SliceHeader)(unsafe.Pointer(&remote_heads2))\n\tsh.Data = uintptr(unsafe.Pointer(remote_heads))\n\tsh.Len = int(remote_heads_size)\n\tsh.Cap = int(remote_heads_size)\n\tfound := \"\"\n\tfor _, remote_head := range remote_heads2 {\n\t\tif remote_head.symref_target != nil {\n\t\t\t// s := C.GoString(C.git_oid_tostr_s(&remote_head.oid))\n\t\t\th := C.GoString(remote_head.name)\n\t\t\tif h != \"HEAD\" {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tsr := C.GoString(remote_head.symref_target)\n\t\t\tsr = strings.TrimPrefix(sr, \"refs/heads/\")\n\t\t\tlog.Printf(\"[%s] Found default branch name = %s\\n\", h, sr)\n\t\t\tfound = sr\n\t\t}\n\t}\n\tC.git_remote_free(remote)\n\tC.git_repository_free(repo)\n\tC.git_odb_free(odb)\n\tC.git_libgit2_shutdown()\n\n\treturn found, nil\n}", "func Get(name string) *Branch {\n\treturn novis.Get(name)\n}", "func GetBranch(repo *models.Repository, branch string) (*git.Branch, error) {\n\tif len(branch) == 0 {\n\t\treturn nil, fmt.Errorf(\"GetBranch: empty string for branch\")\n\t}\n\tgitRepo, err := git.OpenRepository(repo.RepoPath())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer gitRepo.Close()\n\n\treturn gitRepo.GetBranch(branch)\n}", "func requestedRepository(repoName string) (repository.Repository, error) {\n\t/*\t_, repoName, err := parseGitCommand(sshcmd)\n\t\tif err != nil {\n\t\t\treturn repository.Repository{}, err\n\t\t}*/\n\tvar repo repository.Repository\n\tconn, err := db.Conn()\n\tif err != nil {\n\t\treturn repository.Repository{}, err\n\t}\n\tdefer conn.Close()\n\tif err := conn.Repository().Find(bson.M{\"_id\": repoName}).One(&repo); err != nil {\n\t\treturn repository.Repository{}, errors.New(\"Repository not found\")\n\t}\n\treturn repo, nil\n}", "func (g *GitUtil) GetBranch() (string, error) {\n\tref, err := g.Repository.Head()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif !ref.Name().IsBranch() {\n\t\tbranches, err := g.Repository.Branches()\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\tvar currentBranch string\n\t\tfound := branches.ForEach(func(p *plumbing.Reference) error {\n\n\t\t\tif p.Name().IsBranch() && p.Name().Short() != \"origin\" {\n\t\t\t\tcurrentBranch = p.Name().Short()\n\t\t\t\treturn fmt.Errorf(\"break\")\n\t\t\t}\n\t\t\treturn nil\n\t\t})\n\n\t\tif found != nil {\n\t\t\tlog.Debugf(\"Found branch from HEAD %s\", currentBranch)\n\t\t\treturn currentBranch, nil\n\t\t}\n\n\t\treturn \"\", fmt.Errorf(\"no branch found, found %s, please checkout a branch (git checkout -b <BRANCH>)\", ref.Name().String())\n\t}\n\tlog.Debugf(\"Found branch %s\", ref.Name().Short())\n\treturn ref.Name().Short(), nil\n}", "func (api *APIClient) GetBlockByRepoName(repoPieces RepoPieces) (Block, error) {\n\tu, err := url.Parse(fmt.Sprintf(\"%s/api/v1/blocks\", api.baseURL))\n\tif err != nil {\n\t\treturn Block{}, errors.New(\"unable to parse Learn remote\")\n\t}\n\tv := url.Values{}\n\tv.Set(\"repo_name\", 
repoPieces.RepoName)\n\tv.Set(\"org\", repoPieces.Org)\n\tv.Set(\"origin\", repoPieces.Origin)\n\tu.RawQuery = v.Encode()\n\n\treq, err := http.NewRequest(\"GET\", u.String(), nil)\n\tif err != nil {\n\t\treturn Block{}, err\n\t}\n\n\treq.Header.Set(\"Content-Type\", \"application/json\")\n\treq.Header.Set(\"Source\", \"gLearn_cli\")\n\treq.Header.Set(\"Authorization\", fmt.Sprintf(\"Bearer %s\", api.Credentials.token))\n\n\tres, err := api.client.Do(req)\n\tif err != nil {\n\t\treturn Block{}, err\n\t}\n\tdefer res.Body.Close()\n\n\tif res.StatusCode != http.StatusOK {\n\t\treturn Block{}, fmt.Errorf(\"Error: response status: %d\", res.StatusCode)\n\t}\n\n\tvar blockResp blockResponse\n\terr = json.NewDecoder(res.Body).Decode(&blockResp)\n\tif err != nil {\n\t\treturn Block{}, err\n\t}\n\n\tif len(blockResp.Blocks) == 1 {\n\t\treturn blockResp.Blocks[0], nil\n\t}\n\treturn Block{}, nil\n}", "func (r *Repository) Name() string {\n\tre := regexp.MustCompile(\"/([^/]*)\\\\.git$\")\n\tmatch := re.FindStringSubmatch(r.Git)\n\tif len(match) > 0 {\n\t\treturn match[1]\n\t}\n\n\treturn \"\"\n}", "func GetPolicyBranch() string {\n\treturn Global.Policy.Branch\n}", "func (novis *Novis) Get(name string) *Branch {\n\tvar branch *Branch\n\troute := strings.Split(name, \".\")\n\tlast := route[len(route)-1]\n\tfor branch = range novis.traverse(name) {\n\t\tif branch.name == last {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn branch\n}", "func getGithubUser(distroName string) (string, error) {\n\n\tswitch distroName {\n\tcase \"doom-emacs\":\n\t\treturn \"hlissner\", nil\n\tcase \"emacs-live\":\n\t\treturn \"overtone\", nil\n\tcase \"prelude\":\n\t\treturn \"bbatsov\", nil\n\tcase \"spacemacs\":\n\t\treturn \"syl20bnr\", nil\n\tcase \"ohai-emacs\":\n\t\treturn \"bodil\", nil\n\t}\n\n\treturn \"\", errors.New(\"Could not find Github userName for repository '\" + distroName + \"'\")\n}", "func GetPwByName(username string) (*PassWd, error) {\n\tlog.WithField(\"username\", username).Traceln(\"--> pw.GetPwByName\")\n\treturn convertToPasswd(C.getpwnam(C.CString(username)))\n}", "func (r *SettingRepository) GetBranchByID(id string) (branch models.Branch, err error) {\n\terr = r.C.Find(bson.ObjectIdHex(id)).One(&branch)\n\treturn\n}", "func (b *BitBucketClient) GetBranch(workspace string, repositorySlug string, branchName string) (dto.BitBucketResponseBranchCreate, error) {\n\tlog.Logger().StartMessage(\"Get branch\")\n\tif err := b.beforeRequest(); err != nil {\n\t\tlog.Logger().FinishMessage(\"Get branch\")\n\t\treturn dto.BitBucketResponseBranchCreate{}, err\n\t}\n\n\tb.client.SetBaseURL(DefaultBitBucketBaseAPIUrl)\n\n\tendpoint := fmt.Sprintf(\"/repositories/%s/%s/refs/branches/%s\", workspace, repositorySlug, branchName)\n\tresponse, statusCode, err := b.client.Get(endpoint, map[string]string{})\n\tif err != nil {\n\t\tlog.Logger().FinishMessage(\"Get branch\")\n\t\treturn dto.BitBucketResponseBranchCreate{}, err\n\t}\n\n\tif statusCode == http.StatusNotFound {\n\t\tlog.Logger().FinishMessage(\"Get branch\")\n\t\treturn dto.BitBucketResponseBranchCreate{}, errors.New(\"this branch doesn't exist. \")\n\t}\n\n\tif statusCode == http.StatusForbidden {\n\t\tlog.Logger().FinishMessage(\"Get branch\")\n\t\treturn dto.BitBucketResponseBranchCreate{}, errors.New(\"action is not permitted. 
\")\n\t}\n\n\tresponseObject := dto.BitBucketResponseBranchCreate{}\n\terr = json.Unmarshal(response, &responseObject)\n\tif err != nil {\n\t\tlog.Logger().AddError(err).Msg(\"Error during response unmarshal\")\n\t\tlog.Logger().FinishMessage(\"Get branch\")\n\t\treturn dto.BitBucketResponseBranchCreate{}, err\n\t}\n\n\tlog.Logger().FinishMessage(\"Get branch\")\n\treturn responseObject, nil\n}", "func (g *GitLocal) Branch(dir string) (string, error) {\n\treturn g.GitCLI.Branch(dir)\n}", "func branchName() (string, string) {\n\tbranch := gitBranchName()\n\treleaseName := strings.TrimPrefix(branch, \"origin/\")\n\n\treturn releaseName, branch\n}", "func NewRepoBranch(dir, newBranch string) (bool, error){\n\theadsDir := fmt.Sprintf(\"%s/refs/heads\", dir)\n\theads, _ := ioutil.ReadDir(headsDir)\n\tfor _, head := range heads{\n\t\tif newBranch == head.Name() {\n\t\t\treturn false, fmt.Errorf(\"error while create new branch, %s already exist\", newBranch)\n\t\t}\n\t}\n\t_,bufErr,err := com.ExecCmdDirBytes(headsDir, \"cp\",\"master\", newBranch)\n\tif err!=nil || len(bufErr) > 0 {\n\t\treturn false, err\n\t}\n\treturn true, nil\n\n}", "func (r *Repo) GetBranchIfTracked(refName string) *Branch {\n\tbranchName := strings.TrimPrefix(refName, \"origin/\")\n\tfor name, branch := range r.Branches {\n\t\tif name == branchName {\n\t\t\treturn branch\n\t\t}\n\t}\n\treturn nil\n}", "func (cr *chartRepo) GetRepoName() string {\n\treturn cr.Name\n}", "func (c *Client) GetRepoBranch(user, repo, branch string) (*Branch, error) {\n\tb := new(Branch)\n\treturn b, c.getParsedResponse(\"GET\", fmt.Sprintf(\"/repos/%s/%s/branches/%s\", user, repo, branch), nil, nil, &b)\n}", "func (v *VersionHistory) GetBranchToken() []byte {\n\ttoken := make([]byte, len(v.BranchToken))\n\tcopy(token, v.BranchToken)\n\treturn token\n}", "func ByBranch(name string) FilterFunc {\n\treturn func(c *Collection) bool {\n\t\tbranch, err := c.Repository.LookupBranch(name, git2go.BranchRemote)\n\t\tif err != nil {\n\t\t\tlog.Warningf(\"Error finding branch %s %v\", name, err)\n\t\t\treturn false\n\t\t}\n\t\tc.Ref = branch.Reference\n\t\treturn true\n\t}\n}", "func getAccountByName(db *sqlite.Driver, name string) (*Account, error) {\n\tvar err error\n\tvar newAccount Account\n\n\tvar stmt = fmt.Sprintf(\"select %s from %s where name = ?\", allColumns, tableName)\n\tif err = db.QueryRow(stmt, name).Scan(\n\t\t&newAccount.ID,\n\t\t&newAccount.Name,\n\t\t&newAccount.Credential,\n\t\t&newAccount.PermLevel); err != nil {\n\t\tif err == sql.ErrNoRows {\n\t\t\treturn nil, FindAccountError(name)\n\t\t}\n\t\treturn nil, SQLExecutionError(err)\n\t}\n\n\treturn &newAccount, nil\n}", "func EditBranchProtection(ctx *context.APIContext) {\n\t// swagger:operation PATCH /repos/{owner}/{repo}/branch_protections/{name} repository repoEditBranchProtection\n\t// ---\n\t// summary: Edit a branch protections for a repository. 
Only fields that are set will be changed\n\t// consumes:\n\t// - application/json\n\t// produces:\n\t// - application/json\n\t// parameters:\n\t// - name: owner\n\t// in: path\n\t// description: owner of the repo\n\t// type: string\n\t// required: true\n\t// - name: repo\n\t// in: path\n\t// description: name of the repo\n\t// type: string\n\t// required: true\n\t// - name: name\n\t// in: path\n\t// description: name of protected branch\n\t// type: string\n\t// required: true\n\t// - name: body\n\t// in: body\n\t// schema:\n\t// \"$ref\": \"#/definitions/EditBranchProtectionOption\"\n\t// responses:\n\t// \"200\":\n\t// \"$ref\": \"#/responses/BranchProtection\"\n\t// \"404\":\n\t// \"$ref\": \"#/responses/notFound\"\n\t// \"422\":\n\t// \"$ref\": \"#/responses/validationError\"\n\tform := web.GetForm(ctx).(*api.EditBranchProtectionOption)\n\trepo := ctx.Repo.Repository\n\tbpName := ctx.Params(\":name\")\n\tprotectBranch, err := git_model.GetProtectedBranchRuleByName(ctx, repo.ID, bpName)\n\tif err != nil {\n\t\tctx.Error(http.StatusInternalServerError, \"GetProtectedBranchByID\", err)\n\t\treturn\n\t}\n\tif protectBranch == nil || protectBranch.RepoID != repo.ID {\n\t\tctx.NotFound()\n\t\treturn\n\t}\n\n\tif form.EnablePush != nil {\n\t\tif !*form.EnablePush {\n\t\t\tprotectBranch.CanPush = false\n\t\t\tprotectBranch.EnableWhitelist = false\n\t\t\tprotectBranch.WhitelistDeployKeys = false\n\t\t} else {\n\t\t\tprotectBranch.CanPush = true\n\t\t\tif form.EnablePushWhitelist != nil {\n\t\t\t\tif !*form.EnablePushWhitelist {\n\t\t\t\t\tprotectBranch.EnableWhitelist = false\n\t\t\t\t\tprotectBranch.WhitelistDeployKeys = false\n\t\t\t\t} else {\n\t\t\t\t\tprotectBranch.EnableWhitelist = true\n\t\t\t\t\tif form.PushWhitelistDeployKeys != nil {\n\t\t\t\t\t\tprotectBranch.WhitelistDeployKeys = *form.PushWhitelistDeployKeys\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tif form.EnableMergeWhitelist != nil {\n\t\tprotectBranch.EnableMergeWhitelist = *form.EnableMergeWhitelist\n\t}\n\n\tif form.EnableStatusCheck != nil {\n\t\tprotectBranch.EnableStatusCheck = *form.EnableStatusCheck\n\t}\n\n\tif form.StatusCheckContexts != nil {\n\t\tprotectBranch.StatusCheckContexts = form.StatusCheckContexts\n\t}\n\n\tif form.RequiredApprovals != nil && *form.RequiredApprovals >= 0 {\n\t\tprotectBranch.RequiredApprovals = *form.RequiredApprovals\n\t}\n\n\tif form.EnableApprovalsWhitelist != nil {\n\t\tprotectBranch.EnableApprovalsWhitelist = *form.EnableApprovalsWhitelist\n\t}\n\n\tif form.BlockOnRejectedReviews != nil {\n\t\tprotectBranch.BlockOnRejectedReviews = *form.BlockOnRejectedReviews\n\t}\n\n\tif form.BlockOnOfficialReviewRequests != nil {\n\t\tprotectBranch.BlockOnOfficialReviewRequests = *form.BlockOnOfficialReviewRequests\n\t}\n\n\tif form.DismissStaleApprovals != nil {\n\t\tprotectBranch.DismissStaleApprovals = *form.DismissStaleApprovals\n\t}\n\n\tif form.RequireSignedCommits != nil {\n\t\tprotectBranch.RequireSignedCommits = *form.RequireSignedCommits\n\t}\n\n\tif form.ProtectedFilePatterns != nil {\n\t\tprotectBranch.ProtectedFilePatterns = *form.ProtectedFilePatterns\n\t}\n\n\tif form.UnprotectedFilePatterns != nil {\n\t\tprotectBranch.UnprotectedFilePatterns = *form.UnprotectedFilePatterns\n\t}\n\n\tif form.BlockOnOutdatedBranch != nil {\n\t\tprotectBranch.BlockOnOutdatedBranch = *form.BlockOnOutdatedBranch\n\t}\n\n\tvar whitelistUsers []int64\n\tif form.PushWhitelistUsernames != nil {\n\t\twhitelistUsers, err = user_model.GetUserIDsByNames(ctx, form.PushWhitelistUsernames, false)\n\t\tif err 
!= nil {\n\t\t\tif user_model.IsErrUserNotExist(err) {\n\t\t\t\tctx.Error(http.StatusUnprocessableEntity, \"User does not exist\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tctx.Error(http.StatusInternalServerError, \"GetUserIDsByNames\", err)\n\t\t\treturn\n\t\t}\n\t} else {\n\t\twhitelistUsers = protectBranch.WhitelistUserIDs\n\t}\n\tvar mergeWhitelistUsers []int64\n\tif form.MergeWhitelistUsernames != nil {\n\t\tmergeWhitelistUsers, err = user_model.GetUserIDsByNames(ctx, form.MergeWhitelistUsernames, false)\n\t\tif err != nil {\n\t\t\tif user_model.IsErrUserNotExist(err) {\n\t\t\t\tctx.Error(http.StatusUnprocessableEntity, \"User does not exist\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tctx.Error(http.StatusInternalServerError, \"GetUserIDsByNames\", err)\n\t\t\treturn\n\t\t}\n\t} else {\n\t\tmergeWhitelistUsers = protectBranch.MergeWhitelistUserIDs\n\t}\n\tvar approvalsWhitelistUsers []int64\n\tif form.ApprovalsWhitelistUsernames != nil {\n\t\tapprovalsWhitelistUsers, err = user_model.GetUserIDsByNames(ctx, form.ApprovalsWhitelistUsernames, false)\n\t\tif err != nil {\n\t\t\tif user_model.IsErrUserNotExist(err) {\n\t\t\t\tctx.Error(http.StatusUnprocessableEntity, \"User does not exist\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tctx.Error(http.StatusInternalServerError, \"GetUserIDsByNames\", err)\n\t\t\treturn\n\t\t}\n\t} else {\n\t\tapprovalsWhitelistUsers = protectBranch.ApprovalsWhitelistUserIDs\n\t}\n\n\tvar whitelistTeams, mergeWhitelistTeams, approvalsWhitelistTeams []int64\n\tif repo.Owner.IsOrganization() {\n\t\tif form.PushWhitelistTeams != nil {\n\t\t\twhitelistTeams, err = organization.GetTeamIDsByNames(repo.OwnerID, form.PushWhitelistTeams, false)\n\t\t\tif err != nil {\n\t\t\t\tif organization.IsErrTeamNotExist(err) {\n\t\t\t\t\tctx.Error(http.StatusUnprocessableEntity, \"Team does not exist\", err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tctx.Error(http.StatusInternalServerError, \"GetTeamIDsByNames\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t} else {\n\t\t\twhitelistTeams = protectBranch.WhitelistTeamIDs\n\t\t}\n\t\tif form.MergeWhitelistTeams != nil {\n\t\t\tmergeWhitelistTeams, err = organization.GetTeamIDsByNames(repo.OwnerID, form.MergeWhitelistTeams, false)\n\t\t\tif err != nil {\n\t\t\t\tif organization.IsErrTeamNotExist(err) {\n\t\t\t\t\tctx.Error(http.StatusUnprocessableEntity, \"Team does not exist\", err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tctx.Error(http.StatusInternalServerError, \"GetTeamIDsByNames\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t} else {\n\t\t\tmergeWhitelistTeams = protectBranch.MergeWhitelistTeamIDs\n\t\t}\n\t\tif form.ApprovalsWhitelistTeams != nil {\n\t\t\tapprovalsWhitelistTeams, err = organization.GetTeamIDsByNames(repo.OwnerID, form.ApprovalsWhitelistTeams, false)\n\t\t\tif err != nil {\n\t\t\t\tif organization.IsErrTeamNotExist(err) {\n\t\t\t\t\tctx.Error(http.StatusUnprocessableEntity, \"Team does not exist\", err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tctx.Error(http.StatusInternalServerError, \"GetTeamIDsByNames\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t} else {\n\t\t\tapprovalsWhitelistTeams = protectBranch.ApprovalsWhitelistTeamIDs\n\t\t}\n\t}\n\n\terr = git_model.UpdateProtectBranch(ctx, ctx.Repo.Repository, protectBranch, git_model.WhitelistOptions{\n\t\tUserIDs: whitelistUsers,\n\t\tTeamIDs: whitelistTeams,\n\t\tMergeUserIDs: mergeWhitelistUsers,\n\t\tMergeTeamIDs: mergeWhitelistTeams,\n\t\tApprovalsUserIDs: approvalsWhitelistUsers,\n\t\tApprovalsTeamIDs: approvalsWhitelistTeams,\n\t})\n\tif err != nil {\n\t\tctx.Error(http.StatusInternalServerError, 
\"UpdateProtectBranch\", err)\n\t\treturn\n\t}\n\n\tisPlainRule := !git_model.IsRuleNameSpecial(bpName)\n\tvar isBranchExist bool\n\tif isPlainRule {\n\t\tisBranchExist = git.IsBranchExist(ctx.Req.Context(), ctx.Repo.Repository.RepoPath(), bpName)\n\t}\n\n\tif isBranchExist {\n\t\tif err = pull_service.CheckPRsForBaseBranch(ctx, ctx.Repo.Repository, bpName); err != nil {\n\t\t\tctx.Error(http.StatusInternalServerError, \"CheckPrsForBaseBranch\", err)\n\t\t\treturn\n\t\t}\n\t} else {\n\t\tif !isPlainRule {\n\t\t\tif ctx.Repo.GitRepo == nil {\n\t\t\t\tctx.Repo.GitRepo, err = git.OpenRepository(ctx, ctx.Repo.Repository.RepoPath())\n\t\t\t\tif err != nil {\n\t\t\t\t\tctx.Error(http.StatusInternalServerError, \"OpenRepository\", err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tdefer func() {\n\t\t\t\t\tctx.Repo.GitRepo.Close()\n\t\t\t\t\tctx.Repo.GitRepo = nil\n\t\t\t\t}()\n\t\t\t}\n\n\t\t\t// FIXME: since we only need to recheck files protected rules, we could improve this\n\t\t\tmatchedBranches, err := git_model.FindAllMatchedBranches(ctx, ctx.Repo.Repository.ID, protectBranch.RuleName)\n\t\t\tif err != nil {\n\t\t\t\tctx.Error(http.StatusInternalServerError, \"FindAllMatchedBranches\", err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tfor _, branchName := range matchedBranches {\n\t\t\t\tif err = pull_service.CheckPRsForBaseBranch(ctx, ctx.Repo.Repository, branchName); err != nil {\n\t\t\t\t\tctx.Error(http.StatusInternalServerError, \"CheckPrsForBaseBranch\", err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t// Reload from db to ensure get all whitelists\n\tbp, err := git_model.GetProtectedBranchRuleByName(ctx, repo.ID, bpName)\n\tif err != nil {\n\t\tctx.Error(http.StatusInternalServerError, \"GetProtectedBranchBy\", err)\n\t\treturn\n\t}\n\tif bp == nil || bp.RepoID != ctx.Repo.Repository.ID {\n\t\tctx.Error(http.StatusInternalServerError, \"New branch protection not found\", err)\n\t\treturn\n\t}\n\n\tctx.JSON(http.StatusOK, convert.ToBranchProtection(bp))\n}", "func (ggc *Client) checkoutBasedOnBranchParam(targetBranch string) (*git.Repository, error) {\n\tlogs.Info(\"git clone %s %s\", ggc.URL, ggc.Workspace)\n\trepo, err := git.PlainClone(ggc.Workspace, false, &git.CloneOptions{\n\t\tAuth: &http.BasicAuth{\n\t\t\tUsername: ggc.User,\n\t\t\tPassword: ggc.Token,\n\t\t},\n\t\tURL: ggc.URL,\n\t\tReferenceName: plumbing.NewBranchReferenceName(targetBranch),\n\t})\n\tif err != nil {\n\t\tlogs.Error(\"\\x1b[31;1m%s\\x1b[0m\\n\", fmt.Sprintf(\"error: %s\", err))\n\t\treturn nil, fmt.Errorf(\"check branch: %v\", err)\n\t}\n\treturn repo, nil\n}", "func (s *store) RepoName(ctx context.Context, repositoryID int) (_ string, err error) {\n\tctx, _, endObservation := s.operations.repoName.With(ctx, &err, observation.Args{LogFields: []log.Field{\n\t\tlog.Int(\"repositoryID\", repositoryID),\n\t}})\n\tdefer endObservation(1, observation.Args{})\n\n\tname, exists, err := basestore.ScanFirstString(s.db.Query(ctx, sqlf.Sprintf(repoNameQuery, repositoryID)))\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif !exists {\n\t\treturn \"\", ErrUnknownRepository\n\t}\n\treturn name, nil\n}", "func (s *InMemoryRepo) GetBucket(_ context.Context, name string) (Bucket, error) {\n\ts.mu.RLock()\n\tbucket, ok := s.buckets[name]\n\ts.mu.RUnlock()\n\n\tif !ok {\n\t\tbucket.Name = name\n\t}\n\n\treturn bucket, nil\n}", "func (g *GitLocal) RepoName(org, repoName string) string {\n\treturn g.GitCLI.RepoName(org, repoName)\n}", "func NewBranch(repoName string, branchName string) *pfs.Branch {\n\treturn &pfs.Branch{\n\t\tRepo: 
NewRepo(repoName),\n\t\tName: branchName,\n\t}\n}", "func getRepo(repos []config.Repository, repoName string) (config.Repository, bool) {\n\tfor _, repo := range repos {\n\t\tif repo.Name == repoName {\n\t\t\treturn repo, true\n\t\t}\n\t}\n\treturn config.Repository{}, false\n}", "func (o TriggerTriggerTemplatePtrOutput) BranchName() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *TriggerTriggerTemplate) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.BranchName\n\t}).(pulumi.StringPtrOutput)\n}", "func (o TriggerTriggerTemplateOutput) BranchName() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v TriggerTriggerTemplate) *string { return v.BranchName }).(pulumi.StringPtrOutput)\n}", "func (h *Handler) GetByName(name string) (*corev1.Secret, error) {\n\treturn h.clientset.CoreV1().Secrets(h.namespace).Get(h.ctx, name, h.Options.GetOptions)\n}", "func getRepoDomain(name string) string {\n\trepoPath, _ := GetSpecificRepoPath(name)\n\trepo, _ := gitutil.GetRepoFromLocalDir(*repoPath)\n\trepoURL, _ := gitutil.GetURLForRepo(*repo)\n\n\treturn strings.Split(*repoURL, \"/\")[2]\n}", "func (self *Repository) Branch(path string) error { return nil }", "func (v *MatchingPollForDecisionTaskResponse) GetBranchToken() (o []byte) {\n\tif v != nil && v.BranchToken != nil {\n\t\treturn v.BranchToken\n\t}\n\treturn\n}", "func (ref RefName) BranchName() string {\n\treturn ref.nameWithoutPrefix(BranchPrefix)\n}", "func matchBitbucketRepo(root string) (RemoteRepo, error) {\n\tif strings.HasSuffix(root, \".git\") {\n\t\treturn nil, errors.New(\"path must not include .git suffix\")\n\t}\n\treturn &bitbucketRepo{baseRepo{root: root}}, nil\n}", "func GetTrackingBranchName(branchName string) string {\n\treturn \"origin/\" + branchName\n}", "func (o GetChartRepositoriesRepositoryOutput) RepoName() pulumi.StringOutput {\n\treturn o.ApplyT(func(v GetChartRepositoriesRepository) string { return v.RepoName }).(pulumi.StringOutput)\n}", "func GetBranch() string {\n\tv := Map[\"branch\"]\n\treturn v\n}", "func GitBranch(dir string) (branch string, err error) {\n\tcmd := Cmd(dir, \"git rev-parse --abbrev-ref HEAD\")\n\tout, err := cmd.Output()\n\tif err != nil {\n\t\treturn\n\t}\n\tbranch = strings.TrimSpace(string(out))\n\tif branch == \"HEAD\" {\n\t\terr = ErrGitDetached\n\t}\n\treturn\n}", "func (m *MockRepositoryInterface) GitHubGetRepositoryByName(ctx context.Context, repositoryName string) (*models.GithubRepository, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"GitHubGetRepositoryByName\", ctx, repositoryName)\n\tret0, _ := ret[0].(*models.GithubRepository)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (g GithubClient) GetBranch(ctx context.Context, owner, repo, branch string) (*github.Branch, *github.Response, error) {\n\treturn g.client.Repositories.GetBranch(ctx, owner, repo, branch)\n}", "func (p *PluginClient) GitBranch(meta Meta, secret corev1.Secret) ClientGitBranch {\n\treturn newGitBranch(p, meta, secret)\n}", "func (repo *GitRepository) GetName() string {\n\treturn repo.Name\n}", "func IsBranchOfRepoRequirePullRequest(repoID int64, name string) bool {\n\tprotectBranch, err := GetProtectBranchOfRepoByName(repoID, name)\n\tif err != nil {\n\t\treturn false\n\t}\n\treturn protectBranch.Protected && protectBranch.RequirePullRequest\n}", "func GetMatchingBranch(input string) (string, error) {\n\tvar foundBranches []string\n\n\tloweredInput := strings.ToLower(input)\n\tfor _, branch := range GetBranches() {\n\t\tloweredBranch := strings.ToLower(branch)\n\t\tif 
loweredBranch == loweredInput {\n\t\t\treturn input, nil\n\t\t} else if strings.Contains(loweredBranch, loweredInput) {\n\t\t\tfoundBranches = append(foundBranches, branch)\n\t\t}\n\t}\n\n\tif len(foundBranches) > 1 {\n\t\treturn \"\", fmt.Errorf(\"multiple branches found: %s\", strings.Join(foundBranches, \", \"))\n\t} else if len(foundBranches) == 1 {\n\t\treturn foundBranches[0], nil\n\t}\n\n\tlog.Errorf(\"Branch not found: %s. We have %d known branches\", input, len(branches))\n\n\t// branch not found in local list, but maybe it was created recently -> let's try it if jenkins accept it\n\treturn input, nil\n}", "func getRepoName(dir string) (string, error) {\n\tr, err := git.PlainOpen(dir)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"error opening git dir %s: %w\", dir, err)\n\t}\n\trm, err := r.Remote(defaultRemote)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"error finding remote %s in git dir %s: %w\", defaultRemote, dir, err)\n\t}\n\n\t// validate remote URL\n\tremoteURL, err := url.Parse(rm.Config().URLs[0])\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"error parsing remote URL: %w\", err)\n\t}\n\ttrimmedRemotePath := strings.TrimSuffix(remoteURL.Path, \"/\")\n\tsplitRemotePath := strings.Split(trimmedRemotePath, \"/\")\n\t// expect path to be /owner/repo\n\tif len(splitRemotePath) != 3 {\n\t\treturn \"\", fmt.Errorf(\"expected owner/repo, got %s\", trimmedRemotePath)\n\t}\n\treturn splitRemotePath[len(splitRemotePath)-1], nil\n}", "func (r *Repository) GetDefaultBranch() string {\n\tif r == nil || r.DefaultBranch == nil {\n\t\treturn \"\"\n\t}\n\treturn *r.DefaultBranch\n}", "func (pc *PlatformClient) GetBrokerByName(ctx context.Context, name string) (*platform.ServiceBroker, error) {\n\tvar broker servicecatalog.Broker\n\tvar brokerUID types.UID\n\n\tif pc.isClusterScoped() {\n\t\tclusterBroker, err := pc.platformAPI.RetrieveClusterServiceBrokerByName(name)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"unable to get cluster-scoped broker (%s)\", err)\n\t\t}\n\n\t\tbroker, brokerUID = clusterBroker, clusterBroker.GetUID()\n\t} else {\n\t\tnamespaceBroker, err := pc.platformAPI.RetrieveNamespaceServiceBrokerByName(name, pc.targetNamespace)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"unable to get namespace-scoped broker (%s)\", err)\n\t\t}\n\n\t\tbroker, brokerUID = namespaceBroker, namespaceBroker.GetUID()\n\t}\n\n\treturn &platform.ServiceBroker{\n\t\tGUID: string(brokerUID),\n\t\tName: broker.GetName(),\n\t\tBrokerURL: broker.GetURL(),\n\t}, nil\n}", "func (c *config) branch(name string) (output string, err error) {\n\tlog.Printf(\"creating branch: %v\", name)\n\n\tdefaultCommand := []string{\"branch\", name}\n\n\treturn c.command(defaultCommand...)\n\n}", "func DeleteBranchProtection(ctx *context.APIContext) {\n\t// swagger:operation DELETE /repos/{owner}/{repo}/branch_protections/{name} repository repoDeleteBranchProtection\n\t// ---\n\t// summary: Delete a specific branch protection for the repository\n\t// produces:\n\t// - application/json\n\t// parameters:\n\t// - name: owner\n\t// in: path\n\t// description: owner of the repo\n\t// type: string\n\t// required: true\n\t// - name: repo\n\t// in: path\n\t// description: name of the repo\n\t// type: string\n\t// required: true\n\t// - name: name\n\t// in: path\n\t// description: name of protected branch\n\t// type: string\n\t// required: true\n\t// responses:\n\t// \"204\":\n\t// \"$ref\": \"#/responses/empty\"\n\t// \"404\":\n\t// \"$ref\": \"#/responses/notFound\"\n\n\trepo := 
ctx.Repo.Repository\n\tbpName := ctx.Params(\":name\")\n\tbp, err := git_model.GetProtectedBranchRuleByName(ctx, repo.ID, bpName)\n\tif err != nil {\n\t\tctx.Error(http.StatusInternalServerError, \"GetProtectedBranchByID\", err)\n\t\treturn\n\t}\n\tif bp == nil || bp.RepoID != repo.ID {\n\t\tctx.NotFound()\n\t\treturn\n\t}\n\n\tif err := git_model.DeleteProtectedBranch(ctx, ctx.Repo.Repository.ID, bp.ID); err != nil {\n\t\tctx.Error(http.StatusInternalServerError, \"DeleteProtectedBranch\", err)\n\t\treturn\n\t}\n\n\tctx.Status(http.StatusNoContent)\n}", "func GetBranchFromRef(ref string) string {\n\tparts := strings.Split(ref, \"/\")\n\treturn strings.Join(parts[2:], \"/\")\n}", "func (ge *GollumEvent) BranchName() string {\n\treturn ge.raw.Payload[\"ref\"].(string)\n}", "func (r *Repository) GetMasterBranch() string {\n\tif r == nil || r.MasterBranch == nil {\n\t\treturn \"\"\n\t}\n\treturn *r.MasterBranch\n}", "func (r *Repo) BranchRef() string {\n\treturn fmt.Sprintf(\"refs/heads/%s\", r.Branch)\n}", "func branchPropertiesFromName(name string) (string, string, color.Attribute) {\n\tif strings.Contains(name, \"feature/\") {\n\t\treturn \"feature\", \"develop\", color.FgGreen\n\t} else if strings.Contains(name, \"bugfix/\") {\n\t\treturn \"bugfix\", \"develop\", color.FgYellow\n\t} else if strings.Contains(name, \"hotfix/\") {\n\t\treturn \"hotfix\", \"master\", color.FgRed\n\t}\n\treturn \"other\", name, color.FgWhite\n}", "func GetCurrentBranch() string {\n\tcmd := exec.Command(\"git\", \"branch\")\n\tvar out bytes.Buffer\n\tcmd.Stdout = &out\n\tcmd.Run()\n\tlines := strings.Split(out.String(), \"\\n\")\n\tfor _, line := range lines {\n\t\tif strings.Contains(line, \"*\") {\n\t\t\tbranch := strings.Replace(line, \"*\", \"\", -1)\n\t\t\tbranch = strings.TrimSpace(branch)\n\t\t\treturn branch\n\t\t}\n\t}\n\treturn \"\"\n}", "func GrantByName(name string) Grant {\n\tfor g, grantName := range grantNameMap {\n\t\tif grantName == name {\n\t\t\treturn g\n\t\t}\n\t}\n\n\treturn GrantNone\n}", "func (o BranchProtectionOutput) Branch() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *BranchProtection) pulumi.StringOutput { return v.Branch }).(pulumi.StringOutput)\n}", "func (o TriggerGithubPullRequestPtrOutput) Branch() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *TriggerGithubPullRequest) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.Branch\n\t}).(pulumi.StringPtrOutput)\n}", "func getBranch(urlstr string, branch string, dst string) {\n\tlog.Debugf(\"Getting branch %s\", branch)\n\tif sh.DirExists(dst) {\n\t\tlog.Infof(\"Folder exists, skipping cloning %s\", dst)\n\t\tlog.Infof(\"Checking out %s\", branch)\n\t\tif oldPwd := sh.Pwd(); !(oldPwd == dst) {\n\t\t\tsh.Cd(dst)\n\t\t\tsh.SetE(exec.Command(\"git\", \"checkout\", branch))\n\t\t\tsh.Cd(oldPwd)\n\t\t} else {\n\t\t\tsh.SetE(exec.Command(\"git\", \"checkout\", branch))\n\t\t}\n\t} else {\n\t\tlog.Infof(\"Cloning into %s\", dst)\n\t\tcloneCmd := []string{\n\t\t\t// don't verify the ssl certificate (I've run into trouble with it)\n\t\t\t\"-c\", \"http.sslVerify=false\",\n\t\t\t\"clone\", urlstr, dst,\n\t\t\t// only clone this branch, with two commits of history\n\t\t\t\"--branch=\" + branch, \"--single-branch\",\n\t\t\t\"--depth\", \"2\",\n\t\t}\n\t\tsh.SetE(exec.Command(\"git\", cloneCmd...))\n\t}\n\tlog.Debugf(\"Done getting branch %s\", branch)\n}", "func (c *client) RemoveBranchProtection(org, repo, branch string) error {\n\tdurationLogger := c.log(\"RemoveBranchProtection\", org, repo, branch)\n\tdefer 
durationLogger()\n\n\t_, err := c.request(&request{\n\t\tmethod: http.MethodDelete,\n\t\tpath: fmt.Sprintf(\"/repos/%s/%s/branches/%s/protection\", org, repo, branch),\n\t\torg: org,\n\t\texitCodes: []int{204},\n\t}, nil)\n\treturn err\n}", "func GetSpecificRepoPath(repoName string) (*string, error) {\n\trepoStorePath, err := GetRepoStorePath()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tspecificPath := filepath.Join(*repoStorePath, repoName)\n\n\treturn &specificPath, nil\n}" ]
[ "0.563166", "0.56300473", "0.54814917", "0.53906345", "0.52956754", "0.5287144", "0.5120009", "0.5038256", "0.5038108", "0.5026133", "0.49994874", "0.49632233", "0.49617618", "0.49427322", "0.49369785", "0.49321407", "0.49279362", "0.48991442", "0.48990035", "0.4890306", "0.48711357", "0.4856135", "0.48494017", "0.48465136", "0.48294666", "0.48155642", "0.48107007", "0.48067635", "0.47634465", "0.4754932", "0.47529414", "0.47467765", "0.473615", "0.4734485", "0.47105587", "0.47057167", "0.47026506", "0.46985462", "0.46762303", "0.4668924", "0.45875105", "0.4583827", "0.45830023", "0.45742112", "0.45714366", "0.45562074", "0.45443174", "0.4533589", "0.45242706", "0.45112136", "0.4506509", "0.4503285", "0.4496243", "0.44845298", "0.4463939", "0.44581503", "0.44575924", "0.445731", "0.44550028", "0.44248897", "0.4424699", "0.44221738", "0.4421689", "0.44120294", "0.4399689", "0.4396204", "0.43877017", "0.438384", "0.43818122", "0.43713453", "0.4368436", "0.43610445", "0.43442482", "0.43134293", "0.43078852", "0.43070474", "0.4303392", "0.42979938", "0.42930397", "0.42857218", "0.42759243", "0.4275247", "0.42638937", "0.42602468", "0.42531538", "0.42503458", "0.42424345", "0.4236543", "0.42361483", "0.42294025", "0.42218325", "0.42041308", "0.42040113", "0.41996178", "0.4193095", "0.41903558", "0.41870204", "0.4177643", "0.41677168", "0.41580084" ]
0.8605521
0
IsBranchOfRepoRequirePullRequest returns true if the branch requires a pull request in the given repository.
func IsBranchOfRepoRequirePullRequest(repoID int64, name string) bool {
	protectBranch, err := GetProtectBranchOfRepoByName(repoID, name)
	if err != nil {
		return false
	}
	return protectBranch.Protected && protectBranch.RequirePullRequest
}
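For illustration, a minimal sketch of how this check might gate a direct merge, assuming a hypothetical caller; the canMergeDirectly helper and its parameters are illustrative and not part of the source:

// canMergeDirectly is a hypothetical guard: a protected branch that is
// configured to require a pull request must not be pushed or merged to
// directly, so the caller should route the change through a PR instead.
func canMergeDirectly(repoID int64, branch string) bool {
	return !IsBranchOfRepoRequirePullRequest(repoID, branch)
}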
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (c *Client) IsPullRequestMerged(owner, repo string, index int64) (bool, *Response, error) {\n\tif err := escapeValidatePathSegments(&owner, &repo); err != nil {\n\t\treturn false, nil, err\n\t}\n\tstatus, resp, err := c.getStatusCode(\"GET\", fmt.Sprintf(\"/repos/%s/%s/pulls/%d/merge\", owner, repo, index), nil, nil)\n\n\tif err != nil {\n\t\treturn false, resp, err\n\t}\n\n\treturn status == 204, resp, nil\n}", "func (r *Repo) IsGitHubRepo() bool { return strings.HasPrefix(r.URI, \"github.com/\") }", "func (cmd InspectCmd) RequiresRepo() bool {\n\treturn true\n}", "func (cmd ConfigCmd) RequiresRepo() bool {\n\treturn false\n}", "func (b *Client) PullIsMergeable(repo models.Repo, pull models.PullRequest, vcsstatusname string) (bool, error) {\n\tnextPageURL := fmt.Sprintf(\"%s/2.0/repositories/%s/pullrequests/%d/diffstat\", b.BaseURL, repo.FullName, pull.Num)\n\t// We'll only loop 1000 times as a safety measure.\n\tmaxLoops := 1000\n\tfor i := 0; i < maxLoops; i++ {\n\t\tresp, err := b.makeRequest(\"GET\", nextPageURL, nil)\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\t\tvar diffStat DiffStat\n\t\tif err := json.Unmarshal(resp, &diffStat); err != nil {\n\t\t\treturn false, errors.Wrapf(err, \"Could not parse response %q\", string(resp))\n\t\t}\n\t\tif err := validator.New().Struct(diffStat); err != nil {\n\t\t\treturn false, errors.Wrapf(err, \"API response %q was missing fields\", string(resp))\n\t\t}\n\t\tfor _, v := range diffStat.Values {\n\t\t\t// These values are undocumented, found via manual testing.\n\t\t\tif *v.Status == \"merge conflict\" || *v.Status == \"local deleted\" {\n\t\t\t\treturn false, nil\n\t\t\t}\n\t\t}\n\t\tif diffStat.Next == nil || *diffStat.Next == \"\" {\n\t\t\tbreak\n\t\t}\n\t\tnextPageURL = *diffStat.Next\n\t}\n\treturn true, nil\n}", "func (c *client) IsMergeable(org, repo string, number int, SHA string) (bool, error) {\n\tbackoff := time.Second * 3\n\tmaxTries := 3\n\tfor try := 0; try < maxTries; try++ {\n\t\tpr, err := c.GetPullRequest(org, repo, number)\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\t\tif pr.Head.SHA != SHA {\n\t\t\treturn false, fmt.Errorf(\"pull request head changed while checking mergeability (%s -> %s)\", SHA, pr.Head.SHA)\n\t\t}\n\t\tif pr.Merged {\n\t\t\treturn false, errors.New(\"pull request was merged while checking mergeability\")\n\t\t}\n\t\tif pr.Mergable != nil {\n\t\t\treturn *pr.Mergable, nil\n\t\t}\n\t\tif try+1 < maxTries {\n\t\t\tc.time.Sleep(backoff)\n\t\t\tbackoff *= 2\n\t\t}\n\t}\n\treturn false, fmt.Errorf(\"reached maximum number of retries (%d) checking mergeability\", maxTries)\n}", "func (b *Client) PullIsApproved(repo models.Repo, pull models.PullRequest) (approvalStatus models.ApprovalStatus, err error) {\n\tpath := fmt.Sprintf(\"%s/2.0/repositories/%s/pullrequests/%d\", b.BaseURL, repo.FullName, pull.Num)\n\tresp, err := b.makeRequest(\"GET\", path, nil)\n\tif err != nil {\n\t\treturn approvalStatus, err\n\t}\n\tvar pullResp PullRequest\n\tif err := json.Unmarshal(resp, &pullResp); err != nil {\n\t\treturn approvalStatus, errors.Wrapf(err, \"Could not parse response %q\", string(resp))\n\t}\n\tif err := validator.New().Struct(pullResp); err != nil {\n\t\treturn approvalStatus, errors.Wrapf(err, \"API response %q was missing fields\", string(resp))\n\t}\n\tauthorUUID := *pullResp.Author.UUID\n\tfor _, participant := range pullResp.Participants {\n\t\t// Bitbucket allows the author to approve their own pull request. 
This\n\t\t// defeats the purpose of approvals so we don't count that approval.\n\t\tif *participant.Approved && *participant.User.UUID != authorUUID {\n\t\t\treturn models.ApprovalStatus{\n\t\t\t\tIsApproved: true,\n\t\t\t}, nil\n\t\t}\n\t}\n\treturn approvalStatus, nil\n}", "func IsRepo() bool {\n\tout, err := Run(\"rev-parse\", \"--is-inside-work-tree\")\n\treturn err == nil && strings.TrimSpace(out) == \"true\"\n}", "func IsErrErrPullRequestHeadRepoMissing(err error) bool {\n\t_, ok := err.(ErrPullRequestHeadRepoMissing)\n\treturn ok\n}", "func IsMergeRequestReferencePath(name string) bool {\n\tre := \"^refs/heads/%s(/|$)?\"\n\treturn regexp.MustCompile(fmt.Sprintf(re, MergeRequestBranchPrefix)).MatchString(name)\n}", "func (p *Patch) IsGithubMergePatch() bool {\n\treturn p.GithubMergeData.HeadSHA != \"\"\n}", "func IsMergeRequestReference(name string) bool {\n\tre := \"^refs/heads/%s/[1-9]+([0-9]+)?$\"\n\treturn regexp.MustCompile(fmt.Sprintf(re, MergeRequestBranchPrefix)).MatchString(name)\n}", "func (cmd LoginCmd) RequiresRepo() bool {\n\treturn false\n}", "func (c PullRequestMergeType) IsValid() bool {\n\treturn c == MergeMerge || c == MergeRebase || c == MergeSquash\n}", "func (c *client) CreatePullRequest(org, repo, title, body, head, base string, canModify bool) (int, error) {\n\tdurationLogger := c.log(\"CreatePullRequest\", org, repo, title)\n\tdefer durationLogger()\n\n\tdata := struct {\n\t\tTitle string `json:\"title\"`\n\t\tBody string `json:\"body\"`\n\t\tHead string `json:\"head\"`\n\t\tBase string `json:\"base\"`\n\t\t// MaintainerCanModify allows maintainers of the repo to modify this\n\t\t// pull request, eg. push changes to it before merging.\n\t\tMaintainerCanModify bool `json:\"maintainer_can_modify\"`\n\t}{\n\t\tTitle: title,\n\t\tBody: body,\n\t\tHead: head,\n\t\tBase: base,\n\n\t\tMaintainerCanModify: canModify,\n\t}\n\tvar resp struct {\n\t\tNum int `json:\"number\"`\n\t}\n\t_, err := c.request(&request{\n\t\t// allow the description and draft fields\n\t\t// https://developer.github.com/changes/2018-02-22-label-description-search-preview/\n\t\t// https://developer.github.com/changes/2019-02-14-draft-pull-requests/\n\t\taccept: \"application/vnd.github.symmetra-preview+json, application/vnd.github.shadow-cat-preview\",\n\t\tmethod: http.MethodPost,\n\t\tpath: fmt.Sprintf(\"/repos/%s/%s/pulls\", org, repo),\n\t\torg: org,\n\t\trequestBody: &data,\n\t\texitCodes: []int{201},\n\t}, &resp)\n\tif err != nil {\n\t\treturn 0, fmt.Errorf(\"failed to create pull request against %s/%s#%s from head %s: %w\", org, repo, base, head, err)\n\t}\n\treturn resp.Num, nil\n}", "func IsErrPullRequestHasMerged(err error) bool {\n\t_, ok := err.(ErrPullRequestHasMerged)\n\treturn ok\n}", "func (c *Client) MergePullRequest(owner, repo string, index int64, opt MergePullRequestOption) (bool, *Response, error) {\n\tif err := escapeValidatePathSegments(&owner, &repo); err != nil {\n\t\treturn false, nil, err\n\t}\n\tif err := opt.Validate(c); err != nil {\n\t\treturn false, nil, err\n\t}\n\tbody, err := json.Marshal(&opt)\n\tif err != nil {\n\t\treturn false, nil, err\n\t}\n\tstatus, resp, err := c.getStatusCode(\"POST\", fmt.Sprintf(\"/repos/%s/%s/pulls/%d/merge\", owner, repo, index), jsonHeader, bytes.NewReader(body))\n\tif err != nil {\n\t\treturn false, resp, err\n\t}\n\treturn status == 200, resp, nil\n}", "func (o *V1WorkloadSpec) HasImagePullCredentials() bool {\n\tif o != nil && o.ImagePullCredentials != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (f IsFilter) 
ApplyPullRequest(context operations.Context, pullRequest *github.PullRequest) bool {\n\t// We're called on a pull request: filter passes if configured to accept\n\t// pull requests.\n\treturn f.PullRequestOnly\n}", "func isTestgroundRepo(path string) bool {\n\tfi, err := os.Stat(path)\n\tif err != nil || !fi.IsDir() {\n\t\treturn false\n\t}\n\tf, err := os.Open(filepath.Join(path, \"go.mod\"))\n\tif err != nil {\n\t\treturn false\n\t}\n\ts := bufio.NewScanner(f)\n\tif !s.Scan() {\n\t\treturn false\n\t}\n\treturn s.Text() == gomodHeader\n}", "func isTestgroundRepo(path string) bool {\n\tfi, err := os.Stat(path)\n\tif err != nil || !fi.IsDir() {\n\t\treturn false\n\t}\n\tf, err := os.Open(filepath.Join(path, \"go.mod\"))\n\tif err != nil {\n\t\treturn false\n\t}\n\ts := bufio.NewScanner(f)\n\tif !s.Scan() {\n\t\treturn false\n\t}\n\treturn s.Text() == gomodHeader\n}", "func (r *Repo) CheckIssue(comments []Comment, sender string) bool {\n\tconfig := configure.GlobalConfig.Repos\n\tapprovalsNeeded := 0\n\tapprovers := []string{}\n\tused := list.New()\n\tused.PushBack(sender) // block issue creator from approving\n\n\tfor _, relevantRepo := range config {\n\t\tif relevantRepo.Name == r.FullName {\n\t\t\tapprovalsNeeded = relevantRepo.ApprovalsNeeded\n\t\t\tapprovers = relevantRepo.Approvers\n\t\t}\n\t}\n\tif approvalsNeeded == 0 { // repo not in config\n\t\treturn false\n\t}\n\n\tfor _, comment := range comments {\n\t\tif comment.RequestApproved(approvers, used) {\n\t\t\tapprovalsNeeded -= 1\n\t\t}\n\t}\n\n\t// if not enough approvals have been made yet\n\tif approvalsNeeded > 0 {\n\t\treturn false\n\t}\n\treturn true\n}", "func (g *Gitlab) GetOpenPullRequest(ctx context.Context, repo scm.Repository, branchName string) (scm.PullRequest, error) {\n\tproject := repo.(repository)\n\n\tstate := \"opened\"\n\tmrs, _, err := g.glClient.MergeRequests.ListProjectMergeRequests(project.pid, &gitlab.ListProjectMergeRequestsOptions{\n\t\tListOptions: gitlab.ListOptions{\n\t\t\tPerPage: 1,\n\t\t},\n\t\tSourceBranch: &branchName,\n\t\tState: &state,\n\t}, gitlab.WithContext(ctx))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif len(mrs) == 0 {\n\t\treturn nil, nil\n\t}\n\n\treturn convertMergeRequest(mrs[0], project.name, project.ownerName), nil\n}", "func (c *client) GetPullRequest(org, repo string, number int) (*PullRequest, error) {\n\tdurationLogger := c.log(\"GetPullRequest\", org, repo, number)\n\tdefer durationLogger()\n\n\tvar pr PullRequest\n\t_, err := c.request(&request{\n\t\t// allow the description and draft fields\n\t\t// https://developer.github.com/changes/2018-02-22-label-description-search-preview/\n\t\t// https://developer.github.com/changes/2019-02-14-draft-pull-requests/\n\t\taccept: \"application/vnd.github.symmetra-preview+json, application/vnd.github.shadow-cat-preview\",\n\t\tmethod: http.MethodGet,\n\t\tpath: fmt.Sprintf(\"/repos/%s/%s/pulls/%d\", org, repo, number),\n\t\torg: org,\n\t\texitCodes: []int{200},\n\t}, &pr)\n\treturn &pr, err\n}", "func (wl *Workload) pullRequestVisible(pr *PullRequest) bool {\n\tif pr.Author != wl.Assignee {\n\t\t// Mismatched authors\n\t\treturn false\n\t}\n\n\tif wl.TrackingIssue.Milestone != \"\" {\n\t\tif pr.Milestone != \"\" && wl.TrackingIssue.Milestone != pr.Milestone {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}", "func (o *PostWebhook) HasRepoPush() bool {\n\tif o != nil && o.RepoPush != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func IsBareRepository(path string) bool {\n\n\tcmd := exec.Command(\"git\", 
fmt.Sprintf(\"--git-dir=%s\", path), \"rev-parse\", \"--is-bare-repository\")\n\tbody, err := cmd.Output()\n\n\tif err != nil {\n\t\treturn false\n\t}\n\n\tstatus := strings.Trim(string(body), \"\\n \")\n\treturn status == \"true\"\n}", "func (g *Github) CreatePullRequest(ctx context.Context, repo scm.Repository, prRepo scm.Repository, newPR scm.NewPullRequest) (scm.PullRequest, error) {\n\tr := repo.(repository)\n\tprR := prRepo.(repository)\n\n\tg.modLock()\n\tdefer g.modUnlock()\n\n\tpr, err := g.createPullRequest(ctx, r, prR, newPR)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err := g.addReviewers(ctx, r, newPR, pr); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err := g.addAssignees(ctx, r, newPR, pr); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err := g.addLabels(ctx, r, newPR, pr); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn convertPullRequest(pr), nil\n}", "func (p *PluginClient) GitPullRequest(meta Meta, secret corev1.Secret) ClientGitPullRequest {\n\treturn newGitPullRequest(p, meta, secret)\n}", "func (u *User) canCreateRepo() bool {\n\treturn u.maxNumRepos() <= -1 || u.NumRepos < u.maxNumRepos()\n}", "func (g *Gitlab) CreatePullRequest(ctx context.Context, repo scm.Repository, prRepo scm.Repository, newPR scm.NewPullRequest) (scm.PullRequest, error) {\n\tr := repo.(repository)\n\tprR := prRepo.(repository)\n\n\treviewersIDs, err := g.getUserIds(ctx, newPR.Reviewers)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tassigneesIDs, err := g.getUserIds(ctx, newPR.Assignees)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tprTitle := newPR.Title\n\tif newPR.Draft {\n\t\tprTitle = \"Draft: \" + prTitle // See https://docs.gitlab.com/ee/user/project/merge_requests/drafts.html#mark-merge-requests-as-drafts\n\t}\n\n\tlabels := gitlab.Labels(newPR.Labels)\n\tremoveSourceBranch := true\n\tmr, _, err := g.glClient.MergeRequests.CreateMergeRequest(prR.pid, &gitlab.CreateMergeRequestOptions{\n\t\tTitle: &prTitle,\n\t\tDescription: &newPR.Body,\n\t\tSourceBranch: &newPR.Head,\n\t\tTargetBranch: &newPR.Base,\n\t\tTargetProjectID: &r.pid,\n\t\tReviewerIDs: &reviewersIDs,\n\t\tRemoveSourceBranch: &removeSourceBranch,\n\t\tSquash: &r.shouldSquash,\n\t\tAssigneeIDs: &assigneesIDs,\n\t\tLabels: &labels,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn pullRequest{\n\t\trepoName: r.name,\n\t\townerName: r.ownerName,\n\t\ttargetPID: mr.TargetProjectID,\n\t\tsourcePID: mr.SourceProjectID,\n\t\tbranchName: newPR.Head,\n\t\tiid: mr.IID,\n\t\twebURL: mr.WebURL,\n\t}, nil\n}", "func AddPullRequest(config *Config, targetBranch, title string) error {\n\tgitlab := gogitlab.NewGitlab(config.Host, config.ApiPath, config.Token)\n\tid, err := getProjectId(gitlab)\n\tif err != nil {\n\t\treturn Mask(err)\n\t}\n\tfmt.Printf(\"Project id: %s\\n\", id)\n\t//fmt.Println(id)\n\n\tsourceBranch, err := git.GetLocalBranchName(nil)\n\tif err != nil {\n\t\treturn Mask(err)\n\t}\n\tfmt.Printf(\"Source branch: %s\\n\", sourceBranch)\n\tfmt.Printf(\"Target branch: %s\\n\", targetBranch)\n\n\terr = gitlab.AddMergeRequest(id, sourceBranch, targetBranch, title)\n\tif err != nil {\n\t\treturn Mask(err)\n\t}\n\n\treturn nil\n}", "func (p *PullRequest) GetMergeable() bool {\n\tif p == nil || p.Mergeable == nil {\n\t\treturn false\n\t}\n\treturn *p.Mergeable\n}", "func createPullRequest(c *cli.Context) error {\n\tbranch := c.String(\"pr-branch\")\n\trepo := c.String(\"pr-repo\")\n\tbase := c.String(\"pr-base\")\n\tremote := c.String(\"pr-remote\")\n\tdirectory := c.String(\"out\")\n\tif repo == 
\"\" {\n\t\treturn errors.New(\"repo must be defined if create-pr is true\")\n\t}\n\tif branch == \"\" {\n\t\tbranch = c.String(\"name\") + \"-\" + uuid.NewString()[:6]\n\t}\n\tfmt.Printf(\"Creating a PR to repo %s with base %s and branch %s\\n\", repo, base, branch)\n\tr := &runner.CLIRunner{}\n\tg := git.NewCLIGit(git.CLIGitConfig{\n\t\tDirectory: directory,\n\t\tBranch: branch,\n\t\tRemote: remote,\n\t\tBase: base,\n\t}, r)\n\tscmClient, err := git.NewClient(git.SCMConfig{\n\t\tBranch: branch,\n\t\tBase: base,\n\t\tRepo: repo,\n\t})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to create scm client: %w\", err)\n\t}\n\treturn catalog.CreatePullRequest(scmClient, g, branch)\n}", "func isGitRepo (dir string) (bool, error) {\n\tfiles, err := ioutil.ReadDir(dir)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tfor _, file := range files {\n\t\tif file.IsDir() && file.Name() == \".git\" {\n\t\t\treturn true, nil\n\t\t}\n\t}\n\treturn false, nil\n}", "func (f CommentsFilter) ApplyPullRequest(context operations.Context, pullRequest *github.PullRequest) bool {\n\treturn f.predicate(*pullRequest.Comments)\n}", "func PushRepoAndCreatePullRequest(dir string, upstreamRepo *GitRepository, forkRepo *GitRepository, base string, prDetails *PullRequestDetails, filter *PullRequestFilter, commit bool, commitMessage string, push bool, dryRun bool, gitter Gitter, provider GitProvider) (*PullRequestInfo, error) {\n\tuserAuth := provider.UserAuth()\n\tif commit {\n\t\terr := gitter.Add(dir, \"-A\")\n\t\tif err != nil {\n\t\t\treturn nil, errors.WithStack(err)\n\t\t}\n\t\tchanged, err := gitter.HasChanges(dir)\n\t\tif err != nil {\n\t\t\treturn nil, errors.WithStack(err)\n\t\t}\n\t\tif !changed {\n\t\t\tlog.Logger().Warnf(\"No changes made to the source code in %s. 
Code must be up to date!\", dir)\n\t\t\treturn nil, nil\n\t\t}\n\t\tif commitMessage == \"\" {\n\t\t\tcommitMessage = prDetails.Message\n\t\t}\n\t\terr = gitter.CommitDir(dir, commitMessage)\n\t\tif err != nil {\n\t\t\treturn nil, errors.WithStack(err)\n\t\t}\n\t}\n\n\theadPrefix := \"\"\n\n\tusername := upstreamRepo.Organisation\n\tcloneURL := upstreamRepo.CloneURL\n\tif forkRepo != nil {\n\t\tusername = forkRepo.Organisation\n\t\tcloneURL = forkRepo.CloneURL\n\t}\n\n\tif upstreamRepo.Organisation != username {\n\t\theadPrefix = username + \":\"\n\t}\n\n\tgha := &GitPullRequestArguments{\n\t\tGitRepository: upstreamRepo,\n\t\tTitle: prDetails.Title,\n\t\tBody: prDetails.Message,\n\t\tBase: base,\n\t\tLabels: prDetails.Labels,\n\t}\n\tvar existingPr *GitPullRequest\n\n\tforkPushURL, err := gitter.CreateAuthenticatedURL(cloneURL, &userAuth)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"creating push URL for %s\", cloneURL)\n\t}\n\n\tif filter != nil && push {\n\t\t// lets rebase an existing PR\n\t\texistingPrs, err := FilterOpenPullRequests(provider, upstreamRepo.Organisation, upstreamRepo.Name, *filter)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrapf(err, \"finding existing PRs using filter %s on repo %s/%s\", filter.String(), upstreamRepo.Organisation, upstreamRepo.Name)\n\t\t}\n\n\t\tif len(existingPrs) > 1 {\n\t\t\tsort.SliceStable(existingPrs, func(i, j int) bool {\n\t\t\t\t// sort in descending order of PR numbers (assumes PRs numbers increment!)\n\t\t\t\treturn util.DereferenceInt(existingPrs[j].Number) < util.DereferenceInt(existingPrs[i].Number)\n\t\t\t})\n\t\t\tprs := make([]string, 0)\n\t\t\tfor _, pr := range existingPrs {\n\t\t\t\tprs = append(prs, pr.URL)\n\t\t\t}\n\t\t\tlog.Logger().Debugf(\"Found more than one PR %s using filter %s on repo %s/%s so rebasing latest PR %s\", strings.Join(prs, \", \"), filter.String(), upstreamRepo.Organisation, upstreamRepo.Name, existingPrs[:1][0].URL)\n\t\t\texistingPr = existingPrs[0]\n\t\t} else if len(existingPrs) == 1 {\n\t\t\texistingPr = existingPrs[0]\n\t\t}\n\t}\n\tremoteBranch := prDetails.BranchName\n\tif existingPr != nil {\n\t\tif util.DereferenceString(existingPr.HeadOwner) == username && existingPr.HeadRef != nil && existingPr.Number != nil {\n\t\t\tremote := \"origin\"\n\t\t\tif forkRepo != nil && forkRepo.Fork {\n\t\t\t\tremote = \"upstream\"\n\t\t\t}\n\t\t\tchangeBranch, err := gitter.Branch(dir)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.WithStack(err)\n\t\t\t}\n\t\t\tlocalBranchUUID, err := uuid.NewUUID()\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.Wrapf(err, \"creating UUID for local branch\")\n\t\t\t}\n\t\t\t// We use this \"dummy\" local branch to pull into to avoid having to work with FETCH_HEAD as our local\n\t\t\t// representation of the remote branch. 
This is an oddity of the pull/%d/head remote.\n\t\t\tlocalBranch := localBranchUUID.String()\n\t\t\tremoteBranch = *existingPr.HeadRef\n\t\t\tfetchRefSpec := fmt.Sprintf(\"pull/%d/head:%s\", *existingPr.Number, localBranch)\n\t\t\terr = gitter.FetchBranch(dir, remote, fetchRefSpec)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.Wrapf(err, \"fetching %s for merge\", fetchRefSpec)\n\t\t\t}\n\n\t\t\terr = gitter.CreateBranchFrom(dir, prDetails.BranchName, localBranch)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.Wrapf(err, \"creating branch %s from %s\", prDetails.BranchName, fetchRefSpec)\n\t\t\t}\n\t\t\terr = gitter.Checkout(dir, prDetails.BranchName)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.Wrapf(err, \"checking out branch %s\", prDetails.BranchName)\n\t\t\t}\n\t\t\terr = gitter.MergeTheirs(dir, changeBranch)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.Wrapf(err, \"merging %s into %s\", changeBranch, fetchRefSpec)\n\t\t\t}\n\t\t\terr = gitter.RebaseTheirs(dir, fmt.Sprintf(localBranch), \"\", true)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.WithStack(err)\n\t\t\t}\n\t\t\tchangedFiles, err := gitter.ListChangedFilesFromBranch(dir, localBranch)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.Wrap(err, \"failed to list changed files\")\n\t\t\t}\n\t\t\tif changedFiles == \"\" {\n\t\t\t\tlog.Logger().Info(\"No file changes since the existing PR. Nothing to push.\")\n\t\t\t\treturn nil, nil\n\t\t\t}\n\t\t} else {\n\t\t\t// We can only update an existing PR if the owner of that PR is this user, so we clear the existingPr\n\t\t\texistingPr = nil\n\t\t}\n\t}\n\tvar pr *GitPullRequest\n\tif !dryRun && existingPr != nil {\n\t\tgha.Head = headPrefix + remoteBranch\n\t\t// work out the minimal similar title\n\t\tif strings.HasPrefix(existingPr.Title, \"chore(deps): bump \") {\n\t\t\torigWords := strings.Split(existingPr.Title, \" \")\n\t\t\tnewWords := strings.Split(prDetails.Title, \" \")\n\t\t\tanswer := make([]string, 0)\n\t\t\tfor i, w := range newWords {\n\t\t\t\tif len(origWords) > i && origWords[i] == w {\n\t\t\t\t\tanswer = append(answer, w)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif answer[len(answer)-1] == \"bump\" {\n\t\t\t\t// if there are no similarities in the actual dependency, then add a generic form of words\n\t\t\t\tanswer = append(answer, \"dependency\", \"versions\")\n\t\t\t}\n\t\t\tif answer[len(answer)-1] == \"to\" || answer[len(answer)-1] == \"from\" {\n\t\t\t\t// remove trailing prepositions\n\t\t\t\tanswer = answer[:len(answer)-1]\n\t\t\t}\n\t\t\tgha.Title = strings.Join(answer, \" \")\n\t\t} else {\n\t\t\tgha.Title = prDetails.Title\n\t\t}\n\t\tgha.Body = fmt.Sprintf(\"%s\\n<hr />\\n\\n%s\", prDetails.Message, existingPr.Body)\n\t\tvar err error\n\t\tpr, err = provider.UpdatePullRequest(gha, *existingPr.Number)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrapf(err, \"updating pull request %s\", existingPr.URL)\n\t\t}\n\t\tlog.Logger().Infof(\"Updated Pull Request: %s\", util.ColorInfo(pr.URL))\n\t}\n\tif dryRun {\n\t\tlog.Logger().Infof(\"Commit created but not pushed; would have updated pull request %s with %s and used commit message %s. 
Please manually delete %s when you are done\", util.ColorInfo(existingPr.URL), prDetails.String(), commitMessage, util.ColorInfo(dir))\n\t\treturn nil, nil\n\t} else if push {\n\t\terr := gitter.Push(dir, forkPushURL, true, fmt.Sprintf(\"%s:%s\", \"HEAD\", remoteBranch))\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrapf(err, \"pushing merged branch %s\", remoteBranch)\n\t\t}\n\t}\n\tif existingPr == nil {\n\t\tgha.Head = headPrefix + prDetails.BranchName\n\n\t\tpr, err = provider.CreatePullRequest(gha)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrapf(err, \"creating pull request with arguments %v\", gha.String())\n\t\t}\n\t\tlog.Logger().Infof(\"Created Pull Request: %s\", util.ColorInfo(pr.URL))\n\t}\n\n\tprInfo := &PullRequestInfo{\n\t\tGitProvider: provider,\n\t\tPullRequest: pr,\n\t\tPullRequestArguments: gha,\n\t}\n\n\terr = addLabelsToPullRequest(prInfo, prDetails.Labels)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"failed to add labels %+v to PR %s\", prDetails.Labels, pr.URL)\n\t}\n\n\treturn prInfo, nil\n}", "func IsRepo() bool {\n\treturn run.Silent(\"git rev-parse --git-dir >/dev/null 2>&1\")\n}", "func IsLocalNonBareGitRepository(fs fs.FileSystem, dir string) (bool, error) {\n\t_, err := fs.Stat(filepath.Join(dir, \".git\"))\n\tif os.IsNotExist(err) {\n\t\treturn false, nil\n\t}\n\tif err != nil {\n\t\treturn false, err\n\t}\n\treturn true, nil\n}", "func (p *PullRequestBranch) GetRepo() *Repository {\n\tif p == nil {\n\t\treturn nil\n\t}\n\treturn p.Repo\n}", "func (c *Client) GetPullRequest(owner, repo string, number int) (*PullRequest, error) {\n\tresp, err := c.request(http.MethodGet, fmt.Sprintf(\"%s/repos/%s/%s/pulls/%d\", c.base, owner, repo, number), nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\tif resp.StatusCode != 200 {\n\t\treturn nil, fmt.Errorf(\"response not 200: %s\", resp.Status)\n\t}\n\tb, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar pr PullRequest\n\tif err := json.Unmarshal(b, &pr); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &pr, nil\n}", "func (gc *GithubClient) CreatePullRequest(org, repo, head, base, title, body string) (*github.PullRequest, error) {\n\tb := true\n\tPR := &github.NewPullRequest{\n\t\tTitle: &title,\n\t\tBody: &body,\n\t\tHead: &head,\n\t\tBase: &base,\n\t\tMaintainerCanModify: &b,\n\t}\n\n\tvar res *github.PullRequest\n\t_, err := gc.retry(\n\t\tfmt.Sprintf(\"creating PullRequest from '%s' to '%s', title: '%s'. 
body: '%s'\", head, base, title, body),\n\t\tmaxRetryCount,\n\t\tfunc() (*github.Response, error) {\n\t\t\tvar resp *github.Response\n\t\t\tvar err error\n\t\t\tres, resp, err = gc.Client.PullRequests.Create(ctx, org, repo, PR)\n\t\t\treturn resp, err\n\t\t},\n\t)\n\treturn res, err\n}", "func isPRChanged(pe github.PullRequestEvent) bool {\n\tswitch pe.Action {\n\tcase github.PullRequestActionOpened:\n\t\treturn true\n\tcase github.PullRequestActionReopened:\n\t\treturn true\n\tcase github.PullRequestActionSynchronize:\n\t\treturn true\n\tcase github.PullRequestActionEdited:\n\t\treturn true\n\tdefault:\n\t\treturn false\n\t}\n}", "func (g *GitDriver) IsOpen() bool {\n\tif g.Repository == nil {\n\t\treturn false\n\t}\n\treturn true\n}", "func (o *PromoteOptions) waitForGitOpsPullRequest(ns string, env *v1.Environment, releaseInfo *ReleaseInfo) error {\n\tduration := *o.TimeoutDuration\n\tpullRequestInfo := releaseInfo.PullRequestInfo\n\tif pullRequestInfo != nil {\n\t\tfor {\n\t\t\tend := time.Now().Add(duration)\n\t\t\tpr := pullRequestInfo.PullRequest\n\t\t\terr := pullRequestInfo.GitProvider.UpdatePullRequestStatus(pr)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"Failed to query the Pull Request status for %s %s\", pr.URL, err)\n\t\t\t}\n\t\t\tif pr.Merged != nil && *pr.Merged {\n\t\t\t\to.Printf(\"Pull Request %s is merged\\n\", util.ColorInfo(pr.URL))\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tif pr.IsClosed() {\n\t\t\t\to.warnf(\"Pull Request %s is closed\\n\", util.ColorInfo(pr.URL))\n\t\t\t\treturn fmt.Errorf(\"Promotion failed as Pull Request %s is closed without merging\", pr.URL)\n\t\t\t}\n\t\t\tif time.Now().After(end) {\n\t\t\t\treturn fmt.Errorf(\"Timed out waiting for pull request %s to merge. Waited %s\", pr.URL, duration.String())\n\t\t\t}\n\t\t\ttime.Sleep(*o.PullRequestPollDuration)\n\t\t}\n\t}\n\treturn nil\n}", "func (gh *GithubRequireAheadPrecondition) NeedsMerge(deployment common.Deployment, ctx *GithubRequireAheadContext) (bool, error) {\n\tif deployment.IsForce() {\n\t\treturn false, nil\n\t}\n\n\tbase := *ctx.RepoDetails.DefaultBranch\n\thead := deployment.Ref()\n\n\tcomparison, err := ctx.RepoClient.CompareCommits(base, head)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\treturn (*comparison.BehindBy > 0), nil\n}", "func (f ContainsFilter) ApplyPullRequest(context operations.Context, pullRequest *github.PullRequest) bool {\n\treturn f.filter(context, *(pullRequest.Number))\n}", "func (g *Github) MergePullRequest(ctx context.Context, pullReq scm.PullRequest) error {\n\tpr := pullReq.(pullRequest)\n\n\tg.modLock()\n\tdefer g.modUnlock()\n\n\t// We need to fetch the repo again since no AllowXMerge is present in listings of repositories\n\trepo, _, err := retry(ctx, func() (*github.Repository, *github.Response, error) {\n\t\treturn g.ghClient.Repositories.Get(ctx, pr.ownerName, pr.repoName)\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Filter out all merge types to only the allowed ones, but keep the order of the ones left\n\tmergeTypes := scm.MergeTypeIntersection(g.MergeTypes, repoMergeTypes(repo))\n\tif len(mergeTypes) == 0 {\n\t\treturn errors.New(\"none of the configured merge types was permitted\")\n\t}\n\n\t_, _, err = retry(ctx, func() (*github.PullRequestMergeResult, *github.Response, error) {\n\t\treturn g.ghClient.PullRequests.Merge(ctx, pr.ownerName, pr.repoName, pr.number, \"\", &github.PullRequestOptions{\n\t\t\tMergeMethod: mergeTypeGhName[mergeTypes[0]],\n\t\t})\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = 
retryWithoutReturn(ctx, func() (*github.Response, error) {\n\t\treturn g.ghClient.Git.DeleteRef(ctx, pr.prOwnerName, pr.prRepoName, fmt.Sprintf(\"heads/%s\", pr.branchName))\n\t})\n\n\t// Ignore errors about the reference not existing since it may be the case that GitHub has already deleted the branch\n\tif err != nil && !strings.Contains(err.Error(), \"Reference does not exist\") {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (c *Config) IsGitHub() bool {\n\treturn c.Github.ClientID != \"\"\n}", "func hasPresubmit(r []rt.Repo) bool {\n\treturn hasProwjobType(r, \"type_presubmit\")\n}", "func (s *AutograderService) IsEmptyRepo(ctx context.Context, in *pb.RepositoryRequest) (*pb.Void, error) {\n\tusr, scm, err := s.getUserAndSCMForCourse(ctx, in.GetCourseID())\n\tif err != nil {\n\t\ts.logger.Errorf(\"IsEmptyRepo failed: scm authentication error: %w\", err)\n\t\treturn nil, err\n\t}\n\n\tif !s.isTeacher(usr.GetID(), in.GetCourseID()) {\n\t\ts.logger.Error(\"IsEmptyRepo failed: user is not teacher\")\n\t\treturn nil, status.Errorf(codes.PermissionDenied, \"only teachers can access repository info\")\n\t}\n\n\tif err := s.isEmptyRepo(ctx, scm, in); err != nil {\n\t\ts.logger.Errorf(\"IsEmptyRepo failed: %w\", err)\n\t\tif contextCanceled(ctx) {\n\t\t\treturn nil, status.Error(codes.FailedPrecondition, ErrContextCanceled)\n\t\t}\n\t\tif ok, parsedErr := parseSCMError(err); ok {\n\t\t\treturn nil, parsedErr\n\t\t}\n\t\treturn nil, status.Errorf(codes.FailedPrecondition, \"group repository does not exist or not empty\")\n\t}\n\n\treturn &pb.Void{}, nil\n}", "func IsGitRepository(path string) bool {\n\tvar out bytes.Buffer\n\tcmd := exec.Command(\"git\", \"-C\", options.Dir, \"rev-parse\", \"--is-inside-work-tree\")\n\tcmd.Stdout = &out\n\n\terr := cmd.Run()\n\tif err != nil {\n\t\tlog.Println(\"ERROR\", err)\n\t\treturn false\n\t}\n\n\tvar val bool\n\t_, err = fmt.Sscanf(out.String(), \"%t\", &val)\n\tif err != nil {\n\t\tlog.Println(\"ERROR\", err)\n\t\treturn false\n\t}\n\n\treturn val\n}", "func RemoteHasBranch(remote string, branch string) bool {\n\treturn run.Silent(\n\t\t\"git branch --remote --contains %s >/dev/null 2>&1\",\n\t\tfmt.Sprintf(\"%s/%s\", remote, branch),\n\t)\n}", "func IsErrPullRequestNotExist(err error) bool {\n\t_, ok := err.(ErrPullRequestNotExist)\n\treturn ok\n}", "func IsRepo(repoPath string) bool {\n\trp := path.Join(repoPath, \".git\")\n\tinfo, err := os.Stat(rp)\n\tif err != nil {\n\t\treturn false\n\t}\n\n\treturn info.IsDir()\n}", "func (p *PullRequest) GetMerged() bool {\n\tif p == nil || p.Merged == nil {\n\t\treturn false\n\t}\n\treturn *p.Merged\n}", "func (g *GitHubStruct) req_repos_exists(req *UpdateReq, ret *goforjj.PluginData) (err error) {\n\tif req == nil || ret == nil {\n\t\treturn fmt.Errorf(\"Internal error: Invalid parameters. 
req and ret cannot be nil.\")\n\t}\n\n\tc := g.Client.Repositories\n\n\t// loop on list of repos, and ensure they exist with minimal config and rights\n\tfor name, _ := range req.Objects.Repo {\n\t\tlog.Printf(\"Looking for Repo '%s' from '%s'\", name, g.githubDeploy.Organization)\n\t\tfound_repo, _, err := c.Get(g.ctxt, g.githubDeploy.Organization, name)\n\n\t\tr := goforjj.PluginRepo{\n\t\t\tName: name,\n\t\t\tExist: (err == nil),\n\t\t\tRemotes: make(map[string]goforjj.PluginRepoRemoteUrl),\n\t\t\tBranchConnect: make(map[string]string),\n\t\t\tOwner: g.githubDeploy.Organization,\n\t\t}\n\t\tif err == nil {\n\t\t\tr.Remotes[\"origin\"] = goforjj.PluginRepoRemoteUrl{\n\t\t\t\tSsh: *found_repo.SSHURL,\n\t\t\t\tUrl: *found_repo.HTMLURL,\n\t\t\t}\n\t\t\tr.BranchConnect[\"master\"] = \"origin/master\"\n\t\t}\n\n\t\tret.Repos[name] = r\n\t}\n\treturn\n}", "func RefIsBranch(dir string, ref string, gitter Gitter) (bool, error) {\n\tremoteBranches, err := gitter.RemoteBranches(dir)\n\tif err != nil {\n\t\treturn false, errors.Wrapf(err, \"error getting remote branches to find provided ref %s\", ref)\n\t}\n\tfor _, b := range remoteBranches {\n\t\tif strings.Contains(b, ref) {\n\t\t\treturn true, nil\n\t\t}\n\t}\n\treturn false, nil\n}", "func (w Workspace) IsPulling(ctx context.Context) bool {\n\tnodes := GetModelContext(ctx).Nodes\n\n\tfor _, id := range w.ProjectIDs {\n\t\tnode := nodes.MustLoadProject(id)\n\t\tif node.IsPulling {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}", "func CheckStalePullRequest(repo *Repo, pullRequest *github.PullRequest) (stalePullRequest *StalePullRequest, err error) {\n\tstalePullRequest = &StalePullRequest{\n\t\tType: StaleTypeNormal,\n\t\tRepo: repo,\n\t\tPullRequest: pullRequest,\n\t}\n\n\tprTitle := *pullRequest.Title\n\tprState := *pullRequest.State\n\n\tif prState != \"open\" {\n\t\tstalePullRequest.reason = \"pull request is not opened\"\n\t\treturn\n\t}\n\n\tif len(pullRequest.Assignees) < 1 {\n\t\tstalePullRequest.reason = \"pull request has no assignees\"\n\t\treturn\n\t}\n\n\tif (*pullRequest.CreatedAt).Add(pullRequestFreshTime).After(time.Now()) {\n\t\t// GitHub's pr comment API seems like have some lag,\n\t\t// so we have to check later.\n\t\tstalePullRequest.reason = \"pull request is still fresh\"\n\t\treturn\n\t}\n\n\tif strings.HasPrefix(strings.ToLower(prTitle), \"[wip]\") {\n\t\tstalePullRequest.reason = \"pull request is WIP\"\n\t\treturn\n\t}\n\n\tunreviewedAssignees, err := findUnreviewAssignees(repo, pullRequest)\n\tif err != nil {\n\t\treturn\n\t}\n\tif len(unreviewedAssignees) > 0 {\n\t\tstalePullRequest.Type = StaleTypeNoReviews\n\t\tstalePullRequest.reason = strings.Join(unreviewedAssignees, \" \")\n\t\treturn\n\t}\n\n\tif (*pullRequest.UpdatedAt).Add(pullRequestStaleTime).Before(time.Now()) {\n\t\tstalePullRequest.Type = StaleTypeNoUpdates\n\t\treturn\n\t}\n\n\treturn\n}", "func HasBranch(branchName string) bool {\n\tfor _, line := range strings.Split(command.New(\"git\", \"branch\", \"-a\").Output(), \"\\n\") {\n\t\tline = strings.Trim(line, \"* \")\n\t\tline = strings.TrimSpace(line)\n\t\tline = strings.Replace(line, \"remotes/origin/\", \"\", 1)\n\t\tif line == branchName {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (w *Watcher) IsRepoValid(repo string) (bool, error) {\n\trepos, err := w.ListGoRepos()\n\tif err != nil {\n\t\treturn false, errors.Wrap(err, \"repo: listing Go repos\")\n\t}\n\tfor _, r := range repos {\n\t\tif r == repo {\n\t\t\treturn true, nil\n\t\t}\n\t}\n\treturn false, nil\n}", "func 
isMainBranch(br string) bool {\n\treturn br == \"master\" || strings.HasPrefix(br, \"dev.\")\n}", "func (c *gitClient) checkoutPR(num int) error {\n\tif err := os.Chdir(filepath.Join(os.Getenv(\"GITHUB_WORKSPACE\"), c.repo)); err != nil {\n\t\treturn errors.Wrap(err, \"changing to GITHUB_WORKSPACE dir\")\n\t}\n\t_, _, err := execCommand(\"git\", \"fetch\", \"origin\", fmt.Sprintf(\"pull/%d/head:pullrequest\", num))\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, _, err = execCommand(\"git\", \"checkout\", c.branch)\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, exitCode, err := execCommand(\"git\", \"merge\", \"--squash\", \"--no-commit\", \"pullrequest\")\n\tif err != nil || exitCode != 0 {\n\t\treturn errors.Wrap(err, \"Pull request merge failed.\")\n\t}\n\t_, _, err = execCommand(\"git\", \"reset\")\n\treturn err\n}", "func (c *Client) GetPullRequest(owner, repo string, index int64) (*PullRequest, *Response, error) {\n\tif err := escapeValidatePathSegments(&owner, &repo); err != nil {\n\t\treturn nil, nil, err\n\t}\n\tpr := new(PullRequest)\n\tresp, err := c.getParsedResponse(\"GET\", fmt.Sprintf(\"/repos/%s/%s/pulls/%d\", owner, repo, index), nil, nil, pr)\n\tif c.checkServerVersionGreaterThanOrEqual(version1_14_0) != nil {\n\t\tif err := fixPullHeadSha(c, pr); err != nil {\n\t\t\treturn pr, resp, err\n\t\t}\n\t}\n\treturn pr, resp, err\n}", "func containsPulld(packages []string) bool {\n\tfor _, s := range packages {\n\t\tif p := strings.Split(s, \"/\")[0]; p == \"pulld\" || p == \"pulld-not-gce\" {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (m *MaintainerManager) GetPullRequest(number string) (*gh.PullRequest, error) {\n\treturn m.client.PullRequest(m.repo, number, nil)\n}", "func IsIssueReferencePath(name string) bool {\n\treturn regexp.MustCompile(fmt.Sprintf(\"^refs/heads/%s(/|$)?\", IssueBranchPrefix)).MatchString(name)\n}", "func (a *Application) checkRepoIsReal(name ...string) bool {\n\tvar fullname string\n\tswitch len(name) {\n\tcase 1:\n\t\tfullname = strings.TrimSpace(name[0])\n\t\tif fullname == \"\" || fullname == \"/\" {\n\t\t\treturn false\n\t\t}\n\tcase 2:\n\t\torg := strings.TrimSpace(name[0])\n\t\trepo := strings.TrimSpace(name[1])\n\t\tif org == \"\" || repo == \"\" {\n\t\t\treturn false\n\t\t}\n\t\tfullname = u.Format(\"%s/%s\", name[0], name[1])\n\tdefault:\n\t\tpanic(\"Youre doing this wrong\")\n\t}\n\turl := u.Format(\"https://github.com/%s\", fullname)\n\tif code, _, _, e := nt.HTTP(nt.HEAD, url, nt.NewHeaderBuilder().GetHeader(), nil); e != nil || code != 200 {\n\t\treturn false\n\t} else {\n\t\treturn true\n\t}\n}", "func IsFunctionalTestRequired() bool {\n\treturn strings.TrimSpace(os.Getenv(TestRequiredEnvVar)) == \"true\"\n}", "func (m *MockClient) CreatePullRequest(org, repo, title, body, head, base string, canModify bool) (int, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"CreatePullRequest\", org, repo, title, body, head, base, canModify)\n\tret0, _ := ret[0].(int)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (p *Patch) ShouldPatchFileWithDiff(path string) bool {\n\treturn !(p.IsGithubPRPatch() || p.IsPRMergePatch()) && p.ConfigChanged(path)\n}", "func repoExists(repo string) (bool, error) {\n\tresp, err := httpClient.Get(fmt.Sprintf(\"https://%s\", repo))\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tif resp.StatusCode == http.StatusNotFound {\n\t\treturn false, ErrRepoNotFound\n\t}\n\n\tif resp.StatusCode > 399 {\n\t\treturn false, ErrUnknown\n\t}\n\treturn true, nil\n}", "func (c *Client) 
CreatePullRequest(owner, repo string, opt CreatePullRequestOption) (*PullRequest, *Response, error) {\n\tif err := escapeValidatePathSegments(&owner, &repo); err != nil {\n\t\treturn nil, nil, err\n\t}\n\tbody, err := json.Marshal(&opt)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tpr := new(PullRequest)\n\tresp, err := c.getParsedResponse(\"POST\",\n\t\tfmt.Sprintf(\"/repos/%s/%s/pulls\", owner, repo),\n\t\tjsonHeader, bytes.NewReader(body), pr)\n\treturn pr, resp, err\n}", "func FindOneProjectRefByRepoAndBranchWithPRTesting(owner, repo, branch string) (*ProjectRef, error) {\n\tprojectRefs, err := FindMergedEnabledProjectRefsByRepoAndBranch(owner, repo, branch)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"Could not fetch project ref for repo '%s/%s' with branch '%s'\",\n\t\t\towner, repo, branch)\n\t}\n\tfor _, p := range projectRefs {\n\t\tif p.IsPRTestingEnabled() {\n\t\t\tp.checkDefaultLogger()\n\t\t\treturn &p, nil\n\t\t}\n\t}\n\tif len(projectRefs) > 0 {\n\t\tgrip.Debug(message.Fields{\n\t\t\t\"source\": \"find project ref for PR testing\",\n\t\t\t\"message\": \"project ref enabled but pr testing not enabled\",\n\t\t\t\"owner\": owner,\n\t\t\t\"repo\": repo,\n\t\t\t\"branch\": branch,\n\t\t})\n\t\treturn nil, nil\n\t}\n\n\t// if no projects are enabled, check if the repo has PR testing enabled, in which case we can use a disabled/hidden project.\n\trepoRef, err := FindRepoRefByOwnerAndRepo(owner, repo)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"error finding merged repo refs for repo '%s/%s'\", owner, repo)\n\t}\n\tif repoRef == nil || !repoRef.IsEnabled() || !repoRef.IsPRTestingEnabled() || repoRef.RemotePath == \"\" {\n\t\tgrip.Debug(message.Fields{\n\t\t\t\"source\": \"find project ref for PR testing\",\n\t\t\t\"message\": \"repo ref not configured for PR testing untracked branches\",\n\t\t\t\"owner\": owner,\n\t\t\t\"repo\": repo,\n\t\t\t\"branch\": branch,\n\t\t})\n\t\treturn nil, nil\n\t}\n\n\tprojectRefs, err = FindMergedProjectRefsThatUseRepoSettingsByRepoAndBranch(owner, repo, branch)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"error finding merged all project refs for repo '%s/%s' with branch '%s'\",\n\t\t\towner, repo, branch)\n\t}\n\n\t// if a disabled project exists, then return early\n\tvar hiddenProject *ProjectRef\n\tfor i, p := range projectRefs {\n\t\tif !p.IsEnabled() && !p.IsHidden() {\n\t\t\tgrip.Debug(message.Fields{\n\t\t\t\t\"source\": \"find project ref for PR testing\",\n\t\t\t\t\"message\": \"project ref is disabled, not PR testing\",\n\t\t\t\t\"owner\": owner,\n\t\t\t\t\"repo\": repo,\n\t\t\t\t\"branch\": branch,\n\t\t\t})\n\t\t\treturn nil, nil\n\t\t}\n\t\tif p.IsHidden() {\n\t\t\thiddenProject = &projectRefs[i]\n\t\t}\n\t}\n\tif hiddenProject == nil {\n\t\tgrip.Debug(message.Fields{\n\t\t\t\"source\": \"find project ref for PR testing\",\n\t\t\t\"message\": \"creating hidden project because none exists\",\n\t\t\t\"owner\": owner,\n\t\t\t\"repo\": repo,\n\t\t\t\"branch\": branch,\n\t\t})\n\t\t// if no project exists, create and return skeleton project\n\t\thiddenProject = &ProjectRef{\n\t\t\tId: mgobson.NewObjectId().Hex(),\n\t\t\tOwner: owner,\n\t\t\tRepo: repo,\n\t\t\tBranch: branch,\n\t\t\tRepoRefId: repoRef.Id,\n\t\t\tUseRepoSettings: true,\n\t\t\tEnabled: utility.FalsePtr(),\n\t\t\tHidden: utility.TruePtr(),\n\t\t}\n\t\tif err = hiddenProject.Add(nil); err != nil {\n\t\t\tgrip.Error(message.WrapError(err, message.Fields{\n\t\t\t\t\"source\": \"find project ref for PR testing\",\n\t\t\t\t\"message\": \"hidden 
project could not be added\",\n\t\t\t\t\"owner\": owner,\n\t\t\t\t\"repo\": repo,\n\t\t\t\t\"branch\": branch,\n\t\t\t}))\n\t\t\treturn nil, nil\n\t\t}\n\t}\n\n\treturn hiddenProject, nil\n}", "func IsErrReachLimitOfRepo(err error) bool {\n\t_, ok := err.(ErrReachLimitOfRepo)\n\treturn ok\n}", "func IsApproved(issue *maintner.GitHubIssue) bool {\n\tif issue == nil {\n\t\treturn false\n\t}\n\n\treviewers := make(map[*maintner.GitHubUser]bool)\n\n\t// ForeachReview processes reviews in chronological\n\t// order. We can just call this serially and if a\n\t// reviewer ever requests changes after approving\n\t// this will still set the final review to 'false'\n\tissue.ForeachReview(func(review *maintner.GitHubReview) error {\n\t\treviewers[review.Actor] = review.State == \"APPROVED\"\n\t\treturn nil\n\t})\n\n\t// If there are no reviewers, we shall state that it is not approved\n\tif len(reviewers) == 0 {\n\t\treturn false\n\t}\n\n\tfor _, approved := range reviewers {\n\t\tif !approved {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}", "func IsIssueReference(name string) bool {\n\treturn regexp.MustCompile(fmt.Sprintf(\"^refs/heads/%s/[1-9]+([0-9]+)?$\", IssueBranchPrefix)).MatchString(name)\n}", "func (g *Github) GetOpenPullRequest(ctx context.Context, repo scm.Repository, branchName string) (scm.PullRequest, error) {\n\tr := repo.(repository)\n\n\theadOwner, err := g.headOwner(ctx, r.ownerName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tprs, _, err := retry(ctx, func() ([]*github.PullRequest, *github.Response, error) {\n\t\treturn g.ghClient.PullRequests.List(ctx, headOwner, r.name, &github.PullRequestListOptions{\n\t\t\tHead: fmt.Sprintf(\"%s:%s\", headOwner, branchName),\n\t\t\tState: \"open\",\n\t\t\tListOptions: github.ListOptions{\n\t\t\t\tPerPage: 1,\n\t\t\t},\n\t\t})\n\t})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to get open pull requests: %w\", err)\n\t}\n\tif len(prs) == 0 {\n\t\treturn nil, nil\n\t}\n\treturn convertPullRequest(prs[0]), nil\n}", "func (c CommentCommand) IsForSpecificProject() bool {\n\treturn c.RepoRelDir != \"\" || c.Workspace != \"\" || c.ProjectName != \"\"\n}", "func (o *PullRequestOptions) Validate() error {\n\terr := o.Options.Validate()\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"failed to validate repository options\")\n\t}\n\n\tif o.Number <= 0 {\n\t\to.Number, err = FindPullRequestFromEnvironment()\n\t\tif err != nil {\n\t\t\tif o.IgnoreMissingPullRequest {\n\t\t\t\tlog.Logger().Warnf(\"could not find Pull Request number from environment. Assuming main branch instead\")\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\treturn errors.Wrapf(err, \"failed to get PullRequest from environment. 
Try supplying option: --pr\")\n\t\t}\n\n\t\tif o.Number <= 0 && !o.IgnoreMissingPullRequest {\n\t\t\treturn options.MissingOption(\"pr\")\n\t\t}\n\t}\n\treturn nil\n}", "func IsErrPullRequestAlreadyExists(err error) bool {\n\t_, ok := err.(ErrPullRequestAlreadyExists)\n\treturn ok\n}", "func (m *MockPullRequestClient) CreatePullRequest(org, repo, title, body, head, base string, canModify bool) (int, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"CreatePullRequest\", org, repo, title, body, head, base, canModify)\n\tret0, _ := ret[0].(int)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func FilterIncludesPullRequests(filters []*Filter) bool {\n\tfor _, filter := range filters {\n\t\tif f, ok := filter.Strategy.(IsFilter); ok && !f.PullRequestOnly {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func (c *client) UpdatePullRequest(org, repo string, number int, title, body *string, open *bool, branch *string, canModify *bool) error {\n\tdurationLogger := c.log(\"UpdatePullRequest\", org, repo, title)\n\tdefer durationLogger()\n\n\tdata := struct {\n\t\tState *string `json:\"state,omitempty\"`\n\t\tTitle *string `json:\"title,omitempty\"`\n\t\tBody *string `json:\"body,omitempty\"`\n\t\tBase *string `json:\"base,omitempty\"`\n\t\t// MaintainerCanModify allows maintainers of the repo to modify this\n\t\t// pull request, eg. push changes to it before merging.\n\t\tMaintainerCanModify *bool `json:\"maintainer_can_modify,omitempty\"`\n\t}{\n\t\tTitle: title,\n\t\tBody: body,\n\t\tBase: branch,\n\t\tMaintainerCanModify: canModify,\n\t}\n\tif open != nil && *open {\n\t\top := \"open\"\n\t\tdata.State = &op\n\t} else if open != nil {\n\t\tcl := \"closed\"\n\t\tdata.State = &cl\n\t}\n\t_, err := c.request(&request{\n\t\t// allow the description and draft fields\n\t\t// https://developer.github.com/changes/2018-02-22-label-description-search-preview/\n\t\t// https://developer.github.com/changes/2019-02-14-draft-pull-requests/\n\t\taccept: \"application/vnd.github.symmetra-preview+json, application/vnd.github.shadow-cat-preview\",\n\t\tmethod: http.MethodPatch,\n\t\tpath: fmt.Sprintf(\"/repos/%s/%s/pulls/%d\", org, repo, number),\n\t\torg: org,\n\t\trequestBody: &data,\n\t\texitCodes: []int{200},\n\t}, nil)\n\treturn err\n}", "func openPullRequest(config *GitXargsConfig, repo *github.Repository, branch string) error {\n\n\tlogger := logging.GetLogger(\"git-xargs\")\n\n\tif config.DryRun || config.SkipPullRequests {\n\t\tlogger.WithFields(logrus.Fields{\n\t\t\t\"Repo\": repo.GetName(),\n\t\t}).Debug(\"--dry-run and / or --skip-pull-requests is set to true, so skipping opening a pull request!\")\n\t\treturn nil\n\t}\n\n\t// If the user only supplies a commit message, use that for both the pull request title and descriptions,\n\t// unless they are provided separately\n\ttitleToUse := config.PullRequestTitle\n\tdescriptionToUse := config.PullRequestDescription\n\n\tcommitMessage := config.CommitMessage\n\n\tif commitMessage != DefaultCommitMessage {\n\t\tif titleToUse == DefaultPullRequestTitle {\n\t\t\ttitleToUse = commitMessage\n\t\t}\n\n\t\tif descriptionToUse == DefaultPullRequestDescription {\n\t\t\tdescriptionToUse = commitMessage\n\t\t}\n\t}\n\n\t// Configure pull request options that the Github client accepts when making calls to open new pull requests\n\tnewPR := &github.NewPullRequest{\n\t\tTitle: github.String(titleToUse),\n\t\tHead: github.String(branch),\n\t\tBase: github.String(\"master\"),\n\t\tBody: github.String(descriptionToUse),\n\t\tMaintainerCanModify: 
github.Bool(true),\n\t}\n\n\t// Make a pull request via the Github API\n\tpr, _, err := config.GithubClient.PullRequests.Create(context.Background(), *repo.GetOwner().Login, repo.GetName(), newPR)\n\n\tif err != nil {\n\t\tlogger.WithFields(logrus.Fields{\n\t\t\t\"Error\": err,\n\t\t\t\"Head\": branch,\n\t\t\t\"Base\": \"master\",\n\t\t\t\"Body\": descriptionToUse,\n\t\t}).Debug(\"Error opening Pull request\")\n\n\t\t// Track pull request open failure\n\t\tconfig.Stats.TrackSingle(PullRequestOpenErr, repo)\n\t\treturn errors.WithStackTrace(err)\n\t}\n\n\tlogger.WithFields(logrus.Fields{\n\t\t\"Pull Request URL\": pr.GetHTMLURL(),\n\t}).Debug(\"Successfully opened pull request\")\n\n\t// Track successful opening of the pull request, extracting the HTML url to the PR itself for easier review\n\tconfig.Stats.TrackPullRequest(repo.GetName(), pr.GetHTMLURL())\n\treturn nil\n}", "func (s *TeamsService) IsTeamRepo(ctx context.Context, team int64, owner string, repo string) (*Repository, *Response, error) {\n\tu := fmt.Sprintf(\"teams/%v/repos/%v/%v\", team, owner, repo)\n\treq, err := s.client.NewRequest(\"GET\", u, nil)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\theaders := []string{mediaTypeOrgPermissionRepo, mediaTypeNestedTeamsPreview}\n\treq.Header.Set(\"Accept\", strings.Join(headers, \", \"))\n\n\trepository := new(Repository)\n\tresp, err := s.client.Do(ctx, req, repository)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn repository, resp, nil\n}", "func checkRepositoryBranchExists(client *github.Client, owner, repo, branch string) error {\n\tctx := context.WithValue(context.Background(), ctxId, buildTwoPartID(repo, branch))\n\t_, _, err := client.Repositories.GetBranch(ctx, owner, repo, branch, true)\n\tif err != nil {\n\t\tif ghErr, ok := err.(*github.ErrorResponse); ok {\n\t\t\tif ghErr.Response.StatusCode == http.StatusNotFound {\n\t\t\t\treturn fmt.Errorf(\"branch %s not found in repository %s/%s or repository is not readable\", branch, owner, repo)\n\t\t\t}\n\t\t}\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (config *TeamConfig) IsGithubConfigured() bool {\n\tgithubConfig := config.Hosts.Github\n\treturn len(githubConfig.Repositories) > 0 && len(config.GetGithubUsers()) > 0 &&\n\t\tgithubConfig.Token != \"\"\n}", "func (p *PullRequestReviewEvent) GetPullRequest() *PullRequest {\n\tif p == nil {\n\t\treturn nil\n\t}\n\treturn p.PullRequest\n}", "func (p *PullRequestReviewCommentEvent) GetPullRequest() *PullRequest {\n\tif p == nil {\n\t\treturn nil\n\t}\n\treturn p.PullRequest\n}", "func (c *Client) HasDependency(ctx context.Context, change *gerritpb.ChangeInfo) (bool, error) {\n\trelatedChanges, err := c.getRelatedChanges(ctx, change)\n\tif err != nil {\n\t\treturn false, errors.Annotate(err, \"failed checking dependency\").Err()\n\t}\n\n\tfor _, relatedChange := range relatedChanges {\n\t\tif relatedChange.Status == gerritpb.ChangeStatus_MERGED {\n\t\t\t// relatedChange here is the newest merged. 
If relatedChange != change,\n\t\t\t// then there is a merged dependency\n\t\t\treturn relatedChange.Project != change.Project ||\n\t\t\t\trelatedChange.Number != change.Number, nil\n\t\t}\n\t}\n\n\t// none of the related changes are merged, so no merged dependencies\n\treturn false, nil\n}", "func (p *PullRequestEvent) GetPullRequest() *PullRequest {\n\tif p == nil {\n\t\treturn nil\n\t}\n\treturn p.PullRequest\n}", "func (p *PullRequestEvent) GetRepo() *Repository {\n\tif p == nil {\n\t\treturn nil\n\t}\n\treturn p.Repo\n}", "func (r *Repository) GetAllowRebaseMerge() bool {\n\tif r == nil || r.AllowRebaseMerge == nil {\n\t\treturn false\n\t}\n\treturn *r.AllowRebaseMerge\n}", "func (o *PostWebhook) GetRepoPush() bool {\n\tif o == nil || o.RepoPush == nil {\n\t\tvar ret bool\n\t\treturn ret\n\t}\n\treturn *o.RepoPush\n}", "func CheckRepo(repo string) error {\n\tinGopathSrc, err := WdInGoPathSrc()\n\tif err != nil {\n\t\treturn err\n\t}\n\tif !inGopathSrc && repo == \"\" {\n\t\treturn fmt.Errorf(`flag --repo must be set if the working directory is not in $GOPATH/src.\n\t\tSee \"operator-sdk new -h\"`)\n\t}\n\treturn nil\n}", "func (b *BitBucketClient) MergePullRequest(workspace string, repositorySlug string, pullRequestID int64, description string, strategy string) (dto.BitBucketPullRequestInfoResponse, error) {\n\tlog.Logger().StartMessage(\"Merge pull-request\")\n\tif err := b.beforeRequest(); err != nil {\n\t\tlog.Logger().FinishMessage(\"Merge pull-request\")\n\t\treturn dto.BitBucketPullRequestInfoResponse{}, err\n\t}\n\n\tformData := map[string]string{\n\t\t\"merge_strategy\": strategy,\n\t\t\"message\": description,\n\t\t\"close_source_branch\": \"1\",\n\t}\n\n\tbyteString, err := json.Marshal(formData)\n\tif err != nil {\n\t\tlog.Logger().FinishMessage(\"Merge pull-request\")\n\t\treturn dto.BitBucketPullRequestInfoResponse{}, err\n\t}\n\n\tb.client.SetBaseURL(DefaultBitBucketBaseAPIUrl)\n\tendpoint := fmt.Sprintf(\"/repositories/%s/%s/pullrequests/%d/merge?async=false\", workspace, repositorySlug, pullRequestID)\n\n\tvar dtoResponse = dto.BitBucketPullRequestInfoResponse{}\n\tresponse, statusCode, err := b.client.Post(endpoint, byteString, map[string]string{})\n\tif err != nil {\n\t\tlog.Logger().FinishMessage(\"Merge pull-request\")\n\t\treturn dto.BitBucketPullRequestInfoResponse{}, err\n\t}\n\n\t//In that case the bitbucket accepts our request and the Pull-request will be merged but in async way(even if we specified async=false)\n\tif statusCode == http.StatusAccepted {\n\t\tlog.Logger().\n\t\t\tDebug().\n\t\t\tRawJSON(\"response\", response).\n\t\t\tInt(\"status_code\", statusCode).\n\t\t\tMsg(\"The response with poll link received.\")\n\t\tlog.Logger().FinishMessage(\"Merge pull-request\")\n\t\treturn dtoResponse, nil\n\t}\n\n\tif err := json.Unmarshal(response, &dtoResponse); err != nil {\n\t\tlog.Logger().\n\t\t\tAddError(err).\n\t\t\tRawJSON(\"response\", response).\n\t\t\tInt(\"status_code\", statusCode).\n\t\t\tMsg(\"Error during the request unmarshal.\")\n\t\tlog.Logger().FinishMessage(\"Merge pull-request\")\n\t\treturn dto.BitBucketPullRequestInfoResponse{}, err\n\t}\n\n\tif statusCode == http.StatusBadRequest {\n\t\tlog.Logger().FinishMessage(\"Merge pull-request\")\n\t\treturn dto.BitBucketPullRequestInfoResponse{}, fmt.Errorf(\"bitbucket response with the error: %s\", dtoResponse.Error.Message)\n\t}\n\n\tif statusCode == http.StatusUnauthorized {\n\t\tlog.Logger().FinishMessage(\"Merge pull-request\")\n\t\treturn dto.BitBucketPullRequestInfoResponse{}, 
fmt.Errorf(ErrorMsgNoAccess)\n\t}\n\n\tif statusCode == http.StatusNotFound {\n\t\tlog.Logger().FinishMessage(\"Merge pull-request\")\n\t\treturn dto.BitBucketPullRequestInfoResponse{}, errors.New(\"selected pull-request was not found :( \")\n\t}\n\n\tlog.Logger().FinishMessage(\"Merge pull-request\")\n\treturn dtoResponse, nil\n}", "func (gr *globRule) isMatch(pr *github.PullRequest, files []*github.CommitFile) bool {\n\tfor _, globStr := range gr.Globs {\n\t\tgs, ok := globStr.(string)\n\t\tif !ok {\n\t\t\tfmt.Printf(\"Not a string, Value:'%v'\\n\", globStr)\n\t\t\tcontinue\n\t\t}\n\t\tg := glob.MustCompile(gs)\n\t\tfor _, file := range files {\n\t\t\tif g.Match(file.GetFilename()) {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\n\treturn false\n}", "func (gc *GithubClient) GetPullRequest(org, repo string, ID int) (*github.PullRequest, error) {\n\tvar res *github.PullRequest\n\t_, err := gc.retry(\n\t\tfmt.Sprintf(\"Get PullRequest '%d'\", ID),\n\t\tmaxRetryCount,\n\t\tfunc() (*github.Response, error) {\n\t\t\tvar resp *github.Response\n\t\t\tvar err error\n\t\t\tres, resp, err = gc.Client.PullRequests.Get(ctx, org, repo, ID)\n\t\t\treturn resp, err\n\t\t},\n\t)\n\treturn res, err\n}" ]
[ "0.60033196", "0.56456953", "0.5529143", "0.55012417", "0.5406795", "0.5382219", "0.53821987", "0.5308959", "0.5265178", "0.5258808", "0.52289414", "0.52288187", "0.5203953", "0.5083942", "0.506576", "0.50482285", "0.50422275", "0.5011587", "0.500566", "0.49848372", "0.49848372", "0.49407277", "0.49346593", "0.4918292", "0.4915218", "0.4903576", "0.49014905", "0.48976547", "0.4887583", "0.4882742", "0.48626134", "0.48590347", "0.4838432", "0.4817365", "0.4808611", "0.48001683", "0.47967094", "0.47887355", "0.47849894", "0.47760984", "0.47699383", "0.4766585", "0.47404566", "0.47355348", "0.4725808", "0.4723519", "0.4711526", "0.46925163", "0.46915287", "0.4667822", "0.46649718", "0.46639934", "0.4650246", "0.46459457", "0.46280038", "0.46201807", "0.46137503", "0.460614", "0.45943525", "0.45925862", "0.45879465", "0.45854595", "0.45805833", "0.45776883", "0.45773816", "0.4570951", "0.45654875", "0.4564051", "0.4562204", "0.45523468", "0.45475096", "0.45471492", "0.45292953", "0.45207772", "0.4513299", "0.45104676", "0.4507914", "0.4503818", "0.45014814", "0.4495156", "0.4493691", "0.44661343", "0.4465101", "0.44626543", "0.44623154", "0.44531816", "0.44490892", "0.4448205", "0.4440873", "0.44368592", "0.44357187", "0.4432611", "0.443125", "0.44206455", "0.4418022", "0.4417047", "0.44125193", "0.4411922", "0.44071254", "0.43924528" ]
0.87552845
0
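Several of the negatives above share the same go-github call shape for opening a pull request: build a `github.NewPullRequest`, then call `PullRequests.Create`. A minimal, hedged sketch of that pattern follows; the token source, owner/repo names, and branch names are illustrative assumptions, not values taken from any entry.

```go
package main

import (
	"context"
	"fmt"
	"log"
	"os"

	"github.com/google/go-github/github"
	"golang.org/x/oauth2"
)

func main() {
	ctx := context.Background()

	// Assumed token source; any oauth2.TokenSource would work here.
	ts := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: os.Getenv("GITHUB_TOKEN")})
	client := github.NewClient(oauth2.NewClient(ctx, ts))

	// Same option shape as the openPullRequest negative above;
	// the branch and repo names below are placeholders.
	newPR := &github.NewPullRequest{
		Title:               github.String("chore: example change"),
		Head:                github.String("feature-branch"),
		Base:                github.String("master"),
		Body:                github.String("Example description"),
		MaintainerCanModify: github.Bool(true),
	}

	pr, _, err := client.PullRequests.Create(ctx, "example-owner", "example-repo", newPR)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("opened:", pr.GetHTMLURL())
}
```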
UpdateProtectBranch saves branch protection options. If ID is 0, it creates a new record. Otherwise, updates existing record.
func UpdateProtectBranch(protectBranch *ProtectBranch) (err error) { sess := x.NewSession() defer sess.Close() if err = sess.Begin(); err != nil { return err } if protectBranch.ID == 0 { if _, err = sess.Insert(protectBranch); err != nil { return fmt.Errorf("Insert: %v", err) } } if _, err = sess.ID(protectBranch.ID).AllCols().Update(protectBranch); err != nil { return fmt.Errorf("Update: %v", err) } return sess.Commit() }
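A self-contained sketch of how the documented insert-or-update flow behaves when driven twice — first with a zero ID (insert), then with the populated ID (update). The xorm engine setup, SQLite driver, and trimmed `ProtectBranch` struct are assumptions for illustration, not part of the dataset entry.

```go
package main

import (
	"fmt"
	"log"

	_ "github.com/mattn/go-sqlite3"
	"xorm.io/xorm"
)

// ProtectBranch mirrors only the fields this sketch needs;
// the real schema has many more columns.
type ProtectBranch struct {
	ID      int64 `xorm:"pk autoincr"`
	RepoID  int64
	Name    string
	CanPush bool
}

// upsertProtectBranch follows the same flow as UpdateProtectBranch above:
// insert when ID is 0, then update by ID, all inside one transaction.
func upsertProtectBranch(x *xorm.Engine, pb *ProtectBranch) error {
	sess := x.NewSession()
	defer sess.Close()
	if err := sess.Begin(); err != nil {
		return err
	}
	if pb.ID == 0 {
		// xorm populates pb.ID from the autoincrement key after Insert.
		if _, err := sess.Insert(pb); err != nil {
			return fmt.Errorf("Insert: %v", err)
		}
	}
	if _, err := sess.ID(pb.ID).AllCols().Update(pb); err != nil {
		return fmt.Errorf("Update: %v", err)
	}
	return sess.Commit()
}

func main() {
	x, err := xorm.NewEngine("sqlite3", ":memory:")
	if err != nil {
		log.Fatal(err)
	}
	if err := x.Sync2(new(ProtectBranch)); err != nil {
		log.Fatal(err)
	}

	pb := &ProtectBranch{RepoID: 1, Name: "main", CanPush: false}
	if err := upsertProtectBranch(x, pb); err != nil { // ID == 0: creates the record
		log.Fatal(err)
	}

	pb.CanPush = true
	if err := upsertProtectBranch(x, pb); err != nil { // ID != 0: updates in place
		log.Fatal(err)
	}
	fmt.Println("record id:", pb.ID)
}
```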
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func EditBranchProtection(ctx *context.APIContext) {\n\t// swagger:operation PATCH /repos/{owner}/{repo}/branch_protections/{name} repository repoEditBranchProtection\n\t// ---\n\t// summary: Edit a branch protections for a repository. Only fields that are set will be changed\n\t// consumes:\n\t// - application/json\n\t// produces:\n\t// - application/json\n\t// parameters:\n\t// - name: owner\n\t// in: path\n\t// description: owner of the repo\n\t// type: string\n\t// required: true\n\t// - name: repo\n\t// in: path\n\t// description: name of the repo\n\t// type: string\n\t// required: true\n\t// - name: name\n\t// in: path\n\t// description: name of protected branch\n\t// type: string\n\t// required: true\n\t// - name: body\n\t// in: body\n\t// schema:\n\t// \"$ref\": \"#/definitions/EditBranchProtectionOption\"\n\t// responses:\n\t// \"200\":\n\t// \"$ref\": \"#/responses/BranchProtection\"\n\t// \"404\":\n\t// \"$ref\": \"#/responses/notFound\"\n\t// \"422\":\n\t// \"$ref\": \"#/responses/validationError\"\n\tform := web.GetForm(ctx).(*api.EditBranchProtectionOption)\n\trepo := ctx.Repo.Repository\n\tbpName := ctx.Params(\":name\")\n\tprotectBranch, err := git_model.GetProtectedBranchRuleByName(ctx, repo.ID, bpName)\n\tif err != nil {\n\t\tctx.Error(http.StatusInternalServerError, \"GetProtectedBranchByID\", err)\n\t\treturn\n\t}\n\tif protectBranch == nil || protectBranch.RepoID != repo.ID {\n\t\tctx.NotFound()\n\t\treturn\n\t}\n\n\tif form.EnablePush != nil {\n\t\tif !*form.EnablePush {\n\t\t\tprotectBranch.CanPush = false\n\t\t\tprotectBranch.EnableWhitelist = false\n\t\t\tprotectBranch.WhitelistDeployKeys = false\n\t\t} else {\n\t\t\tprotectBranch.CanPush = true\n\t\t\tif form.EnablePushWhitelist != nil {\n\t\t\t\tif !*form.EnablePushWhitelist {\n\t\t\t\t\tprotectBranch.EnableWhitelist = false\n\t\t\t\t\tprotectBranch.WhitelistDeployKeys = false\n\t\t\t\t} else {\n\t\t\t\t\tprotectBranch.EnableWhitelist = true\n\t\t\t\t\tif form.PushWhitelistDeployKeys != nil {\n\t\t\t\t\t\tprotectBranch.WhitelistDeployKeys = *form.PushWhitelistDeployKeys\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tif form.EnableMergeWhitelist != nil {\n\t\tprotectBranch.EnableMergeWhitelist = *form.EnableMergeWhitelist\n\t}\n\n\tif form.EnableStatusCheck != nil {\n\t\tprotectBranch.EnableStatusCheck = *form.EnableStatusCheck\n\t}\n\n\tif form.StatusCheckContexts != nil {\n\t\tprotectBranch.StatusCheckContexts = form.StatusCheckContexts\n\t}\n\n\tif form.RequiredApprovals != nil && *form.RequiredApprovals >= 0 {\n\t\tprotectBranch.RequiredApprovals = *form.RequiredApprovals\n\t}\n\n\tif form.EnableApprovalsWhitelist != nil {\n\t\tprotectBranch.EnableApprovalsWhitelist = *form.EnableApprovalsWhitelist\n\t}\n\n\tif form.BlockOnRejectedReviews != nil {\n\t\tprotectBranch.BlockOnRejectedReviews = *form.BlockOnRejectedReviews\n\t}\n\n\tif form.BlockOnOfficialReviewRequests != nil {\n\t\tprotectBranch.BlockOnOfficialReviewRequests = *form.BlockOnOfficialReviewRequests\n\t}\n\n\tif form.DismissStaleApprovals != nil {\n\t\tprotectBranch.DismissStaleApprovals = *form.DismissStaleApprovals\n\t}\n\n\tif form.RequireSignedCommits != nil {\n\t\tprotectBranch.RequireSignedCommits = *form.RequireSignedCommits\n\t}\n\n\tif form.ProtectedFilePatterns != nil {\n\t\tprotectBranch.ProtectedFilePatterns = *form.ProtectedFilePatterns\n\t}\n\n\tif form.UnprotectedFilePatterns != nil {\n\t\tprotectBranch.UnprotectedFilePatterns = *form.UnprotectedFilePatterns\n\t}\n\n\tif form.BlockOnOutdatedBranch != nil 
{\n\t\tprotectBranch.BlockOnOutdatedBranch = *form.BlockOnOutdatedBranch\n\t}\n\n\tvar whitelistUsers []int64\n\tif form.PushWhitelistUsernames != nil {\n\t\twhitelistUsers, err = user_model.GetUserIDsByNames(ctx, form.PushWhitelistUsernames, false)\n\t\tif err != nil {\n\t\t\tif user_model.IsErrUserNotExist(err) {\n\t\t\t\tctx.Error(http.StatusUnprocessableEntity, \"User does not exist\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tctx.Error(http.StatusInternalServerError, \"GetUserIDsByNames\", err)\n\t\t\treturn\n\t\t}\n\t} else {\n\t\twhitelistUsers = protectBranch.WhitelistUserIDs\n\t}\n\tvar mergeWhitelistUsers []int64\n\tif form.MergeWhitelistUsernames != nil {\n\t\tmergeWhitelistUsers, err = user_model.GetUserIDsByNames(ctx, form.MergeWhitelistUsernames, false)\n\t\tif err != nil {\n\t\t\tif user_model.IsErrUserNotExist(err) {\n\t\t\t\tctx.Error(http.StatusUnprocessableEntity, \"User does not exist\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tctx.Error(http.StatusInternalServerError, \"GetUserIDsByNames\", err)\n\t\t\treturn\n\t\t}\n\t} else {\n\t\tmergeWhitelistUsers = protectBranch.MergeWhitelistUserIDs\n\t}\n\tvar approvalsWhitelistUsers []int64\n\tif form.ApprovalsWhitelistUsernames != nil {\n\t\tapprovalsWhitelistUsers, err = user_model.GetUserIDsByNames(ctx, form.ApprovalsWhitelistUsernames, false)\n\t\tif err != nil {\n\t\t\tif user_model.IsErrUserNotExist(err) {\n\t\t\t\tctx.Error(http.StatusUnprocessableEntity, \"User does not exist\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tctx.Error(http.StatusInternalServerError, \"GetUserIDsByNames\", err)\n\t\t\treturn\n\t\t}\n\t} else {\n\t\tapprovalsWhitelistUsers = protectBranch.ApprovalsWhitelistUserIDs\n\t}\n\n\tvar whitelistTeams, mergeWhitelistTeams, approvalsWhitelistTeams []int64\n\tif repo.Owner.IsOrganization() {\n\t\tif form.PushWhitelistTeams != nil {\n\t\t\twhitelistTeams, err = organization.GetTeamIDsByNames(repo.OwnerID, form.PushWhitelistTeams, false)\n\t\t\tif err != nil {\n\t\t\t\tif organization.IsErrTeamNotExist(err) {\n\t\t\t\t\tctx.Error(http.StatusUnprocessableEntity, \"Team does not exist\", err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tctx.Error(http.StatusInternalServerError, \"GetTeamIDsByNames\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t} else {\n\t\t\twhitelistTeams = protectBranch.WhitelistTeamIDs\n\t\t}\n\t\tif form.MergeWhitelistTeams != nil {\n\t\t\tmergeWhitelistTeams, err = organization.GetTeamIDsByNames(repo.OwnerID, form.MergeWhitelistTeams, false)\n\t\t\tif err != nil {\n\t\t\t\tif organization.IsErrTeamNotExist(err) {\n\t\t\t\t\tctx.Error(http.StatusUnprocessableEntity, \"Team does not exist\", err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tctx.Error(http.StatusInternalServerError, \"GetTeamIDsByNames\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t} else {\n\t\t\tmergeWhitelistTeams = protectBranch.MergeWhitelistTeamIDs\n\t\t}\n\t\tif form.ApprovalsWhitelistTeams != nil {\n\t\t\tapprovalsWhitelistTeams, err = organization.GetTeamIDsByNames(repo.OwnerID, form.ApprovalsWhitelistTeams, false)\n\t\t\tif err != nil {\n\t\t\t\tif organization.IsErrTeamNotExist(err) {\n\t\t\t\t\tctx.Error(http.StatusUnprocessableEntity, \"Team does not exist\", err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tctx.Error(http.StatusInternalServerError, \"GetTeamIDsByNames\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t} else {\n\t\t\tapprovalsWhitelistTeams = protectBranch.ApprovalsWhitelistTeamIDs\n\t\t}\n\t}\n\n\terr = git_model.UpdateProtectBranch(ctx, ctx.Repo.Repository, protectBranch, git_model.WhitelistOptions{\n\t\tUserIDs: whitelistUsers,\n\t\tTeamIDs: 
whitelistTeams,\n\t\tMergeUserIDs: mergeWhitelistUsers,\n\t\tMergeTeamIDs: mergeWhitelistTeams,\n\t\tApprovalsUserIDs: approvalsWhitelistUsers,\n\t\tApprovalsTeamIDs: approvalsWhitelistTeams,\n\t})\n\tif err != nil {\n\t\tctx.Error(http.StatusInternalServerError, \"UpdateProtectBranch\", err)\n\t\treturn\n\t}\n\n\tisPlainRule := !git_model.IsRuleNameSpecial(bpName)\n\tvar isBranchExist bool\n\tif isPlainRule {\n\t\tisBranchExist = git.IsBranchExist(ctx.Req.Context(), ctx.Repo.Repository.RepoPath(), bpName)\n\t}\n\n\tif isBranchExist {\n\t\tif err = pull_service.CheckPRsForBaseBranch(ctx, ctx.Repo.Repository, bpName); err != nil {\n\t\t\tctx.Error(http.StatusInternalServerError, \"CheckPrsForBaseBranch\", err)\n\t\t\treturn\n\t\t}\n\t} else {\n\t\tif !isPlainRule {\n\t\t\tif ctx.Repo.GitRepo == nil {\n\t\t\t\tctx.Repo.GitRepo, err = git.OpenRepository(ctx, ctx.Repo.Repository.RepoPath())\n\t\t\t\tif err != nil {\n\t\t\t\t\tctx.Error(http.StatusInternalServerError, \"OpenRepository\", err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tdefer func() {\n\t\t\t\t\tctx.Repo.GitRepo.Close()\n\t\t\t\t\tctx.Repo.GitRepo = nil\n\t\t\t\t}()\n\t\t\t}\n\n\t\t\t// FIXME: since we only need to recheck files protected rules, we could improve this\n\t\t\tmatchedBranches, err := git_model.FindAllMatchedBranches(ctx, ctx.Repo.Repository.ID, protectBranch.RuleName)\n\t\t\tif err != nil {\n\t\t\t\tctx.Error(http.StatusInternalServerError, \"FindAllMatchedBranches\", err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tfor _, branchName := range matchedBranches {\n\t\t\t\tif err = pull_service.CheckPRsForBaseBranch(ctx, ctx.Repo.Repository, branchName); err != nil {\n\t\t\t\t\tctx.Error(http.StatusInternalServerError, \"CheckPrsForBaseBranch\", err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t// Reload from db to ensure get all whitelists\n\tbp, err := git_model.GetProtectedBranchRuleByName(ctx, repo.ID, bpName)\n\tif err != nil {\n\t\tctx.Error(http.StatusInternalServerError, \"GetProtectedBranchBy\", err)\n\t\treturn\n\t}\n\tif bp == nil || bp.RepoID != ctx.Repo.Repository.ID {\n\t\tctx.Error(http.StatusInternalServerError, \"New branch protection not found\", err)\n\t\treturn\n\t}\n\n\tctx.JSON(http.StatusOK, convert.ToBranchProtection(bp))\n}", "func (c *client) UpdateBranchProtection(org, repo, branch string, config BranchProtectionRequest) error {\n\tdurationLogger := c.log(\"UpdateBranchProtection\", org, repo, branch, config)\n\tdefer durationLogger()\n\n\t_, err := c.request(&request{\n\t\taccept: \"application/vnd.github.luke-cage-preview+json\", // for required_approving_review_count\n\t\tmethod: http.MethodPut,\n\t\tpath: fmt.Sprintf(\"/repos/%s/%s/branches/%s/protection\", org, repo, branch),\n\t\torg: org,\n\t\trequestBody: config,\n\t\texitCodes: []int{200},\n\t}, nil)\n\treturn err\n}", "func (p GithubRepoHost) UpdateBranchProtection(repoID string, rule BranchProtectionRule) error {\n\tif isDebug() {\n\t\tfmt.Printf(\"Updating branch protection on %s\\n\", repoID)\n\t}\n\n\trules := fetchBranchProtectionRules()\n\tinput := githubv4.UpdateBranchProtectionRuleInput{\n\t\tBranchProtectionRuleID: rule.ID,\n\t\tPattern: githubv4.NewString(githubv4.String(rules.Pattern)),\n\t\tDismissesStaleReviews: githubv4.NewBoolean(githubv4.Boolean(rules.DismissesStaleReviews)),\n\t\tIsAdminEnforced: githubv4.NewBoolean(githubv4.Boolean(rules.IsAdminEnforced)),\n\t\tRequiresApprovingReviews: githubv4.NewBoolean(githubv4.Boolean(rules.RequiresApprovingReviews)),\n\t\tRequiredApprovingReviewCount: 
githubv4.NewInt(githubv4.Int(rules.RequiredApprovingReviewCount)),\n\t\tRequiresStatusChecks: githubv4.NewBoolean(githubv4.Boolean(rules.RequiresStatusChecks)),\n\t\tRequiredStatusCheckContexts: &[]githubv4.String{\n\t\t\t*githubv4.NewString(\"build\"),\n\t\t},\n\t}\n\n\tvar m UpdateBranchProtectionRuleMutation\n\tclient := buildClient()\n\terr := client.Mutate(context.Background(), &m, input, nil)\n\treturn err\n}", "func UpdateBranchProtection() error {\n\tvar wg sync.WaitGroup\n\trequests, err := getBranchProtectionRequests()\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\twg.Add(len(requests))\n\towner, repo := getOwnerRepo()\n\n\tfor _, bp := range requests {\n\t\tgo func(bp BranchProtection) {\n\t\t\tdefer wg.Done()\n\t\t\t_, _, err := cli.Repositories.UpdateBranchProtection(ctx, owner, repo, bp.Branch, bp.Protection)\n\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\n\t\t\t_, err = fmt.Fprintln(writer, fmt.Sprintf(\"branch %v has been protected\", bp.Branch))\n\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\t\t}(bp)\n\t}\n\n\twg.Wait()\n\n\treturn nil\n}", "func (mr *MockRepositoryClientMockRecorder) UpdateBranchProtection(org, repo, branch, config interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"UpdateBranchProtection\", reflect.TypeOf((*MockRepositoryClient)(nil).UpdateBranchProtection), org, repo, branch, config)\n}", "func UpdateOrgProtectBranch(repo *Repository, protectBranch *ProtectBranch, whitelistUserIDs, whitelistTeamIDs string) (err error) {\n\tif err = repo.GetOwner(); err != nil {\n\t\treturn fmt.Errorf(\"GetOwner: %v\", err)\n\t} else if !repo.Owner.IsOrganization() {\n\t\treturn fmt.Errorf(\"expect repository owner to be an organization\")\n\t}\n\n\thasUsersChanged := false\n\tvalidUserIDs := tool.StringsToInt64s(strings.Split(protectBranch.WhitelistUserIDs, \",\"))\n\tif protectBranch.WhitelistUserIDs != whitelistUserIDs {\n\t\thasUsersChanged = true\n\t\tuserIDs := tool.StringsToInt64s(strings.Split(whitelistUserIDs, \",\"))\n\t\tvalidUserIDs = make([]int64, 0, len(userIDs))\n\t\tfor _, userID := range userIDs {\n\t\t\tif !Perms.Authorize(context.TODO(), userID, repo.ID, AccessModeWrite,\n\t\t\t\tAccessModeOptions{\n\t\t\t\t\tOwnerID: repo.OwnerID,\n\t\t\t\t\tPrivate: repo.IsPrivate,\n\t\t\t\t},\n\t\t\t) {\n\t\t\t\tcontinue // Drop invalid user ID\n\t\t\t}\n\n\t\t\tvalidUserIDs = append(validUserIDs, userID)\n\t\t}\n\n\t\tprotectBranch.WhitelistUserIDs = strings.Join(tool.Int64sToStrings(validUserIDs), \",\")\n\t}\n\n\thasTeamsChanged := false\n\tvalidTeamIDs := tool.StringsToInt64s(strings.Split(protectBranch.WhitelistTeamIDs, \",\"))\n\tif protectBranch.WhitelistTeamIDs != whitelistTeamIDs {\n\t\thasTeamsChanged = true\n\t\tteamIDs := tool.StringsToInt64s(strings.Split(whitelistTeamIDs, \",\"))\n\t\tteams, err := GetTeamsHaveAccessToRepo(repo.OwnerID, repo.ID, AccessModeWrite)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"GetTeamsHaveAccessToRepo [org_id: %d, repo_id: %d]: %v\", repo.OwnerID, repo.ID, err)\n\t\t}\n\t\tvalidTeamIDs = make([]int64, 0, len(teams))\n\t\tfor i := range teams {\n\t\t\tif teams[i].HasWriteAccess() && com.IsSliceContainsInt64(teamIDs, teams[i].ID) {\n\t\t\t\tvalidTeamIDs = append(validTeamIDs, teams[i].ID)\n\t\t\t}\n\t\t}\n\n\t\tprotectBranch.WhitelistTeamIDs = strings.Join(tool.Int64sToStrings(validTeamIDs), \",\")\n\t}\n\n\t// Make sure protectBranch.ID is not 0 for whitelists\n\tif protectBranch.ID == 0 {\n\t\tif _, err = x.Insert(protectBranch); err 
!= nil {\n\t\t\treturn fmt.Errorf(\"Insert: %v\", err)\n\t\t}\n\t}\n\n\t// Merge users and members of teams\n\tvar whitelists []*ProtectBranchWhitelist\n\tif hasUsersChanged || hasTeamsChanged {\n\t\tmergedUserIDs := make(map[int64]bool)\n\t\tfor _, userID := range validUserIDs {\n\t\t\t// Empty whitelist users can cause an ID with 0\n\t\t\tif userID != 0 {\n\t\t\t\tmergedUserIDs[userID] = true\n\t\t\t}\n\t\t}\n\n\t\tfor _, teamID := range validTeamIDs {\n\t\t\tmembers, err := GetTeamMembers(teamID)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"GetTeamMembers [team_id: %d]: %v\", teamID, err)\n\t\t\t}\n\n\t\t\tfor i := range members {\n\t\t\t\tmergedUserIDs[members[i].ID] = true\n\t\t\t}\n\t\t}\n\n\t\twhitelists = make([]*ProtectBranchWhitelist, 0, len(mergedUserIDs))\n\t\tfor userID := range mergedUserIDs {\n\t\t\twhitelists = append(whitelists, &ProtectBranchWhitelist{\n\t\t\t\tProtectBranchID: protectBranch.ID,\n\t\t\t\tRepoID: repo.ID,\n\t\t\t\tName: protectBranch.Name,\n\t\t\t\tUserID: userID,\n\t\t\t})\n\t\t}\n\t}\n\n\tsess := x.NewSession()\n\tdefer sess.Close()\n\tif err = sess.Begin(); err != nil {\n\t\treturn err\n\t}\n\n\tif _, err = sess.ID(protectBranch.ID).AllCols().Update(protectBranch); err != nil {\n\t\treturn fmt.Errorf(\"Update: %v\", err)\n\t}\n\n\t// Refresh whitelists\n\tif hasUsersChanged || hasTeamsChanged {\n\t\tif _, err = sess.Delete(&ProtectBranchWhitelist{ProtectBranchID: protectBranch.ID}); err != nil {\n\t\t\treturn fmt.Errorf(\"delete old protect branch whitelists: %v\", err)\n\t\t} else if _, err = sess.Insert(whitelists); err != nil {\n\t\t\treturn fmt.Errorf(\"insert new protect branch whitelists: %v\", err)\n\t\t}\n\t}\n\n\treturn sess.Commit()\n}", "func (mr *MockClientMockRecorder) UpdateBranchProtection(org, repo, branch, config interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"UpdateBranchProtection\", reflect.TypeOf((*MockClient)(nil).UpdateBranchProtection), org, repo, branch, config)\n}", "func (m *MockClient) UpdateBranchProtection(org, repo, branch string, config github.BranchProtectionRequest) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"UpdateBranchProtection\", org, repo, branch, config)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (p GithubRepoHost) AddBranchProtection(repoID string) (BranchProtectionRule, error) {\n\tif isDebug() {\n\t\tfmt.Printf(\"Adding branch protection on %s\\n\", repoID)\n\t}\n\n\trules := fetchBranchProtectionRules()\n\tinput := githubv4.CreateBranchProtectionRuleInput{\n\t\tRepositoryID: repoID,\n\t\tPattern: *githubv4.NewString(githubv4.String(rules.Pattern)),\n\t\tDismissesStaleReviews: githubv4.NewBoolean(githubv4.Boolean(rules.DismissesStaleReviews)),\n\t\tIsAdminEnforced: githubv4.NewBoolean(githubv4.Boolean(rules.IsAdminEnforced)),\n\t\tRequiresApprovingReviews: githubv4.NewBoolean(githubv4.Boolean(rules.RequiresApprovingReviews)),\n\t\tRequiredApprovingReviewCount: githubv4.NewInt(githubv4.Int(rules.RequiredApprovingReviewCount)),\n\t\tRequiresStatusChecks: githubv4.NewBoolean(githubv4.Boolean(rules.RequiresStatusChecks)),\n\t}\n\n\tchecks := make([]githubv4.String, len(rules.RequiredStatusCheckContexts))\n\tfor i, name := range rules.RequiredStatusCheckContexts {\n\t\tchecks[i] = *githubv4.NewString(githubv4.String(name))\n\t}\n\tinput.RequiredStatusCheckContexts = &checks\n\n\tvar m CreateRuleMutation\n\tclient := buildClient()\n\terr := client.Mutate(context.Background(), &m, input, nil)\n\treturn 
m.CreateBranchProtectionRule.BranchProtectionRule, err\n}", "func (m *MockRepositoryClient) UpdateBranchProtection(org, repo, branch string, config github.BranchProtectionRequest) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"UpdateBranchProtection\", org, repo, branch, config)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func CreateBranchProtection(ctx *context.APIContext) {\n\t// swagger:operation POST /repos/{owner}/{repo}/branch_protections repository repoCreateBranchProtection\n\t// ---\n\t// summary: Create a branch protections for a repository\n\t// consumes:\n\t// - application/json\n\t// produces:\n\t// - application/json\n\t// parameters:\n\t// - name: owner\n\t// in: path\n\t// description: owner of the repo\n\t// type: string\n\t// required: true\n\t// - name: repo\n\t// in: path\n\t// description: name of the repo\n\t// type: string\n\t// required: true\n\t// - name: body\n\t// in: body\n\t// schema:\n\t// \"$ref\": \"#/definitions/CreateBranchProtectionOption\"\n\t// responses:\n\t// \"201\":\n\t// \"$ref\": \"#/responses/BranchProtection\"\n\t// \"403\":\n\t// \"$ref\": \"#/responses/forbidden\"\n\t// \"404\":\n\t// \"$ref\": \"#/responses/notFound\"\n\t// \"422\":\n\t// \"$ref\": \"#/responses/validationError\"\n\n\tform := web.GetForm(ctx).(*api.CreateBranchProtectionOption)\n\trepo := ctx.Repo.Repository\n\n\truleName := form.RuleName\n\tif ruleName == \"\" {\n\t\truleName = form.BranchName //nolint\n\t}\n\tif len(ruleName) == 0 {\n\t\tctx.Error(http.StatusBadRequest, \"both rule_name and branch_name are empty\", \"both rule_name and branch_name are empty\")\n\t\treturn\n\t}\n\n\tisPlainRule := !git_model.IsRuleNameSpecial(ruleName)\n\tvar isBranchExist bool\n\tif isPlainRule {\n\t\tisBranchExist = git.IsBranchExist(ctx.Req.Context(), ctx.Repo.Repository.RepoPath(), ruleName)\n\t}\n\n\tprotectBranch, err := git_model.GetProtectedBranchRuleByName(ctx, repo.ID, ruleName)\n\tif err != nil {\n\t\tctx.Error(http.StatusInternalServerError, \"GetProtectBranchOfRepoByName\", err)\n\t\treturn\n\t} else if protectBranch != nil {\n\t\tctx.Error(http.StatusForbidden, \"Create branch protection\", \"Branch protection already exist\")\n\t\treturn\n\t}\n\n\tvar requiredApprovals int64\n\tif form.RequiredApprovals > 0 {\n\t\trequiredApprovals = form.RequiredApprovals\n\t}\n\n\twhitelistUsers, err := user_model.GetUserIDsByNames(ctx, form.PushWhitelistUsernames, false)\n\tif err != nil {\n\t\tif user_model.IsErrUserNotExist(err) {\n\t\t\tctx.Error(http.StatusUnprocessableEntity, \"User does not exist\", err)\n\t\t\treturn\n\t\t}\n\t\tctx.Error(http.StatusInternalServerError, \"GetUserIDsByNames\", err)\n\t\treturn\n\t}\n\tmergeWhitelistUsers, err := user_model.GetUserIDsByNames(ctx, form.MergeWhitelistUsernames, false)\n\tif err != nil {\n\t\tif user_model.IsErrUserNotExist(err) {\n\t\t\tctx.Error(http.StatusUnprocessableEntity, \"User does not exist\", err)\n\t\t\treturn\n\t\t}\n\t\tctx.Error(http.StatusInternalServerError, \"GetUserIDsByNames\", err)\n\t\treturn\n\t}\n\tapprovalsWhitelistUsers, err := user_model.GetUserIDsByNames(ctx, form.ApprovalsWhitelistUsernames, false)\n\tif err != nil {\n\t\tif user_model.IsErrUserNotExist(err) {\n\t\t\tctx.Error(http.StatusUnprocessableEntity, \"User does not exist\", err)\n\t\t\treturn\n\t\t}\n\t\tctx.Error(http.StatusInternalServerError, \"GetUserIDsByNames\", err)\n\t\treturn\n\t}\n\tvar whitelistTeams, mergeWhitelistTeams, approvalsWhitelistTeams []int64\n\tif repo.Owner.IsOrganization() {\n\t\twhitelistTeams, err = 
organization.GetTeamIDsByNames(repo.OwnerID, form.PushWhitelistTeams, false)\n\t\tif err != nil {\n\t\t\tif organization.IsErrTeamNotExist(err) {\n\t\t\t\tctx.Error(http.StatusUnprocessableEntity, \"Team does not exist\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tctx.Error(http.StatusInternalServerError, \"GetTeamIDsByNames\", err)\n\t\t\treturn\n\t\t}\n\t\tmergeWhitelistTeams, err = organization.GetTeamIDsByNames(repo.OwnerID, form.MergeWhitelistTeams, false)\n\t\tif err != nil {\n\t\t\tif organization.IsErrTeamNotExist(err) {\n\t\t\t\tctx.Error(http.StatusUnprocessableEntity, \"Team does not exist\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tctx.Error(http.StatusInternalServerError, \"GetTeamIDsByNames\", err)\n\t\t\treturn\n\t\t}\n\t\tapprovalsWhitelistTeams, err = organization.GetTeamIDsByNames(repo.OwnerID, form.ApprovalsWhitelistTeams, false)\n\t\tif err != nil {\n\t\t\tif organization.IsErrTeamNotExist(err) {\n\t\t\t\tctx.Error(http.StatusUnprocessableEntity, \"Team does not exist\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tctx.Error(http.StatusInternalServerError, \"GetTeamIDsByNames\", err)\n\t\t\treturn\n\t\t}\n\t}\n\n\tprotectBranch = &git_model.ProtectedBranch{\n\t\tRepoID: ctx.Repo.Repository.ID,\n\t\tRuleName: ruleName,\n\t\tCanPush: form.EnablePush,\n\t\tEnableWhitelist: form.EnablePush && form.EnablePushWhitelist,\n\t\tEnableMergeWhitelist: form.EnableMergeWhitelist,\n\t\tWhitelistDeployKeys: form.EnablePush && form.EnablePushWhitelist && form.PushWhitelistDeployKeys,\n\t\tEnableStatusCheck: form.EnableStatusCheck,\n\t\tStatusCheckContexts: form.StatusCheckContexts,\n\t\tEnableApprovalsWhitelist: form.EnableApprovalsWhitelist,\n\t\tRequiredApprovals: requiredApprovals,\n\t\tBlockOnRejectedReviews: form.BlockOnRejectedReviews,\n\t\tBlockOnOfficialReviewRequests: form.BlockOnOfficialReviewRequests,\n\t\tDismissStaleApprovals: form.DismissStaleApprovals,\n\t\tRequireSignedCommits: form.RequireSignedCommits,\n\t\tProtectedFilePatterns: form.ProtectedFilePatterns,\n\t\tUnprotectedFilePatterns: form.UnprotectedFilePatterns,\n\t\tBlockOnOutdatedBranch: form.BlockOnOutdatedBranch,\n\t}\n\n\terr = git_model.UpdateProtectBranch(ctx, ctx.Repo.Repository, protectBranch, git_model.WhitelistOptions{\n\t\tUserIDs: whitelistUsers,\n\t\tTeamIDs: whitelistTeams,\n\t\tMergeUserIDs: mergeWhitelistUsers,\n\t\tMergeTeamIDs: mergeWhitelistTeams,\n\t\tApprovalsUserIDs: approvalsWhitelistUsers,\n\t\tApprovalsTeamIDs: approvalsWhitelistTeams,\n\t})\n\tif err != nil {\n\t\tctx.Error(http.StatusInternalServerError, \"UpdateProtectBranch\", err)\n\t\treturn\n\t}\n\n\tif isBranchExist {\n\t\tif err = pull_service.CheckPRsForBaseBranch(ctx, ctx.Repo.Repository, ruleName); err != nil {\n\t\t\tctx.Error(http.StatusInternalServerError, \"CheckPRsForBaseBranch\", err)\n\t\t\treturn\n\t\t}\n\t} else {\n\t\tif !isPlainRule {\n\t\t\tif ctx.Repo.GitRepo == nil {\n\t\t\t\tctx.Repo.GitRepo, err = git.OpenRepository(ctx, ctx.Repo.Repository.RepoPath())\n\t\t\t\tif err != nil {\n\t\t\t\t\tctx.Error(http.StatusInternalServerError, \"OpenRepository\", err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tdefer func() {\n\t\t\t\t\tctx.Repo.GitRepo.Close()\n\t\t\t\t\tctx.Repo.GitRepo = nil\n\t\t\t\t}()\n\t\t\t}\n\t\t\t// FIXME: since we only need to recheck files protected rules, we could improve this\n\t\t\tmatchedBranches, err := git_model.FindAllMatchedBranches(ctx, ctx.Repo.Repository.ID, ruleName)\n\t\t\tif err != nil {\n\t\t\t\tctx.Error(http.StatusInternalServerError, \"FindAllMatchedBranches\", err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tfor 
_, branchName := range matchedBranches {\n\t\t\t\tif err = pull_service.CheckPRsForBaseBranch(ctx, ctx.Repo.Repository, branchName); err != nil {\n\t\t\t\t\tctx.Error(http.StatusInternalServerError, \"CheckPRsForBaseBranch\", err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t// Reload from db to get all whitelists\n\tbp, err := git_model.GetProtectedBranchRuleByName(ctx, ctx.Repo.Repository.ID, ruleName)\n\tif err != nil {\n\t\tctx.Error(http.StatusInternalServerError, \"GetProtectedBranchByID\", err)\n\t\treturn\n\t}\n\tif bp == nil || bp.RepoID != ctx.Repo.Repository.ID {\n\t\tctx.Error(http.StatusInternalServerError, \"New branch protection not found\", err)\n\t\treturn\n\t}\n\n\tctx.JSON(http.StatusCreated, convert.ToBranchProtection(bp))\n}", "func DeleteBranchProtection(ctx *context.APIContext) {\n\t// swagger:operation DELETE /repos/{owner}/{repo}/branch_protections/{name} repository repoDeleteBranchProtection\n\t// ---\n\t// summary: Delete a specific branch protection for the repository\n\t// produces:\n\t// - application/json\n\t// parameters:\n\t// - name: owner\n\t// in: path\n\t// description: owner of the repo\n\t// type: string\n\t// required: true\n\t// - name: repo\n\t// in: path\n\t// description: name of the repo\n\t// type: string\n\t// required: true\n\t// - name: name\n\t// in: path\n\t// description: name of protected branch\n\t// type: string\n\t// required: true\n\t// responses:\n\t// \"204\":\n\t// \"$ref\": \"#/responses/empty\"\n\t// \"404\":\n\t// \"$ref\": \"#/responses/notFound\"\n\n\trepo := ctx.Repo.Repository\n\tbpName := ctx.Params(\":name\")\n\tbp, err := git_model.GetProtectedBranchRuleByName(ctx, repo.ID, bpName)\n\tif err != nil {\n\t\tctx.Error(http.StatusInternalServerError, \"GetProtectedBranchByID\", err)\n\t\treturn\n\t}\n\tif bp == nil || bp.RepoID != repo.ID {\n\t\tctx.NotFound()\n\t\treturn\n\t}\n\n\tif err := git_model.DeleteProtectedBranch(ctx, ctx.Repo.Repository.ID, bp.ID); err != nil {\n\t\tctx.Error(http.StatusInternalServerError, \"DeleteProtectedBranch\", err)\n\t\treturn\n\t}\n\n\tctx.Status(http.StatusNoContent)\n}", "func (c *client) RemoveBranchProtection(org, repo, branch string) error {\n\tdurationLogger := c.log(\"RemoveBranchProtection\", org, repo, branch)\n\tdefer durationLogger()\n\n\t_, err := c.request(&request{\n\t\tmethod: http.MethodDelete,\n\t\tpath: fmt.Sprintf(\"/repos/%s/%s/branches/%s/protection\", org, repo, branch),\n\t\torg: org,\n\t\texitCodes: []int{204},\n\t}, nil)\n\treturn err\n}", "func (m *MarkerIndexBranchIDMapping) SetBranchID(index markers.Index, branchID ledgerstate.BranchID) {\n\tm.mappingMutex.Lock()\n\tdefer m.mappingMutex.Unlock()\n\n\tm.mapping.Set(index, branchID)\n}", "func (a *Client) UpdateBranch(params *UpdateBranchParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*UpdateBranchOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewUpdateBranchParams()\n\t}\n\top := &runtime.ClientOperation{\n\t\tID: \"updateBranch\",\n\t\tMethod: \"PUT\",\n\t\tPathPattern: \"/vcs/branch/{branchID}\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\", \"https\"},\n\t\tParams: params,\n\t\tReader: &UpdateBranchReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t}\n\tfor _, opt := range opts {\n\t\topt(op)\n\t}\n\n\tresult, err := a.transport.Submit(op)\n\tif err != nil 
{\n\t\treturn nil, err\n\t}\n\tsuccess, ok := result.(*UpdateBranchOK)\n\tif ok {\n\t\treturn success, nil\n\t}\n\t// unexpected success response\n\t// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue\n\tmsg := fmt.Sprintf(\"unexpected success response for updateBranch: API contract not enforced by server. Client expected to get an error, but got: %T\", result)\n\tpanic(msg)\n}", "func (r *SettingRepository) EditBranchByID(branch *models.Branch) error {\n\terr := r.C.Update(bson.M{\"_id\": branch.ID},\n\t\tbson.M{\"$set\": bson.M{\n\t\t\t\"name\": branch.Name,\n\t\t\t\"updatedat\": time.Now(),\n\t\t\t\"status\": branch.Status,\n\t\t}})\n\treturn err\n}", "func (m *MarkerBranchIDMappingManager) SetBranchID(marker *markers.Marker, branchID ledgerstate.BranchID) {\n\tm.tangle.Storage.MarkerIndexBranchIDMapping(marker.SequenceID(), NewMarkerIndexBranchIDMapping).Consume(func(markerIndexBranchIDMapping *MarkerIndexBranchIDMapping) {\n\t\tmarkerIndexBranchIDMapping.SetBranchID(marker.Index(), branchID)\n\t})\n}", "func GetProtectBranchesByRepoID(repoID int64) ([]*ProtectBranch, error) {\n\tprotectBranches := make([]*ProtectBranch, 0, 2)\n\treturn protectBranches, x.Where(\"repo_id = ? and protected = ?\", repoID, true).Asc(\"name\").Find(&protectBranches)\n}", "func (m *MarkerIndexBranchIDMapping) Update(other objectstorage.StorableObject) {\n\tpanic(\"updates disabled\")\n}", "func NewBranchProtection(ctx *pulumi.Context,\n\tname string, args *BranchProtectionArgs, opts ...pulumi.ResourceOption) (*BranchProtection, error) {\n\tif args == nil {\n\t\treturn nil, errors.New(\"missing one or more required arguments\")\n\t}\n\n\tif args.Branch == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'Branch'\")\n\t}\n\tif args.Project == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'Project'\")\n\t}\n\topts = internal.PkgResourceDefaultOpts(opts)\n\tvar resource BranchProtection\n\terr := ctx.RegisterResource(\"gitlab:index/branchProtection:BranchProtection\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func (s *UpdateTemplateSyncConfigInput) SetBranch(v string) *UpdateTemplateSyncConfigInput {\n\ts.Branch = &v\n\treturn s\n}", "func handleRepo(ctx context.Context, client *github.Client, repo *github.Repository) error {\n\topt := &github.ListOptions{\n\t\tPerPage: 100,\n\t}\n\n\tbranches, resp, err := client.Repositories.ListBranches(ctx, *repo.Owner.Login, *repo.Name, opt)\n\tif resp.StatusCode == http.StatusNotFound || resp.StatusCode == http.StatusForbidden {\n\t\treturn nil\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, branch := range branches {\n\t\tif branch.GetName() == \"master\" && in(orgs, *repo.Owner.Login) {\n\t\t\t// we must get the individual branch for the branch protection to work\n\t\t\tb, _, err := client.Repositories.GetBranch(ctx, *repo.Owner.Login, *repo.Name, branch.GetName())\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\t// return early if it is already protected\n\t\t\tif b.GetProtected() {\n\t\t\t\tfmt.Printf(\"[OK] %s:%s is already protected\\n\", *repo.FullName, b.GetName())\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\t\tfmt.Printf(\"[UPDATE] %s:%s will be changed to protected\\n\", *repo.FullName, b.GetName())\n\t\t\tif dryrun {\n\t\t\t\t// return early\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\t\t// set the branch to be protected\n\t\t\tif _, _, err := 
client.Repositories.UpdateBranchProtection(ctx, *repo.Owner.Login, *repo.Name, b.GetName(), &github.ProtectionRequest{\n\t\t\t\tRequiredStatusChecks: &github.RequiredStatusChecks{\n\t\t\t\t\tStrict: false,\n\t\t\t\t\tContexts: []string{},\n\t\t\t\t},\n\t\t\t}); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}", "func UpdateCompanyBranchHyCompanybranchBadRequest(t goatest.TInterface, ctx context.Context, service *goa.Service, ctrl app.HyCompanybranchController, id int, payload *app.UpdateCompanyBranchHyCompanybranchPayload) (http.ResponseWriter, error) {\n\t// Setup service\n\tvar (\n\t\tlogBuf bytes.Buffer\n\t\tresp interface{}\n\n\t\trespSetter goatest.ResponseSetterFunc = func(r interface{}) { resp = r }\n\t)\n\tif service == nil {\n\t\tservice = goatest.Service(&logBuf, respSetter)\n\t} else {\n\t\tlogger := log.New(&logBuf, \"\", log.Ltime)\n\t\tservice.WithLogger(goa.NewLogger(logger))\n\t\tnewEncoder := func(io.Writer) goa.Encoder { return respSetter }\n\t\tservice.Encoder = goa.NewHTTPEncoder() // Make sure the code ends up using this decoder\n\t\tservice.Encoder.Register(newEncoder, \"*/*\")\n\t}\n\n\t// Validate payload\n\terr := payload.Validate()\n\tif err != nil {\n\t\te, ok := err.(goa.ServiceError)\n\t\tif !ok {\n\t\t\tpanic(err) // bug\n\t\t}\n\t\treturn nil, e\n\t}\n\n\t// Setup request context\n\trw := httptest.NewRecorder()\n\tu := &url.URL{\n\t\tPath: fmt.Sprintf(\"/api/company/branch/%v\", id),\n\t}\n\treq, _err := http.NewRequest(\"PUT\", u.String(), nil)\n\tif _err != nil {\n\t\tpanic(\"invalid test \" + _err.Error()) // bug\n\t}\n\tprms := url.Values{}\n\tprms[\"ID\"] = []string{fmt.Sprintf(\"%v\", id)}\n\tif ctx == nil {\n\t\tctx = context.Background()\n\t}\n\tgoaCtx := goa.NewContext(goa.WithAction(ctx, \"HyCompanybranchTest\"), rw, req, prms)\n\tupdateCompanyBranchCtx, __err := app.NewUpdateCompanyBranchHyCompanybranchContext(goaCtx, req, service)\n\tif __err != nil {\n\t\t_e, _ok := __err.(goa.ServiceError)\n\t\tif !_ok {\n\t\t\tpanic(\"invalid test data \" + __err.Error()) // bug\n\t\t}\n\t\treturn nil, _e\n\t}\n\tupdateCompanyBranchCtx.Payload = payload\n\n\t// Perform action\n\t__err = ctrl.UpdateCompanyBranch(updateCompanyBranchCtx)\n\n\t// Validate response\n\tif __err != nil {\n\t\tt.Fatalf(\"controller returned %+v, logs:\\n%s\", __err, logBuf.String())\n\t}\n\tif rw.Code != 400 {\n\t\tt.Errorf(\"invalid response status code: got %+v, expected 400\", rw.Code)\n\t}\n\tvar mt error\n\tif resp != nil {\n\t\tvar __ok bool\n\t\tmt, __ok = resp.(error)\n\t\tif !__ok {\n\t\t\tt.Fatalf(\"invalid response media: got variable of type %T, value %+v, expected instance of error\", resp, resp)\n\t\t}\n\t}\n\n\t// Return results\n\treturn rw, mt\n}", "func UpdateCompanyBranchHyCompanybranchOKID(t goatest.TInterface, ctx context.Context, service *goa.Service, ctrl app.HyCompanybranchController, id int, payload *app.UpdateCompanyBranchHyCompanybranchPayload) (http.ResponseWriter, *app.CompanyID) {\n\t// Setup service\n\tvar (\n\t\tlogBuf bytes.Buffer\n\t\tresp interface{}\n\n\t\trespSetter goatest.ResponseSetterFunc = func(r interface{}) { resp = r }\n\t)\n\tif service == nil {\n\t\tservice = goatest.Service(&logBuf, respSetter)\n\t} else {\n\t\tlogger := log.New(&logBuf, \"\", log.Ltime)\n\t\tservice.WithLogger(goa.NewLogger(logger))\n\t\tnewEncoder := func(io.Writer) goa.Encoder { return respSetter }\n\t\tservice.Encoder = goa.NewHTTPEncoder() // Make sure the code ends up using this decoder\n\t\tservice.Encoder.Register(newEncoder, 
\"*/*\")\n\t}\n\n\t// Validate payload\n\terr := payload.Validate()\n\tif err != nil {\n\t\te, ok := err.(goa.ServiceError)\n\t\tif !ok {\n\t\t\tpanic(err) // bug\n\t\t}\n\t\tt.Errorf(\"unexpected payload validation error: %+v\", e)\n\t\treturn nil, nil\n\t}\n\n\t// Setup request context\n\trw := httptest.NewRecorder()\n\tu := &url.URL{\n\t\tPath: fmt.Sprintf(\"/api/company/branch/%v\", id),\n\t}\n\treq, _err := http.NewRequest(\"PUT\", u.String(), nil)\n\tif _err != nil {\n\t\tpanic(\"invalid test \" + _err.Error()) // bug\n\t}\n\tprms := url.Values{}\n\tprms[\"ID\"] = []string{fmt.Sprintf(\"%v\", id)}\n\tif ctx == nil {\n\t\tctx = context.Background()\n\t}\n\tgoaCtx := goa.NewContext(goa.WithAction(ctx, \"HyCompanybranchTest\"), rw, req, prms)\n\tupdateCompanyBranchCtx, __err := app.NewUpdateCompanyBranchHyCompanybranchContext(goaCtx, req, service)\n\tif __err != nil {\n\t\t_e, _ok := __err.(goa.ServiceError)\n\t\tif !_ok {\n\t\t\tpanic(\"invalid test data \" + __err.Error()) // bug\n\t\t}\n\t\tt.Errorf(\"unexpected parameter validation error: %+v\", _e)\n\t\treturn nil, nil\n\t}\n\tupdateCompanyBranchCtx.Payload = payload\n\n\t// Perform action\n\t__err = ctrl.UpdateCompanyBranch(updateCompanyBranchCtx)\n\n\t// Validate response\n\tif __err != nil {\n\t\tt.Fatalf(\"controller returned %+v, logs:\\n%s\", __err, logBuf.String())\n\t}\n\tif rw.Code != 200 {\n\t\tt.Errorf(\"invalid response status code: got %+v, expected 200\", rw.Code)\n\t}\n\tvar mt *app.CompanyID\n\tif resp != nil {\n\t\tvar __ok bool\n\t\tmt, __ok = resp.(*app.CompanyID)\n\t\tif !__ok {\n\t\t\tt.Fatalf(\"invalid response media: got variable of type %T, value %+v, expected instance of app.CompanyID\", resp, resp)\n\t\t}\n\t\t__err = mt.Validate()\n\t\tif __err != nil {\n\t\t\tt.Errorf(\"invalid response media type: %s\", __err)\n\t\t}\n\t}\n\n\t// Return results\n\treturn rw, mt\n}", "func (s *UpdateServiceSyncConfigInput) SetBranch(v string) *UpdateServiceSyncConfigInput {\n\ts.Branch = &v\n\treturn s\n}", "func (s *CreateTemplateSyncConfigInput) SetBranch(v string) *CreateTemplateSyncConfigInput {\n\ts.Branch = &v\n\treturn s\n}", "func (v *VersionHistory) SetBranchToken(\n\tinputToken []byte,\n) error {\n\n\ttoken := make([]byte, len(inputToken))\n\tcopy(token, inputToken)\n\tv.BranchToken = token\n\treturn nil\n}", "func UpdateCgroupDeviceWriteBps(pid, innerPath, value string) error {\n\tif pid == \"0\" {\n\t\treturn nil\n\t}\n\n\tcgroupPath, err := FindCgroupPath(pid, \"blkio\", innerPath)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpath := filepath.Join(cgroupPath, \"blkio.throttle.write_bps_device\")\n\tif err := ioutil.WriteFile(path, []byte(value), 0600); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (service *Service) UpdateProtectionMode(d9SecurityGroupID, protectionMode string) (*CloudSecurityGroupResponse, *http.Response, error) {\n\tif protectionMode != \"FullManage\" && protectionMode != \"ReadOnly\" {\n\t\treturn nil, nil, fmt.Errorf(\"protection mode can be FullManage or ReadOnly\")\n\t}\n\n\tv := new(CloudSecurityGroupResponse)\n\trelativeURL := fmt.Sprintf(\"%s/%s/%s\", awsSgResourcePath, d9SecurityGroupID, awsSgResourceProtectionMode)\n\tbody := UpdateProtectionModeQueryParameters{\n\t\tProtectionMode: protectionMode,\n\t}\n\n\tresp, err := service.Client.NewRequestDo(\"POST\", relativeURL, nil, body, v)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treturn v, resp, nil\n}", "func (mr *MockRepositoryClientMockRecorder) RemoveBranchProtection(org, repo, branch interface{}) 
*gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"RemoveBranchProtection\", reflect.TypeOf((*MockRepositoryClient)(nil).RemoveBranchProtection), org, repo, branch)\n}", "func (s *CreateServiceSyncConfigInput) SetBranch(v string) *CreateServiceSyncConfigInput {\n\ts.Branch = &v\n\treturn s\n}", "func (b *BranchDAG) SetBranchConfirmed(branchID BranchID) (err error) {\n\tif b.InclusionState(branchID) == Confirmed {\n\t\treturn\n\t}\n\n\tif _, branchErr := b.SetBranchMonotonicallyLiked(branchID, true); branchErr != nil {\n\t\terr = errors.Errorf(\"failed to set Branch with %s to be monotonically liked: %w\", branchID, branchErr)\n\t\treturn\n\t}\n\n\tif _, branchErr := b.SetBranchFinalized(branchID, true); branchErr != nil {\n\t\terr = errors.Errorf(\"failed to set Branch with %s to be finalized: %w\", branchID, branchErr)\n\t\treturn\n\t}\n\n\treturn\n}", "func (patchwork *Patchwork) Branch(branch string) {\n\tpatchwork.branch = branch\n}", "func (mr *MockSendUpdateQueryMockRecorder) UpdateBranch(arg0, arg1, arg2 interface{}) *gomock.Call {\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"UpdateBranch\", reflect.TypeOf((*MockSendUpdateQuery)(nil).UpdateBranch), arg0, arg1, arg2)\n}", "func (s *TemplateSyncConfig) SetBranch(v string) *TemplateSyncConfig {\n\ts.Branch = &v\n\treturn s\n}", "func (r *SettingRepository) AddBranchByOrgID(orgID string) (branch models.Branch, err error) {\n\tobjID := bson.NewObjectId()\n\tbranch.ID = objID\n\tbranch.OrgID = bson.ObjectIdHex(orgID)\n\tbranch.Status = \"Active\"\n\tbranch.CreatedAt = time.Now()\n\tbranch.UpdatedAt = time.Now()\n\n\terr = r.C.Insert(&branch)\n\treturn\n}", "func (mr *MockClientMockRecorder) RemoveBranchProtection(org, repo, branch interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"RemoveBranchProtection\", reflect.TypeOf((*MockClient)(nil).RemoveBranchProtection), org, repo, branch)\n}", "func UpdateCompanyBranchHyCompanybranchNotFound(t goatest.TInterface, ctx context.Context, service *goa.Service, ctrl app.HyCompanybranchController, id int, payload *app.UpdateCompanyBranchHyCompanybranchPayload) http.ResponseWriter {\n\t// Setup service\n\tvar (\n\t\tlogBuf bytes.Buffer\n\n\t\trespSetter goatest.ResponseSetterFunc = func(r interface{}) {}\n\t)\n\tif service == nil {\n\t\tservice = goatest.Service(&logBuf, respSetter)\n\t} else {\n\t\tlogger := log.New(&logBuf, \"\", log.Ltime)\n\t\tservice.WithLogger(goa.NewLogger(logger))\n\t\tnewEncoder := func(io.Writer) goa.Encoder { return respSetter }\n\t\tservice.Encoder = goa.NewHTTPEncoder() // Make sure the code ends up using this decoder\n\t\tservice.Encoder.Register(newEncoder, \"*/*\")\n\t}\n\n\t// Validate payload\n\terr := payload.Validate()\n\tif err != nil {\n\t\te, ok := err.(goa.ServiceError)\n\t\tif !ok {\n\t\t\tpanic(err) // bug\n\t\t}\n\t\tt.Errorf(\"unexpected payload validation error: %+v\", e)\n\t\treturn nil\n\t}\n\n\t// Setup request context\n\trw := httptest.NewRecorder()\n\tu := &url.URL{\n\t\tPath: fmt.Sprintf(\"/api/company/branch/%v\", id),\n\t}\n\treq, _err := http.NewRequest(\"PUT\", u.String(), nil)\n\tif _err != nil {\n\t\tpanic(\"invalid test \" + _err.Error()) // bug\n\t}\n\tprms := url.Values{}\n\tprms[\"ID\"] = []string{fmt.Sprintf(\"%v\", id)}\n\tif ctx == nil {\n\t\tctx = context.Background()\n\t}\n\tgoaCtx := goa.NewContext(goa.WithAction(ctx, \"HyCompanybranchTest\"), rw, req, prms)\n\tupdateCompanyBranchCtx, __err := 
app.NewUpdateCompanyBranchHyCompanybranchContext(goaCtx, req, service)\n\tif __err != nil {\n\t\t_e, _ok := __err.(goa.ServiceError)\n\t\tif !_ok {\n\t\t\tpanic(\"invalid test data \" + __err.Error()) // bug\n\t\t}\n\t\tt.Errorf(\"unexpected parameter validation error: %+v\", _e)\n\t\treturn nil\n\t}\n\tupdateCompanyBranchCtx.Payload = payload\n\n\t// Perform action\n\t__err = ctrl.UpdateCompanyBranch(updateCompanyBranchCtx)\n\n\t// Validate response\n\tif __err != nil {\n\t\tt.Fatalf(\"controller returned %+v, logs:\\n%s\", __err, logBuf.String())\n\t}\n\tif rw.Code != 404 {\n\t\tt.Errorf(\"invalid response status code: got %+v, expected 404\", rw.Code)\n\t}\n\n\t// Return results\n\treturn rw\n}", "func (m *MockSendUpdateQuery) UpdateBranch(arg0 context.Context, arg1 interfaces.NewBranch, arg2 bool) error {\n\tret := m.ctrl.Call(m, \"UpdateBranch\", arg0, arg1, arg2)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func UpdateLimitAccount(currentLimit float64) {\n\n\terr := utils.CheckTempFile(PathFileC)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tjsonFile, err := os.Open(PathFileC)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\tdefer jsonFile.Close()\n\n\taccountJSON, err := ioutil.ReadAll(jsonFile)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\n\tvar account models.Account\n\n\terr = json.Unmarshal(accountJSON, &account)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\n\taccount.Accounts.AvailableLimit = currentLimit\n\n\taccountJSON, err = json.Marshal(account)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\n\terr = ioutil.WriteFile(PathFileC, accountJSON, 0644)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n}", "func (c APIClient) SetBranch(repoName string, commit string, branch string) error {\n\treturn c.CreateBranch(repoName, branch, commit, nil)\n}", "func ListBranchProtections(ctx *context.APIContext) {\n\t// swagger:operation GET /repos/{owner}/{repo}/branch_protections repository repoListBranchProtection\n\t// ---\n\t// summary: List branch protections for a repository\n\t// produces:\n\t// - application/json\n\t// parameters:\n\t// - name: owner\n\t// in: path\n\t// description: owner of the repo\n\t// type: string\n\t// required: true\n\t// - name: repo\n\t// in: path\n\t// description: name of the repo\n\t// type: string\n\t// required: true\n\t// responses:\n\t// \"200\":\n\t// \"$ref\": \"#/responses/BranchProtectionList\"\n\n\trepo := ctx.Repo.Repository\n\tbps, err := git_model.FindRepoProtectedBranchRules(ctx, repo.ID)\n\tif err != nil {\n\t\tctx.Error(http.StatusInternalServerError, \"GetProtectedBranches\", err)\n\t\treturn\n\t}\n\tapiBps := make([]*api.BranchProtection, len(bps))\n\tfor i := range bps {\n\t\tapiBps[i] = convert.ToBranchProtection(bps[i])\n\t}\n\n\tctx.JSON(http.StatusOK, apiBps)\n}", "func (c *client) GetBranchProtection(org, repo, branch string) (*BranchProtection, error) {\n\tdurationLogger := c.log(\"GetBranchProtection\", org, repo, branch)\n\tdefer durationLogger()\n\n\tcode, body, err := c.requestRaw(&request{\n\t\tmethod: http.MethodGet,\n\t\tpath: fmt.Sprintf(\"/repos/%s/%s/branches/%s/protection\", org, repo, branch),\n\t\torg: org,\n\t\t// GitHub returns 404 for this call if either:\n\t\t// - The branch is not protected\n\t\t// - The access token used does not have sufficient privileges\n\t\t// We therefore need to introspect the response body.\n\t\texitCodes: []int{200, 404},\n\t})\n\n\tswitch {\n\tcase err != nil:\n\t\treturn nil, err\n\tcase code == 200:\n\t\tvar bp BranchProtection\n\t\tif err := json.Unmarshal(body, &bp); err != 
nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn &bp, nil\n\tcase code == 404:\n\t\t// continue\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"unexpected status code: %d\", code)\n\t}\n\n\tvar ge githubError\n\tif err := json.Unmarshal(body, &ge); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// If the error was because the branch is not protected, we return a\n\t// nil pointer to indicate this.\n\tif ge.Message == \"Branch not protected\" {\n\t\treturn nil, nil\n\t}\n\n\t// Otherwise we got some other 404 error.\n\treturn nil, fmt.Errorf(\"getting branch protection 404: %s\", ge.Message)\n}", "func (s *GetRepositorySyncStatusInput) SetBranch(v string) *GetRepositorySyncStatusInput {\n\ts.Branch = &v\n\treturn s\n}", "func UpdateCompanyBranchHyCompanybranchOK(t goatest.TInterface, ctx context.Context, service *goa.Service, ctrl app.HyCompanybranchController, id int, payload *app.UpdateCompanyBranchHyCompanybranchPayload) (http.ResponseWriter, *app.Company) {\n\t// Setup service\n\tvar (\n\t\tlogBuf bytes.Buffer\n\t\tresp interface{}\n\n\t\trespSetter goatest.ResponseSetterFunc = func(r interface{}) { resp = r }\n\t)\n\tif service == nil {\n\t\tservice = goatest.Service(&logBuf, respSetter)\n\t} else {\n\t\tlogger := log.New(&logBuf, \"\", log.Ltime)\n\t\tservice.WithLogger(goa.NewLogger(logger))\n\t\tnewEncoder := func(io.Writer) goa.Encoder { return respSetter }\n\t\tservice.Encoder = goa.NewHTTPEncoder() // Make sure the code ends up using this decoder\n\t\tservice.Encoder.Register(newEncoder, \"*/*\")\n\t}\n\n\t// Validate payload\n\terr := payload.Validate()\n\tif err != nil {\n\t\te, ok := err.(goa.ServiceError)\n\t\tif !ok {\n\t\t\tpanic(err) // bug\n\t\t}\n\t\tt.Errorf(\"unexpected payload validation error: %+v\", e)\n\t\treturn nil, nil\n\t}\n\n\t// Setup request context\n\trw := httptest.NewRecorder()\n\tu := &url.URL{\n\t\tPath: fmt.Sprintf(\"/api/company/branch/%v\", id),\n\t}\n\treq, _err := http.NewRequest(\"PUT\", u.String(), nil)\n\tif _err != nil {\n\t\tpanic(\"invalid test \" + _err.Error()) // bug\n\t}\n\tprms := url.Values{}\n\tprms[\"ID\"] = []string{fmt.Sprintf(\"%v\", id)}\n\tif ctx == nil {\n\t\tctx = context.Background()\n\t}\n\tgoaCtx := goa.NewContext(goa.WithAction(ctx, \"HyCompanybranchTest\"), rw, req, prms)\n\tupdateCompanyBranchCtx, __err := app.NewUpdateCompanyBranchHyCompanybranchContext(goaCtx, req, service)\n\tif __err != nil {\n\t\t_e, _ok := __err.(goa.ServiceError)\n\t\tif !_ok {\n\t\t\tpanic(\"invalid test data \" + __err.Error()) // bug\n\t\t}\n\t\tt.Errorf(\"unexpected parameter validation error: %+v\", _e)\n\t\treturn nil, nil\n\t}\n\tupdateCompanyBranchCtx.Payload = payload\n\n\t// Perform action\n\t__err = ctrl.UpdateCompanyBranch(updateCompanyBranchCtx)\n\n\t// Validate response\n\tif __err != nil {\n\t\tt.Fatalf(\"controller returned %+v, logs:\\n%s\", __err, logBuf.String())\n\t}\n\tif rw.Code != 200 {\n\t\tt.Errorf(\"invalid response status code: got %+v, expected 200\", rw.Code)\n\t}\n\tvar mt *app.Company\n\tif resp != nil {\n\t\tvar __ok bool\n\t\tmt, __ok = resp.(*app.Company)\n\t\tif !__ok {\n\t\t\tt.Fatalf(\"invalid response media: got variable of type %T, value %+v, expected instance of app.Company\", resp, resp)\n\t\t}\n\t\t__err = mt.Validate()\n\t\tif __err != nil {\n\t\t\tt.Errorf(\"invalid response media type: %s\", __err)\n\t\t}\n\t}\n\n\t// Return results\n\treturn rw, mt\n}", "func (client *Client) UpdateLoadBalancerProtectionWithOptions(request *UpdateLoadBalancerProtectionRequest, runtime *util.RuntimeOptions) (_result 
*UpdateLoadBalancerProtectionResponse, _err error) {\n\t_err = util.ValidateModel(request)\n\tif _err != nil {\n\t\treturn _result, _err\n\t}\n\tbody := map[string]interface{}{}\n\tif !tea.BoolValue(util.IsUnset(request.ClientToken)) {\n\t\tbody[\"ClientToken\"] = request.ClientToken\n\t}\n\n\tif !tea.BoolValue(util.IsUnset(request.DeletionProtectionEnabled)) {\n\t\tbody[\"DeletionProtectionEnabled\"] = request.DeletionProtectionEnabled\n\t}\n\n\tif !tea.BoolValue(util.IsUnset(request.DeletionProtectionReason)) {\n\t\tbody[\"DeletionProtectionReason\"] = request.DeletionProtectionReason\n\t}\n\n\tif !tea.BoolValue(util.IsUnset(request.DryRun)) {\n\t\tbody[\"DryRun\"] = request.DryRun\n\t}\n\n\tif !tea.BoolValue(util.IsUnset(request.LoadBalancerId)) {\n\t\tbody[\"LoadBalancerId\"] = request.LoadBalancerId\n\t}\n\n\tif !tea.BoolValue(util.IsUnset(request.ModificationProtectionReason)) {\n\t\tbody[\"ModificationProtectionReason\"] = request.ModificationProtectionReason\n\t}\n\n\tif !tea.BoolValue(util.IsUnset(request.ModificationProtectionStatus)) {\n\t\tbody[\"ModificationProtectionStatus\"] = request.ModificationProtectionStatus\n\t}\n\n\tif !tea.BoolValue(util.IsUnset(request.RegionId)) {\n\t\tbody[\"RegionId\"] = request.RegionId\n\t}\n\n\treq := &openapi.OpenApiRequest{\n\t\tBody: openapiutil.ParseToMap(body),\n\t}\n\tparams := &openapi.Params{\n\t\tAction: tea.String(\"UpdateLoadBalancerProtection\"),\n\t\tVersion: tea.String(\"2022-04-30\"),\n\t\tProtocol: tea.String(\"HTTPS\"),\n\t\tPathname: tea.String(\"/\"),\n\t\tMethod: tea.String(\"POST\"),\n\t\tAuthType: tea.String(\"AK\"),\n\t\tStyle: tea.String(\"RPC\"),\n\t\tReqBodyType: tea.String(\"formData\"),\n\t\tBodyType: tea.String(\"json\"),\n\t}\n\t_result = &UpdateLoadBalancerProtectionResponse{}\n\t_body, _err := client.CallApi(params, req, runtime)\n\tif _err != nil {\n\t\treturn _result, _err\n\t}\n\t_err = tea.Convert(_body, &_result)\n\treturn _result, _err\n}", "func (s *ServiceSyncConfig) SetBranch(v string) *ServiceSyncConfig {\n\ts.Branch = &v\n\treturn s\n}", "func GetBranchProtection(ctx *context.APIContext) {\n\t// swagger:operation GET /repos/{owner}/{repo}/branch_protections/{name} repository repoGetBranchProtection\n\t// ---\n\t// summary: Get a specific branch protection for the repository\n\t// produces:\n\t// - application/json\n\t// parameters:\n\t// - name: owner\n\t// in: path\n\t// description: owner of the repo\n\t// type: string\n\t// required: true\n\t// - name: repo\n\t// in: path\n\t// description: name of the repo\n\t// type: string\n\t// required: true\n\t// - name: name\n\t// in: path\n\t// description: name of protected branch\n\t// type: string\n\t// required: true\n\t// responses:\n\t// \"200\":\n\t// \"$ref\": \"#/responses/BranchProtection\"\n\t// \"404\":\n\t// \"$ref\": \"#/responses/notFound\"\n\n\trepo := ctx.Repo.Repository\n\tbpName := ctx.Params(\":name\")\n\tbp, err := git_model.GetProtectedBranchRuleByName(ctx, repo.ID, bpName)\n\tif err != nil {\n\t\tctx.Error(http.StatusInternalServerError, \"GetProtectedBranchByID\", err)\n\t\treturn\n\t}\n\tif bp == nil || bp.RepoID != repo.ID {\n\t\tctx.NotFound()\n\t\treturn\n\t}\n\n\tctx.JSON(http.StatusOK, convert.ToBranchProtection(bp))\n}", "func (r *WorkbookWorksheetProtectionRequest) Update(ctx context.Context, reqObj *WorkbookWorksheetProtection) error {\n\treturn r.JSONRequest(ctx, \"PATCH\", \"\", reqObj, nil)\n}", "func SetDefaultBranch(ctx context.Context, ownerName, repoName, branch string) ResponseExtra {\n\treqURL := setting.LocalURL + 
fmt.Sprintf(\"api/internal/hook/set-default-branch/%s/%s/%s\",\n\t\turl.PathEscape(ownerName),\n\t\turl.PathEscape(repoName),\n\t\turl.PathEscape(branch),\n\t)\n\treq := newInternalRequest(ctx, reqURL, \"POST\")\n\t_, extra := requestJSONResp(req, &responseText{})\n\treturn extra\n}", "func (l *BranchLocker) Writer(ctx context.Context, repositoryID graveler.RepositoryID, branchID graveler.BranchID, lockedFn graveler.BranchLockerFunc) (interface{}, error) {\n\twriterLockKey, _ := calculateBranchLockerKeys(repositoryID, branchID)\n\treturn l.db.Transact(func(tx db.Tx) (interface{}, error) {\n\t\t// try lock committer key\n\t\tvar locked bool\n\t\terr := tx.GetPrimitive(&locked, `SELECT pg_try_advisory_xact_lock_shared($1)`, writerLockKey)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"%w (%d): %s\", graveler.ErrLockNotAcquired, writerLockKey, err)\n\t\t}\n\t\tif !locked {\n\t\t\treturn nil, fmt.Errorf(\"%w (%d)\", graveler.ErrLockNotAcquired, writerLockKey)\n\t\t}\n\t\treturn lockedFn()\n\t}, db.WithContext(ctx), db.WithIsolationLevel(pgx.ReadCommitted))\n}", "func (client *Client) UpdateLoadBalancerProtection(request *UpdateLoadBalancerProtectionRequest) (_result *UpdateLoadBalancerProtectionResponse, _err error) {\n\truntime := &util.RuntimeOptions{}\n\t_result = &UpdateLoadBalancerProtectionResponse{}\n\t_body, _err := client.UpdateLoadBalancerProtectionWithOptions(request, runtime)\n\tif _err != nil {\n\t\treturn _result, _err\n\t}\n\t_result = _body\n\treturn _result, _err\n}", "func (b *Booker) UpdateMessagesBranch(transactionID ledgerstate.TransactionID) {\n\tb.tangle.Utils.WalkMessageAndMetadata(func(message *Message, messageMetadata *MessageMetadata, walker *walker.Walker) {\n\t\tif messageMetadata.IsBooked() {\n\t\t\tinheritedBranch, inheritErr := b.tangle.LedgerState.InheritBranch(b.branchIDsOfParents(message).Add(b.branchIDOfPayload(message)))\n\t\t\tif inheritErr != nil {\n\t\t\t\tpanic(xerrors.Errorf(\"failed to inherit Branch when booking Message with %s: %w\", message.ID(), inheritErr))\n\t\t\t}\n\t\t\tif messageMetadata.SetBranchID(inheritedBranch) {\n\t\t\t\tfor _, approvingMessageID := range b.tangle.Utils.ApprovingMessageIDs(message.ID(), StrongApprover) {\n\t\t\t\t\twalker.Push(approvingMessageID)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}, b.tangle.Storage.AttachmentMessageIDs(transactionID), true)\n}", "func (pu *PendingloanbindingUpdate) SetMainBranch(s string) *PendingloanbindingUpdate {\n\tpu.mutation.SetMainBranch(s)\n\treturn pu\n}", "func DestroyBranch(db *sqlx.DB, id int) (int64, error) {\n\n\t_, err := db.Exec(\"DELETE FROM branches WHERE id=$1\", id)\n\n\tif err != nil {\n\t\treturn 400, err\n\t}\n\n\treturn 200, nil\n}", "func (m *ProtectGroup) SetPrivacy(value *GroupPrivacy)() {\n err := m.GetBackingStore().Set(\"privacy\", value)\n if err != nil {\n panic(err)\n }\n}", "func (l *BranchLocker) Writer(ctx context.Context, repositoryID graveler.RepositoryID, branchID graveler.BranchID, lockedFn graveler.BranchLockerFunc) (interface{}, error) {\n\twriterLockKey := calculateBranchLockerKey(repositoryID, branchID)\n\treturn l.db.Transact(ctx, func(tx db.Tx) (interface{}, error) {\n\t\t// try to get a shared lock on the writer key\n\t\t_, err := tx.Exec(`SELECT pg_advisory_xact_lock_shared($1);`, writerLockKey)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"%w (%d): %s\", graveler.ErrLockNotAcquired, writerLockKey, err)\n\t\t}\n\t\treturn lockedFn()\n\t}, db.WithIsolationLevel(pgx.ReadCommitted))\n}", "func (s *PaymentStorage) UpdateBeneficiary(ctx 
context.Context, id aggregate.ID, beneficiary transaction.BankAccount) error {\n\tlogger := log.FromContext(ctx)\n\n\tjsBeneficiary, err := json.Marshal(beneficiary)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tquery := `UPDATE %[1]s \n\t\t\t SET attributes = attributes::jsonb || '{\"beneficiary_party\": %[2]s}'::jsonb\n\t\t\t WHERE id = $1`\n\tquery = fmt.Sprintf(query, s.table, string(jsBeneficiary))\n\n\tif logger != nil {\n\t\tlogger.Debugf(\"exec in transaction sql %s, values %+v\", query, []interface{}{\n\t\t\tid,\n\t\t})\n\t}\n\n\treturn execInTransaction(s.db, func(tx *sqlx.Tx) error {\n\t\t_, err := tx.ExecContext(ctx, query, id)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t})\n}", "func (puo *PendingloanbindingUpdateOne) SetMainBranch(s string) *PendingloanbindingUpdateOne {\n\tpuo.mutation.SetMainBranch(s)\n\treturn puo\n}", "func (buo *BankUpdateOne) AddBranchIDs(ids ...int) *BankUpdateOne {\n\tbuo.mutation.AddBranchIDs(ids...)\n\treturn buo\n}", "func (c *Client) UpdateBarcodeRule(br *BarcodeRule) error {\n\treturn c.UpdateBarcodeRules([]int64{br.Id.Get()}, br)\n}", "func (s *RepositorySyncDefinition) SetBranch(v string) *RepositorySyncDefinition {\n\ts.Branch = &v\n\treturn s\n}", "func (o BranchProtectionOutput) BranchProtectionId() pulumi.IntOutput {\n\treturn o.ApplyT(func(v *BranchProtection) pulumi.IntOutput { return v.BranchProtectionId }).(pulumi.IntOutput)\n}", "func (mr *MockRepositoryClientMockRecorder) GetBranchProtection(org, repo, branch interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"GetBranchProtection\", reflect.TypeOf((*MockRepositoryClient)(nil).GetBranchProtection), org, repo, branch)\n}", "func (t *Commit) SetBranch(v string) {\n\tt.Branch = &v\n}", "func (o BranchProtectionOutput) Branch() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *BranchProtection) pulumi.StringOutput { return v.Branch }).(pulumi.StringOutput)\n}", "func (token *Token) Update(db *sqlx.DB) {\n\t\n\tif strings.TrimSpace(token.AccessToken) == \"\" || strings.TrimSpace(token.RefreshToken) == \"\" || token.RefreshTokenExpiry == 0 || token.AccessTokenExpiry == 0 || strings.TrimSpace(token.DeviceID) == \"\" || strings.TrimSpace(token.MacAddress) == \"\" || strings.TrimSpace(token.APIKey) == \"\" || token.Status == 0{\n\t\tfmt.Println(\"Missing Fields from tokens\")\n\t\treturn\n\t}\n\t\n\tif strings.TrimSpace(token.ID) == \"\" {\n\t\tfmt.Println(\"Empty ID\")\n\t\treturn\n\t}\n\n\tsql := \"UPDATE tokens SET access_token=?,access_token_time=?,access_token_expiry=?,user_id=?,status=?,api_key=?,refresh_token=?,refresh_token_time=?,refresh_token_expiry=?,device_id=?,mac_address=? 
WHERE id='\" + token.ID + \"'\"\n\n\t_, err := db.Exec(sql,\n\t\ttoken.AccessToken,\n\t\ttoken.AccessTokenTime,\n\t\ttoken.AccessTokenExpiry,\n\t\ttoken.UserID,\n\t\ttoken.Status,\n\t\ttoken.APIKey,\n\t\ttoken.RefreshToken,\n\t\ttoken.RefreshTokenTime,\n\t\ttoken.RefreshTokenExpiry,\n\t\ttoken.DeviceID,\n\t\ttoken.MacAddress,\n\t)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n}", "func StoreBranch(db *sqlx.DB, branch *Branch) (int64, error) {\n\n\tif branch.ID != 0 {\n\t\tinsertBranch := `UPDATE branches SET code = $1, name = $2 WHERE id = $3`\n\t\t_, err := db.Exec(insertBranch, branch.Code, branch.Name, branch.ID)\n\n\t\tif err != nil {\n\t\t\treturn 404, err\n\t\t}\n\n\t} else {\n\t\tinsertBranch := `INSERT INTO branches (code, name) VALUES ($1, $2) RETURNING id`\n\t\t_, err := db.Exec(insertBranch, branch.Code, branch.Name)\n\n\t\tif err != nil {\n\t\t\treturn 404, err\n\t\t}\n\t}\n\n\treturn 200, nil\n}", "func Branch(branch string) GitOptions {\n\treturn func(o *options) error {\n\t\to.branch = branch\n\t\treturn nil\n\t}\n}", "func (r *DeviceCompliancePolicyAssignmentRequest) Update(ctx context.Context, reqObj *DeviceCompliancePolicyAssignment) error {\n\treturn r.JSONRequest(ctx, \"PATCH\", \"\", reqObj, nil)\n}", "func (mr *MockClientMockRecorder) GetBranchProtection(org, repo, branch interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"GetBranchProtection\", reflect.TypeOf((*MockClient)(nil).GetBranchProtection), org, repo, branch)\n}", "func (s *RepositoryBranchInput_) SetBranch(v string) *RepositoryBranchInput_ {\n\ts.Branch = &v\n\treturn s\n}", "func (auup *AuthUserUserPermission) Update(ctx context.Context, db DB) error {\n\tswitch {\n\tcase !auup._exists: // doesn't exist\n\t\treturn logerror(&ErrUpdateFailed{ErrDoesNotExist})\n\tcase auup._deleted: // deleted\n\t\treturn logerror(&ErrUpdateFailed{ErrMarkedForDeletion})\n\t}\n\t// update with primary key\n\tconst sqlstr = `UPDATE django.auth_user_user_permissions SET ` +\n\t\t`user_id = :1, permission_id = :2 ` +\n\t\t`WHERE id = :3`\n\t// run\n\tlogf(sqlstr, auup.UserID, auup.PermissionID, auup.ID)\n\tif _, err := db.ExecContext(ctx, sqlstr, auup.UserID, auup.PermissionID, auup.ID); err != nil {\n\t\treturn logerror(err)\n\t}\n\treturn nil\n}", "func d4addBranch(branch *d4branchT, node *d4nodeT, newNode **d4nodeT) bool {\n\tif node.count < d4maxNodes { // Split won't be necessary\n\t\tnode.branch[node.count] = *branch\n\t\tnode.count++\n\t\treturn false\n\t} else {\n\t\td4splitNode(node, branch, newNode)\n\t\treturn true\n\t}\n}", "func (m *MockClient) RemoveBranchProtection(org, repo, branch string) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"RemoveBranchProtection\", org, repo, branch)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (m *MockRepositoryClient) RemoveBranchProtection(org, repo, branch string) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"RemoveBranchProtection\", org, repo, branch)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (r *DeviceCompliancePolicyGroupAssignmentRequest) Update(ctx context.Context, reqObj *DeviceCompliancePolicyGroupAssignment) error {\n\treturn r.JSONRequest(ctx, \"PATCH\", \"\", reqObj, nil)\n}", "func (r *SettingRepository) DeleteBranchById(id string) error {\n\terr := r.C.Remove(bson.M{\"_id\": bson.ObjectIdHex(id)})\n\treturn err\n}", "func d16addBranch(branch *d16branchT, node *d16nodeT, newNode **d16nodeT) bool {\n\tif node.count < d16maxNodes { // Split won't be 
necessary\n\t\tnode.branch[node.count] = *branch\n\t\tnode.count++\n\t\treturn false\n\t} else {\n\t\td16splitNode(node, branch, newNode)\n\t\treturn true\n\t}\n}", "func (db *MySQLDB) UpdateTenant(ctx context.Context, tenant *Tenant) error {\n\tfLog := mysqlLog.WithField(\"func\", \"UpdateTenant\").WithField(\"RequestID\", ctx.Value(constants.RequestID))\n\n\texist, err := db.IsTenantRecIDExist(ctx, tenant.RecID)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif !exist {\n\t\treturn ErrNotFound\n\t}\n\n\torigin, err := db.GetTenantByRecID(ctx, tenant.RecID)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdomainChanged := origin.Domain != tenant.Domain\n\n\tq := \"UPDATE HANSIP_TENANT SET TENANT_NAME=?, TENANT_DOMAIN=?, DESCRIPTION=? WHERE REC_ID=?\"\n\t_, err = db.instance.ExecContext(ctx, q,\n\t\ttenant.Name, tenant.Domain, tenant.Description, tenant.RecID)\n\tif err != nil {\n\t\tfLog.Errorf(\"db.instance.ExecContext got %s. SQL = %s\", err.Error(), q)\n\t\treturn &ErrDBExecuteError{\n\t\t\tWrapped: err,\n\t\t\tMessage: \"Error UpdateTenant\",\n\t\t\tSQL: q,\n\t\t}\n\t}\n\n\tif domainChanged {\n\t\tq = \"UPDATE HANSIP_ROLE SET ROLE_DOMAIN=? WHERE ROLE_DOMAIN=?\"\n\t\t_, err = db.instance.ExecContext(ctx, q,\n\t\t\ttenant.Domain, origin.Domain)\n\t\tif err != nil {\n\t\t\tfLog.Errorf(\"db.instance.ExecContext got %s. SQL = %s\", err.Error(), q)\n\t\t\treturn &ErrDBExecuteError{\n\t\t\t\tWrapped: err,\n\t\t\t\tMessage: \"Error UpdateTenant\",\n\t\t\t\tSQL: q,\n\t\t\t}\n\t\t}\n\n\t\tq = \"UPDATE HANSIP_GROUP SET GROUP_DOMAIN=? WHERE GROUP_DOMAIN=?\"\n\t\t_, err = db.instance.ExecContext(ctx, q,\n\t\t\ttenant.Domain, origin.Domain)\n\t\tif err != nil {\n\t\t\tfLog.Errorf(\"db.instance.ExecContext got %s. SQL = %s\", err.Error(), q)\n\t\t\treturn &ErrDBExecuteError{\n\t\t\t\tWrapped: err,\n\t\t\t\tMessage: \"Error UpdateTenant\",\n\t\t\t\tSQL: q,\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}", "func (bu *BankUpdate) AddBranchIDs(ids ...int) *BankUpdate {\n\tbu.mutation.AddBranchIDs(ids...)\n\treturn bu\n}", "func (accountStrategy) PrepareForUpdate(ctx genericapirequest.Context, obj, old runtime.Object) {\n}", "func UpdateCompanyBranchHyCompanybranchOKIdname(t goatest.TInterface, ctx context.Context, service *goa.Service, ctrl app.HyCompanybranchController, id int, payload *app.UpdateCompanyBranchHyCompanybranchPayload) (http.ResponseWriter, *app.CompanyIdname) {\n\t// Setup service\n\tvar (\n\t\tlogBuf bytes.Buffer\n\t\tresp interface{}\n\n\t\trespSetter goatest.ResponseSetterFunc = func(r interface{}) { resp = r }\n\t)\n\tif service == nil {\n\t\tservice = goatest.Service(&logBuf, respSetter)\n\t} else {\n\t\tlogger := log.New(&logBuf, \"\", log.Ltime)\n\t\tservice.WithLogger(goa.NewLogger(logger))\n\t\tnewEncoder := func(io.Writer) goa.Encoder { return respSetter }\n\t\tservice.Encoder = goa.NewHTTPEncoder() // Make sure the code ends up using this decoder\n\t\tservice.Encoder.Register(newEncoder, \"*/*\")\n\t}\n\n\t// Validate payload\n\terr := payload.Validate()\n\tif err != nil {\n\t\te, ok := err.(goa.ServiceError)\n\t\tif !ok {\n\t\t\tpanic(err) // bug\n\t\t}\n\t\tt.Errorf(\"unexpected payload validation error: %+v\", e)\n\t\treturn nil, nil\n\t}\n\n\t// Setup request context\n\trw := httptest.NewRecorder()\n\tu := &url.URL{\n\t\tPath: fmt.Sprintf(\"/api/company/branch/%v\", id),\n\t}\n\treq, _err := http.NewRequest(\"PUT\", u.String(), nil)\n\tif _err != nil {\n\t\tpanic(\"invalid test \" + _err.Error()) // bug\n\t}\n\tprms := url.Values{}\n\tprms[\"ID\"] = []string{fmt.Sprintf(\"%v\", id)}\n\tif 
ctx == nil {\n\t\tctx = context.Background()\n\t}\n\tgoaCtx := goa.NewContext(goa.WithAction(ctx, \"HyCompanybranchTest\"), rw, req, prms)\n\tupdateCompanyBranchCtx, __err := app.NewUpdateCompanyBranchHyCompanybranchContext(goaCtx, req, service)\n\tif __err != nil {\n\t\t_e, _ok := __err.(goa.ServiceError)\n\t\tif !_ok {\n\t\t\tpanic(\"invalid test data \" + __err.Error()) // bug\n\t\t}\n\t\tt.Errorf(\"unexpected parameter validation error: %+v\", _e)\n\t\treturn nil, nil\n\t}\n\tupdateCompanyBranchCtx.Payload = payload\n\n\t// Perform action\n\t__err = ctrl.UpdateCompanyBranch(updateCompanyBranchCtx)\n\n\t// Validate response\n\tif __err != nil {\n\t\tt.Fatalf(\"controller returned %+v, logs:\\n%s\", __err, logBuf.String())\n\t}\n\tif rw.Code != 200 {\n\t\tt.Errorf(\"invalid response status code: got %+v, expected 200\", rw.Code)\n\t}\n\tvar mt *app.CompanyIdname\n\tif resp != nil {\n\t\tvar __ok bool\n\t\tmt, __ok = resp.(*app.CompanyIdname)\n\t\tif !__ok {\n\t\t\tt.Fatalf(\"invalid response media: got variable of type %T, value %+v, expected instance of app.CompanyIdname\", resp, resp)\n\t\t}\n\t\t__err = mt.Validate()\n\t\tif __err != nil {\n\t\t\tt.Errorf(\"invalid response media type: %s\", __err)\n\t\t}\n\t}\n\n\t// Return results\n\treturn rw, mt\n}", "func addBranchRestrictions(ft *factsTable, b *Block, br branch) {\n\tc := b.Controls[0]\n\tswitch br {\n\tcase negative:\n\t\taddRestrictions(b, ft, boolean, nil, c, eq)\n\tcase positive:\n\t\taddRestrictions(b, ft, boolean, nil, c, lt|gt)\n\tdefault:\n\t\tpanic(\"unknown branch\")\n\t}\n\tif tr, has := domainRelationTable[c.Op]; has {\n\t\t// When we branched from parent we learned a new set of\n\t\t// restrictions. Update the factsTable accordingly.\n\t\td := tr.d\n\t\tif d == signed && ft.isNonNegative(c.Args[0]) && ft.isNonNegative(c.Args[1]) {\n\t\t\td |= unsigned\n\t\t}\n\t\tswitch c.Op {\n\t\tcase OpIsInBounds, OpIsSliceInBounds:\n\t\t\t// 0 <= a0 < a1 (or 0 <= a0 <= a1)\n\t\t\t//\n\t\t\t// On the positive branch, we learn:\n\t\t\t// signed: 0 <= a0 < a1 (or 0 <= a0 <= a1)\n\t\t\t// unsigned: a0 < a1 (or a0 <= a1)\n\t\t\t//\n\t\t\t// On the negative branch, we learn (0 > a0 ||\n\t\t\t// a0 >= a1). 
In the unsigned domain, this is\n\t\t\t// simply a0 >= a1 (which is the reverse of the\n\t\t\t// positive branch, so nothing surprising).\n\t\t\t// But in the signed domain, we can't express the ||\n\t\t\t// condition, so check if a0 is non-negative instead,\n\t\t\t// to be able to learn something.\n\t\t\tswitch br {\n\t\t\tcase negative:\n\t\t\t\td = unsigned\n\t\t\t\tif ft.isNonNegative(c.Args[0]) {\n\t\t\t\t\td |= signed\n\t\t\t\t}\n\t\t\t\taddRestrictions(b, ft, d, c.Args[0], c.Args[1], tr.r^(lt|gt|eq))\n\t\t\tcase positive:\n\t\t\t\taddRestrictions(b, ft, signed, ft.zero, c.Args[0], lt|eq)\n\t\t\t\taddRestrictions(b, ft, d, c.Args[0], c.Args[1], tr.r)\n\t\t\t}\n\t\tdefault:\n\t\t\tswitch br {\n\t\t\tcase negative:\n\t\t\t\taddRestrictions(b, ft, d, c.Args[0], c.Args[1], tr.r^(lt|gt|eq))\n\t\t\tcase positive:\n\t\t\t\taddRestrictions(b, ft, d, c.Args[0], c.Args[1], tr.r)\n\t\t\t}\n\t\t}\n\n\t}\n}", "func (u *Updater) tryUpdate(ctx context.Context, repositoryID, dirtyToken int) (err error) {\n\tok, unlock, err := u.locker.Lock(ctx, repositoryID, false)\n\tif err != nil || !ok {\n\t\treturn errors.Wrap(err, \"locker.Lock\")\n\t}\n\tdefer func() {\n\t\terr = unlock(err)\n\t}()\n\n\treturn u.update(ctx, repositoryID, dirtyToken)\n}", "func (b *BranchDAG) setBranchLiked(cachedBranch *CachedBranch, liked bool) (modified bool, err error) {\n\t// release the CachedBranch when we are done\n\tdefer cachedBranch.Release()\n\n\t// unwrap ConflictBranch\n\tconflictBranch, err := cachedBranch.UnwrapConflictBranch()\n\tif err != nil {\n\t\terr = errors.Errorf(\"failed to load ConflictBranch with %s: %w\", cachedBranch.ID(), cerrors.ErrFatal)\n\t\treturn\n\t}\n\n\t// execute case dependent logic\n\tswitch liked {\n\tcase true:\n\t\t// iterate through all Conflicts of the current Branch and set their ConflictMembers to be not liked\n\t\tfor conflictID := range conflictBranch.Conflicts() {\n\t\t\t// iterate through all ConflictMembers and set them to not liked\n\t\t\tcachedConflictMembers := b.ConflictMembers(conflictID)\n\t\t\tfor _, cachedConflictMember := range cachedConflictMembers {\n\t\t\t\t// unwrap the ConflictMember\n\t\t\t\tconflictMember := cachedConflictMember.Unwrap()\n\t\t\t\tif conflictMember == nil {\n\t\t\t\t\tcachedConflictMembers.Release()\n\t\t\t\t\terr = errors.Errorf(\"failed to load ConflictMember of %s: %w\", conflictID, cerrors.ErrFatal)\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\t// skip the current Branch\n\t\t\t\tif conflictMember.BranchID() == conflictBranch.ID() {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\t// update the other ConflictMembers to be not liked\n\t\t\t\tif _, err = b.setBranchLiked(b.Branch(conflictMember.BranchID()), false); err != nil {\n\t\t\t\t\tcachedConflictMembers.Release()\n\t\t\t\t\terr = errors.Errorf(\"failed to propagate liked changes to other ConflictMembers: %w\", err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t\tcachedConflictMembers.Release()\n\t\t}\n\n\t\t// abort if the branch was liked already\n\t\tif modified = conflictBranch.SetLiked(true); !modified {\n\t\t\treturn\n\t\t}\n\n\t\t// trigger event\n\t\tb.Events.BranchLiked.Trigger(NewBranchDAGEvent(cachedBranch))\n\n\t\t// update the liked status of the future cone (it only effects the AggregatedBranches)\n\t\tif err = b.updateLikedOfAggregatedChildBranches(conflictBranch.ID(), true); err != nil {\n\t\t\terr = errors.Errorf(\"failed to update liked status of future cone of Branch with %s: %w\", conflictBranch.ID(), err)\n\t\t\treturn\n\t\t}\n\n\t\t// update the liked status of the future cone (if 
necessary)\n\t\tif _, err = b.updateMonotonicallyLikedStatus(conflictBranch.ID(), true); err != nil {\n\t\t\terr = errors.Errorf(\"failed to update liked status of future cone of Branch with %s: %w\", conflictBranch.ID(), err)\n\t\t\treturn\n\t\t}\n\tcase false:\n\t\t// set the branch to be not liked\n\t\tif modified = conflictBranch.SetLiked(false); modified {\n\t\t\t// trigger event\n\t\t\tb.Events.BranchDisliked.Trigger(NewBranchDAGEvent(cachedBranch))\n\n\t\t\t// update the liked status of the future cone (it only affect the AggregatedBranches)\n\t\t\tif propagationErr := b.updateLikedOfAggregatedChildBranches(conflictBranch.ID(), false); propagationErr != nil {\n\t\t\t\terr = errors.Errorf(\"failed to propagate liked changes to AggregatedBranches: %w\", propagationErr)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// update the liked status of the future cone (if necessary)\n\t\t\tif _, propagationErr := b.updateMonotonicallyLikedStatus(conflictBranch.ID(), false); propagationErr != nil {\n\t\t\t\terr = errors.Errorf(\"failed to update liked status of future cone of Branch with %s: %w\", conflictBranch.ID(), propagationErr)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\n\treturn\n}", "func updateLoan(l *models.Loan, db *gorm.DB) error {\n\tomitList := []string{\"id\", \"initial_value\", \"interest\", \"quota\", \"cod_client\", \"cod_collection\", \"cod_user\", \"deleted_at\"}\n\terr := db.Model(l).Omit(omitList...).Save(l).Error\n\treturn err\n}", "func (r *DeviceCompliancePolicyRequest) Update(ctx context.Context, reqObj *DeviceCompliancePolicy) error {\n\treturn r.JSONRequest(ctx, \"PATCH\", \"\", reqObj, nil)\n}", "func (p *Patch) SetFinalized(ctx context.Context, versionId string) error {\n\tif _, err := evergreen.GetEnvironment().DB().Collection(Collection).UpdateOne(ctx,\n\t\tbson.M{IdKey: p.Id},\n\t\tbson.M{\n\t\t\t\"$set\": bson.M{\n\t\t\t\tActivatedKey: true,\n\t\t\t\tVersionKey: versionId,\n\t\t\t},\n\t\t\t\"$unset\": bson.M{\n\t\t\t\tProjectStorageMethodKey: 1,\n\t\t\t\tPatchedParserProjectKey: 1,\n\t\t\t\tPatchedProjectConfigKey: 1,\n\t\t\t},\n\t\t},\n\t); err != nil {\n\t\treturn err\n\t}\n\n\tp.Version = versionId\n\tp.Activated = true\n\tp.ProjectStorageMethod = \"\"\n\tp.PatchedParserProject = \"\"\n\tp.PatchedProjectConfig = \"\"\n\n\treturn nil\n}", "func (handler *InitHandler) handleBranches(c Community, r Repository) error {\n\t// if the branches are defined in the repositories, it means that\n\t// all the branches defined in the community will not inherited by repositories\n\tmapBranches := make(map[string]string)\n\n\tif len(r.ProtectedBranches) > 0 {\n\t\t// using repository branches\n\t\tglog.Infof(\"using repository branches: %s\", *r.Name)\n\t\tfor _, b := range r.ProtectedBranches {\n\t\t\tmapBranches[b] = b\n\t\t}\n\t} else {\n\t\t// using community branches\n\t\tglog.Infof(\"using community branches: %s\", *r.Name)\n\t\tfor _, b := range c.ProtectedBranches {\n\t\t\tmapBranches[b] = b\n\t\t}\n\t}\n\n\t// get branches from DB\n\tvar bs []database.Branches\n\terr := database.DBConnection.Model(&database.Branches{}).\n\t\tWhere(\"owner = ? 
and repo = ?\", c.Name, r.Name).Find(&bs).Error\n\tif err != nil {\n\t\tglog.Errorf(\"unable to get branches: %v\", err)\n\t\treturn err\n\t}\n\tmapBranchesInDB := make(map[string]string)\n\tfor _, b := range bs {\n\t\tmapBranchesInDB[b.Name] = strconv.Itoa(int(b.ID))\n\t}\n\n\t// un-protected branches\n\terr = handler.removeBranchProtections(c, r, mapBranches, mapBranchesInDB)\n\tif err != nil {\n\t\tglog.Errorf(\"unable to un-protected branches: %v\", err)\n\t}\n\n\t// protected branches\n\terr = handler.addBranchProtections(c, r, mapBranches, mapBranchesInDB)\n\tif err != nil {\n\t\tglog.Errorf(\"unable to protected branches: %v\", err)\n\t}\n\n\treturn nil\n}", "func (s *RepositoryBranch) SetBranch(v string) *RepositoryBranch {\n\ts.Branch = &v\n\treturn s\n}", "func (c *AccountController) Update(ctx echo.Context) error {\n\tmodel := account.Account{}\n\terr := ctx.Bind(&model)\n\tif err != nil {\n\t\treturn ctx.JSON(http.StatusUnprocessableEntity, err.Error())\n\t}\n\n\terr = c.AccountUsecase.Update(&model)\n\tif err != nil {\n\t\tctx.JSON(http.StatusInternalServerError, err.Error())\n\t}\n\n\treturn ctx.NoContent(http.StatusNoContent)\n}", "func (s *BucketService) UpdateBucket(ctx context.Context, id influxdb.ID, upd influxdb.BucketUpdate) (*influxdb.Bucket, error) {\n\tb, err := s.s.FindBucketByID(ctx, id)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err := authorizeWriteBucket(ctx, b.OrgID, id); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn s.s.UpdateBucket(ctx, id, upd)\n}", "func StoreBranches(dbOwner, dbFolder, dbName string, branches map[string]BranchEntry) error {\n\tdbQuery := `\n\t\tUPDATE sqlite_databases\n\t\tSET branch_heads = $4, branches = $5\n\t\tWHERE user_id = (\n\t\t\t\tSELECT user_id\n\t\t\t\tFROM users\n\t\t\t\tWHERE lower(user_name) = lower($1)\n\t\t\t\t)\n\t\t\tAND folder = $2\n\t\t\tAND db_name = $3`\n\tcommandTag, err := pdb.Exec(dbQuery, dbOwner, dbFolder, dbName, branches, len(branches))\n\tif err != nil {\n\t\tlog.Printf(\"Updating branch heads for database '%s%s%s' to '%v' failed: %v\\n\", dbOwner, dbFolder,\n\t\t\tdbName, branches, err)\n\t\treturn err\n\t}\n\tif numRows := commandTag.RowsAffected(); numRows != 1 {\n\t\tlog.Printf(\n\t\t\t\"Wrong number of rows (%v) affected when updating branch heads for database '%s%s%s' to '%v'\\n\",\n\t\t\tnumRows, dbOwner, dbFolder, dbName, branches)\n\t}\n\treturn nil\n}", "func (op *PermissionProfilesUpdateOp) Do(ctx context.Context) (*model.PermissionProfile, error) {\n\tvar res *model.PermissionProfile\n\treturn res, ((*esign.Op)(op)).Do(ctx, &res)\n}", "func (p *perm) CommitChange() {\n\tp.enforcer.SavePolicy()\n}", "func (s *Repository) Save(ctx context.Context, account Account) error {\n\tcmd, err := s.pool.Exec(\n\t\tctx,\n\t\t`insert into \"account\"\n\t\t\t (\"ID\", \"createdAt\", \"description\", \"balance\", \"currency\", \"lastUpdatedAt\")\n\t\t\t values ($1, current_timestamp(0), $1, $2, $3, current_timestamp(0))\n\t\t\t on conflict (\"ID\") do update\n\t\t\t set \"balance\" = \"excluded\".\"balance\", \"lastUpdatedAt\" = current_timestamp(0)`,\n\t\taccount.ID, account.Balance, account.Currency)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif cmd.RowsAffected() == 0 {\n\t\treturn fmt.Errorf(\"failed to upsert account: %v\", account.ID)\n\t}\n\n\treturn nil\n}", "func (m *AospDeviceOwnerDeviceConfiguration) SetBluetoothBlocked(value *bool)() {\n err := m.GetBackingStore().Set(\"bluetoothBlocked\", value)\n if err != nil {\n panic(err)\n }\n}", "func (b *BranchDAG) SetBranchFinalized(branchID 
BranchID, finalized bool) (modified bool, err error) {\n\treturn b.setBranchFinalized(b.Branch(branchID), finalized)\n}", "func (c *client) UpdatePullRequestBranch(org, repo string, number int, expectedHeadSha *string) error {\n\tdurationLogger := c.log(\"UpdatePullRequestBranch\", org, repo)\n\tdefer durationLogger()\n\n\tdata := struct {\n\t\t// The expected SHA of the pull request's HEAD ref. This is the most recent commit on the pull request's branch.\n\t\t// If the expected SHA does not match the pull request's HEAD, you will receive a 422 Unprocessable Entity status.\n\t\t// You can use the \"List commits\" endpoint to find the most recent commit SHA. Default: SHA of the pull request's current HEAD ref.\n\t\tExpectedHeadSha *string `json:\"expected_head_sha,omitempty\"`\n\t}{\n\t\tExpectedHeadSha: expectedHeadSha,\n\t}\n\n\tcode, err := c.request(&request{\n\t\tmethod: http.MethodPut,\n\t\tpath: fmt.Sprintf(\"/repos/%s/%s/pulls/%d/update-branch\", org, repo, number),\n\t\taccept: \"application/vnd.github.lydian-preview+json\",\n\t\torg: org,\n\t\trequestBody: &data,\n\t\texitCodes: []int{202, 422},\n\t}, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif code == http.StatusUnprocessableEntity {\n\t\tmsg := \"mismatch expected head sha\"\n\t\tif expectedHeadSha != nil {\n\t\t\tmsg = fmt.Sprintf(\"%s: %s\", msg, *expectedHeadSha)\n\t\t}\n\t\treturn errors.New(msg)\n\t}\n\n\treturn nil\n}" ]
[ "0.6981953", "0.68028724", "0.67981833", "0.6534552", "0.57428634", "0.5720844", "0.56828713", "0.5580903", "0.5515201", "0.5496755", "0.5369292", "0.4983595", "0.49808177", "0.49740207", "0.48678598", "0.48411614", "0.4827034", "0.47039238", "0.46725774", "0.46675575", "0.45363298", "0.452032", "0.4490009", "0.44693503", "0.44619825", "0.444473", "0.4428761", "0.4407938", "0.439502", "0.4361327", "0.4332399", "0.4331433", "0.4327554", "0.431971", "0.43091232", "0.43030196", "0.42953864", "0.42914665", "0.42655522", "0.42574888", "0.42496362", "0.42420632", "0.4235807", "0.42160013", "0.42058375", "0.41991234", "0.41917464", "0.41805708", "0.41705105", "0.41702718", "0.41666535", "0.41490313", "0.4147066", "0.41183293", "0.41154566", "0.41095382", "0.40996015", "0.40972114", "0.4095966", "0.40862817", "0.4082857", "0.40714502", "0.4068068", "0.40668717", "0.40649763", "0.40609756", "0.40513307", "0.4045832", "0.40246597", "0.40198126", "0.40095133", "0.40023246", "0.40001625", "0.3997555", "0.39961457", "0.39936432", "0.39930895", "0.3974371", "0.39688903", "0.39637557", "0.39563718", "0.3925133", "0.39218086", "0.39174074", "0.39167422", "0.39074302", "0.39059624", "0.39001584", "0.38998255", "0.3899464", "0.38686794", "0.3860071", "0.38588694", "0.3855684", "0.38511938", "0.38504738", "0.38504052", "0.38436934", "0.3836403", "0.38331226" ]
0.7305021
0
UpdateOrgProtectBranch saves the branch protection options of an organizational repository. If ID is 0, it creates a new record; otherwise, it updates the existing record. The function also checks whether the whitelist user and team IDs have changed, to avoid unnecessarily deleting and regenerating the whitelist.
func UpdateOrgProtectBranch(repo *Repository, protectBranch *ProtectBranch, whitelistUserIDs, whitelistTeamIDs string) (err error) { if err = repo.GetOwner(); err != nil { return fmt.Errorf("GetOwner: %v", err) } else if !repo.Owner.IsOrganization() { return fmt.Errorf("expect repository owner to be an organization") } hasUsersChanged := false validUserIDs := tool.StringsToInt64s(strings.Split(protectBranch.WhitelistUserIDs, ",")) if protectBranch.WhitelistUserIDs != whitelistUserIDs { hasUsersChanged = true userIDs := tool.StringsToInt64s(strings.Split(whitelistUserIDs, ",")) validUserIDs = make([]int64, 0, len(userIDs)) for _, userID := range userIDs { if !Perms.Authorize(context.TODO(), userID, repo.ID, AccessModeWrite, AccessModeOptions{ OwnerID: repo.OwnerID, Private: repo.IsPrivate, }, ) { continue // Drop invalid user ID } validUserIDs = append(validUserIDs, userID) } protectBranch.WhitelistUserIDs = strings.Join(tool.Int64sToStrings(validUserIDs), ",") } hasTeamsChanged := false validTeamIDs := tool.StringsToInt64s(strings.Split(protectBranch.WhitelistTeamIDs, ",")) if protectBranch.WhitelistTeamIDs != whitelistTeamIDs { hasTeamsChanged = true teamIDs := tool.StringsToInt64s(strings.Split(whitelistTeamIDs, ",")) teams, err := GetTeamsHaveAccessToRepo(repo.OwnerID, repo.ID, AccessModeWrite) if err != nil { return fmt.Errorf("GetTeamsHaveAccessToRepo [org_id: %d, repo_id: %d]: %v", repo.OwnerID, repo.ID, err) } validTeamIDs = make([]int64, 0, len(teams)) for i := range teams { if teams[i].HasWriteAccess() && com.IsSliceContainsInt64(teamIDs, teams[i].ID) { validTeamIDs = append(validTeamIDs, teams[i].ID) } } protectBranch.WhitelistTeamIDs = strings.Join(tool.Int64sToStrings(validTeamIDs), ",") } // Make sure protectBranch.ID is not 0 for whitelists if protectBranch.ID == 0 { if _, err = x.Insert(protectBranch); err != nil { return fmt.Errorf("Insert: %v", err) } } // Merge users and members of teams var whitelists []*ProtectBranchWhitelist if hasUsersChanged || hasTeamsChanged { mergedUserIDs := make(map[int64]bool) for _, userID := range validUserIDs { // Empty whitelist users can cause an ID with 0 if userID != 0 { mergedUserIDs[userID] = true } } for _, teamID := range validTeamIDs { members, err := GetTeamMembers(teamID) if err != nil { return fmt.Errorf("GetTeamMembers [team_id: %d]: %v", teamID, err) } for i := range members { mergedUserIDs[members[i].ID] = true } } whitelists = make([]*ProtectBranchWhitelist, 0, len(mergedUserIDs)) for userID := range mergedUserIDs { whitelists = append(whitelists, &ProtectBranchWhitelist{ ProtectBranchID: protectBranch.ID, RepoID: repo.ID, Name: protectBranch.Name, UserID: userID, }) } } sess := x.NewSession() defer sess.Close() if err = sess.Begin(); err != nil { return err } if _, err = sess.ID(protectBranch.ID).AllCols().Update(protectBranch); err != nil { return fmt.Errorf("Update: %v", err) } // Refresh whitelists if hasUsersChanged || hasTeamsChanged { if _, err = sess.Delete(&ProtectBranchWhitelist{ProtectBranchID: protectBranch.ID}); err != nil { return fmt.Errorf("delete old protect branch whitelists: %v", err) } else if _, err = sess.Insert(whitelists); err != nil { return fmt.Errorf("insert new protect branch whitelists: %v", err) } } return sess.Commit() }
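A minimal, hypothetical caller for the function above. The branch name, the ID strings, and the helper itself are illustrative stand-ins for form input and are not part of this dataset row; only the function signature and the ProtectBranch fields it reads (ID, Name, WhitelistUserIDs, WhitelistTeamIDs) come from the code shown.

// Hypothetical usage sketch, assuming the models package context above.
func protectMasterExample(repo *Repository) error {
	// protectBranch carries the currently stored whitelists; ID == 0 means
	// no record exists yet, so UpdateOrgProtectBranch will insert one.
	protectBranch := &ProtectBranch{
		Name:             "master",
		WhitelistUserIDs: "1,2",
		WhitelistTeamIDs: "10",
	}

	// The third and fourth arguments carry the newly submitted,
	// comma-separated user and team IDs. Because "1,2,3" differs from the
	// stored "1,2", the user whitelist takes the validate-and-regenerate
	// path; the team whitelist is unchanged ("10" == "10") and is skipped.
	// repo must be owned by an organization, or the call returns an error.
	return UpdateOrgProtectBranch(repo, protectBranch, "1,2,3", "10")
}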
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func EditBranchProtection(ctx *context.APIContext) {\n\t// swagger:operation PATCH /repos/{owner}/{repo}/branch_protections/{name} repository repoEditBranchProtection\n\t// ---\n\t// summary: Edit a branch protections for a repository. Only fields that are set will be changed\n\t// consumes:\n\t// - application/json\n\t// produces:\n\t// - application/json\n\t// parameters:\n\t// - name: owner\n\t// in: path\n\t// description: owner of the repo\n\t// type: string\n\t// required: true\n\t// - name: repo\n\t// in: path\n\t// description: name of the repo\n\t// type: string\n\t// required: true\n\t// - name: name\n\t// in: path\n\t// description: name of protected branch\n\t// type: string\n\t// required: true\n\t// - name: body\n\t// in: body\n\t// schema:\n\t// \"$ref\": \"#/definitions/EditBranchProtectionOption\"\n\t// responses:\n\t// \"200\":\n\t// \"$ref\": \"#/responses/BranchProtection\"\n\t// \"404\":\n\t// \"$ref\": \"#/responses/notFound\"\n\t// \"422\":\n\t// \"$ref\": \"#/responses/validationError\"\n\tform := web.GetForm(ctx).(*api.EditBranchProtectionOption)\n\trepo := ctx.Repo.Repository\n\tbpName := ctx.Params(\":name\")\n\tprotectBranch, err := git_model.GetProtectedBranchRuleByName(ctx, repo.ID, bpName)\n\tif err != nil {\n\t\tctx.Error(http.StatusInternalServerError, \"GetProtectedBranchByID\", err)\n\t\treturn\n\t}\n\tif protectBranch == nil || protectBranch.RepoID != repo.ID {\n\t\tctx.NotFound()\n\t\treturn\n\t}\n\n\tif form.EnablePush != nil {\n\t\tif !*form.EnablePush {\n\t\t\tprotectBranch.CanPush = false\n\t\t\tprotectBranch.EnableWhitelist = false\n\t\t\tprotectBranch.WhitelistDeployKeys = false\n\t\t} else {\n\t\t\tprotectBranch.CanPush = true\n\t\t\tif form.EnablePushWhitelist != nil {\n\t\t\t\tif !*form.EnablePushWhitelist {\n\t\t\t\t\tprotectBranch.EnableWhitelist = false\n\t\t\t\t\tprotectBranch.WhitelistDeployKeys = false\n\t\t\t\t} else {\n\t\t\t\t\tprotectBranch.EnableWhitelist = true\n\t\t\t\t\tif form.PushWhitelistDeployKeys != nil {\n\t\t\t\t\t\tprotectBranch.WhitelistDeployKeys = *form.PushWhitelistDeployKeys\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tif form.EnableMergeWhitelist != nil {\n\t\tprotectBranch.EnableMergeWhitelist = *form.EnableMergeWhitelist\n\t}\n\n\tif form.EnableStatusCheck != nil {\n\t\tprotectBranch.EnableStatusCheck = *form.EnableStatusCheck\n\t}\n\n\tif form.StatusCheckContexts != nil {\n\t\tprotectBranch.StatusCheckContexts = form.StatusCheckContexts\n\t}\n\n\tif form.RequiredApprovals != nil && *form.RequiredApprovals >= 0 {\n\t\tprotectBranch.RequiredApprovals = *form.RequiredApprovals\n\t}\n\n\tif form.EnableApprovalsWhitelist != nil {\n\t\tprotectBranch.EnableApprovalsWhitelist = *form.EnableApprovalsWhitelist\n\t}\n\n\tif form.BlockOnRejectedReviews != nil {\n\t\tprotectBranch.BlockOnRejectedReviews = *form.BlockOnRejectedReviews\n\t}\n\n\tif form.BlockOnOfficialReviewRequests != nil {\n\t\tprotectBranch.BlockOnOfficialReviewRequests = *form.BlockOnOfficialReviewRequests\n\t}\n\n\tif form.DismissStaleApprovals != nil {\n\t\tprotectBranch.DismissStaleApprovals = *form.DismissStaleApprovals\n\t}\n\n\tif form.RequireSignedCommits != nil {\n\t\tprotectBranch.RequireSignedCommits = *form.RequireSignedCommits\n\t}\n\n\tif form.ProtectedFilePatterns != nil {\n\t\tprotectBranch.ProtectedFilePatterns = *form.ProtectedFilePatterns\n\t}\n\n\tif form.UnprotectedFilePatterns != nil {\n\t\tprotectBranch.UnprotectedFilePatterns = *form.UnprotectedFilePatterns\n\t}\n\n\tif form.BlockOnOutdatedBranch != nil 
{\n\t\tprotectBranch.BlockOnOutdatedBranch = *form.BlockOnOutdatedBranch\n\t}\n\n\tvar whitelistUsers []int64\n\tif form.PushWhitelistUsernames != nil {\n\t\twhitelistUsers, err = user_model.GetUserIDsByNames(ctx, form.PushWhitelistUsernames, false)\n\t\tif err != nil {\n\t\t\tif user_model.IsErrUserNotExist(err) {\n\t\t\t\tctx.Error(http.StatusUnprocessableEntity, \"User does not exist\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tctx.Error(http.StatusInternalServerError, \"GetUserIDsByNames\", err)\n\t\t\treturn\n\t\t}\n\t} else {\n\t\twhitelistUsers = protectBranch.WhitelistUserIDs\n\t}\n\tvar mergeWhitelistUsers []int64\n\tif form.MergeWhitelistUsernames != nil {\n\t\tmergeWhitelistUsers, err = user_model.GetUserIDsByNames(ctx, form.MergeWhitelistUsernames, false)\n\t\tif err != nil {\n\t\t\tif user_model.IsErrUserNotExist(err) {\n\t\t\t\tctx.Error(http.StatusUnprocessableEntity, \"User does not exist\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tctx.Error(http.StatusInternalServerError, \"GetUserIDsByNames\", err)\n\t\t\treturn\n\t\t}\n\t} else {\n\t\tmergeWhitelistUsers = protectBranch.MergeWhitelistUserIDs\n\t}\n\tvar approvalsWhitelistUsers []int64\n\tif form.ApprovalsWhitelistUsernames != nil {\n\t\tapprovalsWhitelistUsers, err = user_model.GetUserIDsByNames(ctx, form.ApprovalsWhitelistUsernames, false)\n\t\tif err != nil {\n\t\t\tif user_model.IsErrUserNotExist(err) {\n\t\t\t\tctx.Error(http.StatusUnprocessableEntity, \"User does not exist\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tctx.Error(http.StatusInternalServerError, \"GetUserIDsByNames\", err)\n\t\t\treturn\n\t\t}\n\t} else {\n\t\tapprovalsWhitelistUsers = protectBranch.ApprovalsWhitelistUserIDs\n\t}\n\n\tvar whitelistTeams, mergeWhitelistTeams, approvalsWhitelistTeams []int64\n\tif repo.Owner.IsOrganization() {\n\t\tif form.PushWhitelistTeams != nil {\n\t\t\twhitelistTeams, err = organization.GetTeamIDsByNames(repo.OwnerID, form.PushWhitelistTeams, false)\n\t\t\tif err != nil {\n\t\t\t\tif organization.IsErrTeamNotExist(err) {\n\t\t\t\t\tctx.Error(http.StatusUnprocessableEntity, \"Team does not exist\", err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tctx.Error(http.StatusInternalServerError, \"GetTeamIDsByNames\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t} else {\n\t\t\twhitelistTeams = protectBranch.WhitelistTeamIDs\n\t\t}\n\t\tif form.MergeWhitelistTeams != nil {\n\t\t\tmergeWhitelistTeams, err = organization.GetTeamIDsByNames(repo.OwnerID, form.MergeWhitelistTeams, false)\n\t\t\tif err != nil {\n\t\t\t\tif organization.IsErrTeamNotExist(err) {\n\t\t\t\t\tctx.Error(http.StatusUnprocessableEntity, \"Team does not exist\", err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tctx.Error(http.StatusInternalServerError, \"GetTeamIDsByNames\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t} else {\n\t\t\tmergeWhitelistTeams = protectBranch.MergeWhitelistTeamIDs\n\t\t}\n\t\tif form.ApprovalsWhitelistTeams != nil {\n\t\t\tapprovalsWhitelistTeams, err = organization.GetTeamIDsByNames(repo.OwnerID, form.ApprovalsWhitelistTeams, false)\n\t\t\tif err != nil {\n\t\t\t\tif organization.IsErrTeamNotExist(err) {\n\t\t\t\t\tctx.Error(http.StatusUnprocessableEntity, \"Team does not exist\", err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tctx.Error(http.StatusInternalServerError, \"GetTeamIDsByNames\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t} else {\n\t\t\tapprovalsWhitelistTeams = protectBranch.ApprovalsWhitelistTeamIDs\n\t\t}\n\t}\n\n\terr = git_model.UpdateProtectBranch(ctx, ctx.Repo.Repository, protectBranch, git_model.WhitelistOptions{\n\t\tUserIDs: whitelistUsers,\n\t\tTeamIDs: 
whitelistTeams,\n\t\tMergeUserIDs: mergeWhitelistUsers,\n\t\tMergeTeamIDs: mergeWhitelistTeams,\n\t\tApprovalsUserIDs: approvalsWhitelistUsers,\n\t\tApprovalsTeamIDs: approvalsWhitelistTeams,\n\t})\n\tif err != nil {\n\t\tctx.Error(http.StatusInternalServerError, \"UpdateProtectBranch\", err)\n\t\treturn\n\t}\n\n\tisPlainRule := !git_model.IsRuleNameSpecial(bpName)\n\tvar isBranchExist bool\n\tif isPlainRule {\n\t\tisBranchExist = git.IsBranchExist(ctx.Req.Context(), ctx.Repo.Repository.RepoPath(), bpName)\n\t}\n\n\tif isBranchExist {\n\t\tif err = pull_service.CheckPRsForBaseBranch(ctx, ctx.Repo.Repository, bpName); err != nil {\n\t\t\tctx.Error(http.StatusInternalServerError, \"CheckPrsForBaseBranch\", err)\n\t\t\treturn\n\t\t}\n\t} else {\n\t\tif !isPlainRule {\n\t\t\tif ctx.Repo.GitRepo == nil {\n\t\t\t\tctx.Repo.GitRepo, err = git.OpenRepository(ctx, ctx.Repo.Repository.RepoPath())\n\t\t\t\tif err != nil {\n\t\t\t\t\tctx.Error(http.StatusInternalServerError, \"OpenRepository\", err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tdefer func() {\n\t\t\t\t\tctx.Repo.GitRepo.Close()\n\t\t\t\t\tctx.Repo.GitRepo = nil\n\t\t\t\t}()\n\t\t\t}\n\n\t\t\t// FIXME: since we only need to recheck files protected rules, we could improve this\n\t\t\tmatchedBranches, err := git_model.FindAllMatchedBranches(ctx, ctx.Repo.Repository.ID, protectBranch.RuleName)\n\t\t\tif err != nil {\n\t\t\t\tctx.Error(http.StatusInternalServerError, \"FindAllMatchedBranches\", err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tfor _, branchName := range matchedBranches {\n\t\t\t\tif err = pull_service.CheckPRsForBaseBranch(ctx, ctx.Repo.Repository, branchName); err != nil {\n\t\t\t\t\tctx.Error(http.StatusInternalServerError, \"CheckPrsForBaseBranch\", err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t// Reload from db to ensure get all whitelists\n\tbp, err := git_model.GetProtectedBranchRuleByName(ctx, repo.ID, bpName)\n\tif err != nil {\n\t\tctx.Error(http.StatusInternalServerError, \"GetProtectedBranchBy\", err)\n\t\treturn\n\t}\n\tif bp == nil || bp.RepoID != ctx.Repo.Repository.ID {\n\t\tctx.Error(http.StatusInternalServerError, \"New branch protection not found\", err)\n\t\treturn\n\t}\n\n\tctx.JSON(http.StatusOK, convert.ToBranchProtection(bp))\n}", "func UpdateProtectBranch(protectBranch *ProtectBranch) (err error) {\n\tsess := x.NewSession()\n\tdefer sess.Close()\n\tif err = sess.Begin(); err != nil {\n\t\treturn err\n\t}\n\n\tif protectBranch.ID == 0 {\n\t\tif _, err = sess.Insert(protectBranch); err != nil {\n\t\t\treturn fmt.Errorf(\"Insert: %v\", err)\n\t\t}\n\t}\n\n\tif _, err = sess.ID(protectBranch.ID).AllCols().Update(protectBranch); err != nil {\n\t\treturn fmt.Errorf(\"Update: %v\", err)\n\t}\n\n\treturn sess.Commit()\n}", "func (c *client) UpdateBranchProtection(org, repo, branch string, config BranchProtectionRequest) error {\n\tdurationLogger := c.log(\"UpdateBranchProtection\", org, repo, branch, config)\n\tdefer durationLogger()\n\n\t_, err := c.request(&request{\n\t\taccept: \"application/vnd.github.luke-cage-preview+json\", // for required_approving_review_count\n\t\tmethod: http.MethodPut,\n\t\tpath: fmt.Sprintf(\"/repos/%s/%s/branches/%s/protection\", org, repo, branch),\n\t\torg: org,\n\t\trequestBody: config,\n\t\texitCodes: []int{200},\n\t}, nil)\n\treturn err\n}", "func (p GithubRepoHost) UpdateBranchProtection(repoID string, rule BranchProtectionRule) error {\n\tif isDebug() {\n\t\tfmt.Printf(\"Updating branch protection on %s\\n\", repoID)\n\t}\n\n\trules := fetchBranchProtectionRules()\n\tinput 
:= githubv4.UpdateBranchProtectionRuleInput{\n\t\tBranchProtectionRuleID: rule.ID,\n\t\tPattern: githubv4.NewString(githubv4.String(rules.Pattern)),\n\t\tDismissesStaleReviews: githubv4.NewBoolean(githubv4.Boolean(rules.DismissesStaleReviews)),\n\t\tIsAdminEnforced: githubv4.NewBoolean(githubv4.Boolean(rules.IsAdminEnforced)),\n\t\tRequiresApprovingReviews: githubv4.NewBoolean(githubv4.Boolean(rules.RequiresApprovingReviews)),\n\t\tRequiredApprovingReviewCount: githubv4.NewInt(githubv4.Int(rules.RequiredApprovingReviewCount)),\n\t\tRequiresStatusChecks: githubv4.NewBoolean(githubv4.Boolean(rules.RequiresStatusChecks)),\n\t\tRequiredStatusCheckContexts: &[]githubv4.String{\n\t\t\t*githubv4.NewString(\"build\"),\n\t\t},\n\t}\n\n\tvar m UpdateBranchProtectionRuleMutation\n\tclient := buildClient()\n\terr := client.Mutate(context.Background(), &m, input, nil)\n\treturn err\n}", "func UpdateBranchProtection() error {\n\tvar wg sync.WaitGroup\n\trequests, err := getBranchProtectionRequests()\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\twg.Add(len(requests))\n\towner, repo := getOwnerRepo()\n\n\tfor _, bp := range requests {\n\t\tgo func(bp BranchProtection) {\n\t\t\tdefer wg.Done()\n\t\t\t_, _, err := cli.Repositories.UpdateBranchProtection(ctx, owner, repo, bp.Branch, bp.Protection)\n\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\n\t\t\t_, err = fmt.Fprintln(writer, fmt.Sprintf(\"branch %v has been protected\", bp.Branch))\n\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\t\t}(bp)\n\t}\n\n\twg.Wait()\n\n\treturn nil\n}", "func (mr *MockRepositoryClientMockRecorder) UpdateBranchProtection(org, repo, branch, config interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"UpdateBranchProtection\", reflect.TypeOf((*MockRepositoryClient)(nil).UpdateBranchProtection), org, repo, branch, config)\n}", "func (mr *MockClientMockRecorder) UpdateBranchProtection(org, repo, branch, config interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"UpdateBranchProtection\", reflect.TypeOf((*MockClient)(nil).UpdateBranchProtection), org, repo, branch, config)\n}", "func (m *MockClient) UpdateBranchProtection(org, repo, branch string, config github.BranchProtectionRequest) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"UpdateBranchProtection\", org, repo, branch, config)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (m *MockRepositoryClient) UpdateBranchProtection(org, repo, branch string, config github.BranchProtectionRequest) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"UpdateBranchProtection\", org, repo, branch, config)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (p GithubRepoHost) AddBranchProtection(repoID string) (BranchProtectionRule, error) {\n\tif isDebug() {\n\t\tfmt.Printf(\"Adding branch protection on %s\\n\", repoID)\n\t}\n\n\trules := fetchBranchProtectionRules()\n\tinput := githubv4.CreateBranchProtectionRuleInput{\n\t\tRepositoryID: repoID,\n\t\tPattern: *githubv4.NewString(githubv4.String(rules.Pattern)),\n\t\tDismissesStaleReviews: githubv4.NewBoolean(githubv4.Boolean(rules.DismissesStaleReviews)),\n\t\tIsAdminEnforced: githubv4.NewBoolean(githubv4.Boolean(rules.IsAdminEnforced)),\n\t\tRequiresApprovingReviews: githubv4.NewBoolean(githubv4.Boolean(rules.RequiresApprovingReviews)),\n\t\tRequiredApprovingReviewCount: githubv4.NewInt(githubv4.Int(rules.RequiredApprovingReviewCount)),\n\t\tRequiresStatusChecks: 
githubv4.NewBoolean(githubv4.Boolean(rules.RequiresStatusChecks)),\n\t}\n\n\tchecks := make([]githubv4.String, len(rules.RequiredStatusCheckContexts))\n\tfor i, name := range rules.RequiredStatusCheckContexts {\n\t\tchecks[i] = *githubv4.NewString(githubv4.String(name))\n\t}\n\tinput.RequiredStatusCheckContexts = &checks\n\n\tvar m CreateRuleMutation\n\tclient := buildClient()\n\terr := client.Mutate(context.Background(), &m, input, nil)\n\treturn m.CreateBranchProtectionRule.BranchProtectionRule, err\n}", "func CreateBranchProtection(ctx *context.APIContext) {\n\t// swagger:operation POST /repos/{owner}/{repo}/branch_protections repository repoCreateBranchProtection\n\t// ---\n\t// summary: Create a branch protections for a repository\n\t// consumes:\n\t// - application/json\n\t// produces:\n\t// - application/json\n\t// parameters:\n\t// - name: owner\n\t// in: path\n\t// description: owner of the repo\n\t// type: string\n\t// required: true\n\t// - name: repo\n\t// in: path\n\t// description: name of the repo\n\t// type: string\n\t// required: true\n\t// - name: body\n\t// in: body\n\t// schema:\n\t// \"$ref\": \"#/definitions/CreateBranchProtectionOption\"\n\t// responses:\n\t// \"201\":\n\t// \"$ref\": \"#/responses/BranchProtection\"\n\t// \"403\":\n\t// \"$ref\": \"#/responses/forbidden\"\n\t// \"404\":\n\t// \"$ref\": \"#/responses/notFound\"\n\t// \"422\":\n\t// \"$ref\": \"#/responses/validationError\"\n\n\tform := web.GetForm(ctx).(*api.CreateBranchProtectionOption)\n\trepo := ctx.Repo.Repository\n\n\truleName := form.RuleName\n\tif ruleName == \"\" {\n\t\truleName = form.BranchName //nolint\n\t}\n\tif len(ruleName) == 0 {\n\t\tctx.Error(http.StatusBadRequest, \"both rule_name and branch_name are empty\", \"both rule_name and branch_name are empty\")\n\t\treturn\n\t}\n\n\tisPlainRule := !git_model.IsRuleNameSpecial(ruleName)\n\tvar isBranchExist bool\n\tif isPlainRule {\n\t\tisBranchExist = git.IsBranchExist(ctx.Req.Context(), ctx.Repo.Repository.RepoPath(), ruleName)\n\t}\n\n\tprotectBranch, err := git_model.GetProtectedBranchRuleByName(ctx, repo.ID, ruleName)\n\tif err != nil {\n\t\tctx.Error(http.StatusInternalServerError, \"GetProtectBranchOfRepoByName\", err)\n\t\treturn\n\t} else if protectBranch != nil {\n\t\tctx.Error(http.StatusForbidden, \"Create branch protection\", \"Branch protection already exist\")\n\t\treturn\n\t}\n\n\tvar requiredApprovals int64\n\tif form.RequiredApprovals > 0 {\n\t\trequiredApprovals = form.RequiredApprovals\n\t}\n\n\twhitelistUsers, err := user_model.GetUserIDsByNames(ctx, form.PushWhitelistUsernames, false)\n\tif err != nil {\n\t\tif user_model.IsErrUserNotExist(err) {\n\t\t\tctx.Error(http.StatusUnprocessableEntity, \"User does not exist\", err)\n\t\t\treturn\n\t\t}\n\t\tctx.Error(http.StatusInternalServerError, \"GetUserIDsByNames\", err)\n\t\treturn\n\t}\n\tmergeWhitelistUsers, err := user_model.GetUserIDsByNames(ctx, form.MergeWhitelistUsernames, false)\n\tif err != nil {\n\t\tif user_model.IsErrUserNotExist(err) {\n\t\t\tctx.Error(http.StatusUnprocessableEntity, \"User does not exist\", err)\n\t\t\treturn\n\t\t}\n\t\tctx.Error(http.StatusInternalServerError, \"GetUserIDsByNames\", err)\n\t\treturn\n\t}\n\tapprovalsWhitelistUsers, err := user_model.GetUserIDsByNames(ctx, form.ApprovalsWhitelistUsernames, false)\n\tif err != nil {\n\t\tif user_model.IsErrUserNotExist(err) {\n\t\t\tctx.Error(http.StatusUnprocessableEntity, \"User does not exist\", err)\n\t\t\treturn\n\t\t}\n\t\tctx.Error(http.StatusInternalServerError, 
\"GetUserIDsByNames\", err)\n\t\treturn\n\t}\n\tvar whitelistTeams, mergeWhitelistTeams, approvalsWhitelistTeams []int64\n\tif repo.Owner.IsOrganization() {\n\t\twhitelistTeams, err = organization.GetTeamIDsByNames(repo.OwnerID, form.PushWhitelistTeams, false)\n\t\tif err != nil {\n\t\t\tif organization.IsErrTeamNotExist(err) {\n\t\t\t\tctx.Error(http.StatusUnprocessableEntity, \"Team does not exist\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tctx.Error(http.StatusInternalServerError, \"GetTeamIDsByNames\", err)\n\t\t\treturn\n\t\t}\n\t\tmergeWhitelistTeams, err = organization.GetTeamIDsByNames(repo.OwnerID, form.MergeWhitelistTeams, false)\n\t\tif err != nil {\n\t\t\tif organization.IsErrTeamNotExist(err) {\n\t\t\t\tctx.Error(http.StatusUnprocessableEntity, \"Team does not exist\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tctx.Error(http.StatusInternalServerError, \"GetTeamIDsByNames\", err)\n\t\t\treturn\n\t\t}\n\t\tapprovalsWhitelistTeams, err = organization.GetTeamIDsByNames(repo.OwnerID, form.ApprovalsWhitelistTeams, false)\n\t\tif err != nil {\n\t\t\tif organization.IsErrTeamNotExist(err) {\n\t\t\t\tctx.Error(http.StatusUnprocessableEntity, \"Team does not exist\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tctx.Error(http.StatusInternalServerError, \"GetTeamIDsByNames\", err)\n\t\t\treturn\n\t\t}\n\t}\n\n\tprotectBranch = &git_model.ProtectedBranch{\n\t\tRepoID: ctx.Repo.Repository.ID,\n\t\tRuleName: ruleName,\n\t\tCanPush: form.EnablePush,\n\t\tEnableWhitelist: form.EnablePush && form.EnablePushWhitelist,\n\t\tEnableMergeWhitelist: form.EnableMergeWhitelist,\n\t\tWhitelistDeployKeys: form.EnablePush && form.EnablePushWhitelist && form.PushWhitelistDeployKeys,\n\t\tEnableStatusCheck: form.EnableStatusCheck,\n\t\tStatusCheckContexts: form.StatusCheckContexts,\n\t\tEnableApprovalsWhitelist: form.EnableApprovalsWhitelist,\n\t\tRequiredApprovals: requiredApprovals,\n\t\tBlockOnRejectedReviews: form.BlockOnRejectedReviews,\n\t\tBlockOnOfficialReviewRequests: form.BlockOnOfficialReviewRequests,\n\t\tDismissStaleApprovals: form.DismissStaleApprovals,\n\t\tRequireSignedCommits: form.RequireSignedCommits,\n\t\tProtectedFilePatterns: form.ProtectedFilePatterns,\n\t\tUnprotectedFilePatterns: form.UnprotectedFilePatterns,\n\t\tBlockOnOutdatedBranch: form.BlockOnOutdatedBranch,\n\t}\n\n\terr = git_model.UpdateProtectBranch(ctx, ctx.Repo.Repository, protectBranch, git_model.WhitelistOptions{\n\t\tUserIDs: whitelistUsers,\n\t\tTeamIDs: whitelistTeams,\n\t\tMergeUserIDs: mergeWhitelistUsers,\n\t\tMergeTeamIDs: mergeWhitelistTeams,\n\t\tApprovalsUserIDs: approvalsWhitelistUsers,\n\t\tApprovalsTeamIDs: approvalsWhitelistTeams,\n\t})\n\tif err != nil {\n\t\tctx.Error(http.StatusInternalServerError, \"UpdateProtectBranch\", err)\n\t\treturn\n\t}\n\n\tif isBranchExist {\n\t\tif err = pull_service.CheckPRsForBaseBranch(ctx, ctx.Repo.Repository, ruleName); err != nil {\n\t\t\tctx.Error(http.StatusInternalServerError, \"CheckPRsForBaseBranch\", err)\n\t\t\treturn\n\t\t}\n\t} else {\n\t\tif !isPlainRule {\n\t\t\tif ctx.Repo.GitRepo == nil {\n\t\t\t\tctx.Repo.GitRepo, err = git.OpenRepository(ctx, ctx.Repo.Repository.RepoPath())\n\t\t\t\tif err != nil {\n\t\t\t\t\tctx.Error(http.StatusInternalServerError, \"OpenRepository\", err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tdefer func() {\n\t\t\t\t\tctx.Repo.GitRepo.Close()\n\t\t\t\t\tctx.Repo.GitRepo = nil\n\t\t\t\t}()\n\t\t\t}\n\t\t\t// FIXME: since we only need to recheck files protected rules, we could improve this\n\t\t\tmatchedBranches, err := 
git_model.FindAllMatchedBranches(ctx, ctx.Repo.Repository.ID, ruleName)\n\t\t\tif err != nil {\n\t\t\t\tctx.Error(http.StatusInternalServerError, \"FindAllMatchedBranches\", err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tfor _, branchName := range matchedBranches {\n\t\t\t\tif err = pull_service.CheckPRsForBaseBranch(ctx, ctx.Repo.Repository, branchName); err != nil {\n\t\t\t\t\tctx.Error(http.StatusInternalServerError, \"CheckPRsForBaseBranch\", err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t// Reload from db to get all whitelists\n\tbp, err := git_model.GetProtectedBranchRuleByName(ctx, ctx.Repo.Repository.ID, ruleName)\n\tif err != nil {\n\t\tctx.Error(http.StatusInternalServerError, \"GetProtectedBranchByID\", err)\n\t\treturn\n\t}\n\tif bp == nil || bp.RepoID != ctx.Repo.Repository.ID {\n\t\tctx.Error(http.StatusInternalServerError, \"New branch protection not found\", err)\n\t\treturn\n\t}\n\n\tctx.JSON(http.StatusCreated, convert.ToBranchProtection(bp))\n}", "func (c *client) RemoveBranchProtection(org, repo, branch string) error {\n\tdurationLogger := c.log(\"RemoveBranchProtection\", org, repo, branch)\n\tdefer durationLogger()\n\n\t_, err := c.request(&request{\n\t\tmethod: http.MethodDelete,\n\t\tpath: fmt.Sprintf(\"/repos/%s/%s/branches/%s/protection\", org, repo, branch),\n\t\torg: org,\n\t\texitCodes: []int{204},\n\t}, nil)\n\treturn err\n}", "func (r *SettingRepository) AddBranchByOrgID(orgID string) (branch models.Branch, err error) {\n\tobjID := bson.NewObjectId()\n\tbranch.ID = objID\n\tbranch.OrgID = bson.ObjectIdHex(orgID)\n\tbranch.Status = \"Active\"\n\tbranch.CreatedAt = time.Now()\n\tbranch.UpdatedAt = time.Now()\n\n\terr = r.C.Insert(&branch)\n\treturn\n}", "func (mr *MockRepositoryClientMockRecorder) RemoveBranchProtection(org, repo, branch interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"RemoveBranchProtection\", reflect.TypeOf((*MockRepositoryClient)(nil).RemoveBranchProtection), org, repo, branch)\n}", "func (mr *MockClientMockRecorder) RemoveBranchProtection(org, repo, branch interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"RemoveBranchProtection\", reflect.TypeOf((*MockClient)(nil).RemoveBranchProtection), org, repo, branch)\n}", "func UpdateCompanyBranchHyCompanybranchBadRequest(t goatest.TInterface, ctx context.Context, service *goa.Service, ctrl app.HyCompanybranchController, id int, payload *app.UpdateCompanyBranchHyCompanybranchPayload) (http.ResponseWriter, error) {\n\t// Setup service\n\tvar (\n\t\tlogBuf bytes.Buffer\n\t\tresp interface{}\n\n\t\trespSetter goatest.ResponseSetterFunc = func(r interface{}) { resp = r }\n\t)\n\tif service == nil {\n\t\tservice = goatest.Service(&logBuf, respSetter)\n\t} else {\n\t\tlogger := log.New(&logBuf, \"\", log.Ltime)\n\t\tservice.WithLogger(goa.NewLogger(logger))\n\t\tnewEncoder := func(io.Writer) goa.Encoder { return respSetter }\n\t\tservice.Encoder = goa.NewHTTPEncoder() // Make sure the code ends up using this decoder\n\t\tservice.Encoder.Register(newEncoder, \"*/*\")\n\t}\n\n\t// Validate payload\n\terr := payload.Validate()\n\tif err != nil {\n\t\te, ok := err.(goa.ServiceError)\n\t\tif !ok {\n\t\t\tpanic(err) // bug\n\t\t}\n\t\treturn nil, e\n\t}\n\n\t// Setup request context\n\trw := httptest.NewRecorder()\n\tu := &url.URL{\n\t\tPath: fmt.Sprintf(\"/api/company/branch/%v\", id),\n\t}\n\treq, _err := http.NewRequest(\"PUT\", u.String(), nil)\n\tif _err != nil 
{\n\t\tpanic(\"invalid test \" + _err.Error()) // bug\n\t}\n\tprms := url.Values{}\n\tprms[\"ID\"] = []string{fmt.Sprintf(\"%v\", id)}\n\tif ctx == nil {\n\t\tctx = context.Background()\n\t}\n\tgoaCtx := goa.NewContext(goa.WithAction(ctx, \"HyCompanybranchTest\"), rw, req, prms)\n\tupdateCompanyBranchCtx, __err := app.NewUpdateCompanyBranchHyCompanybranchContext(goaCtx, req, service)\n\tif __err != nil {\n\t\t_e, _ok := __err.(goa.ServiceError)\n\t\tif !_ok {\n\t\t\tpanic(\"invalid test data \" + __err.Error()) // bug\n\t\t}\n\t\treturn nil, _e\n\t}\n\tupdateCompanyBranchCtx.Payload = payload\n\n\t// Perform action\n\t__err = ctrl.UpdateCompanyBranch(updateCompanyBranchCtx)\n\n\t// Validate response\n\tif __err != nil {\n\t\tt.Fatalf(\"controller returned %+v, logs:\\n%s\", __err, logBuf.String())\n\t}\n\tif rw.Code != 400 {\n\t\tt.Errorf(\"invalid response status code: got %+v, expected 400\", rw.Code)\n\t}\n\tvar mt error\n\tif resp != nil {\n\t\tvar __ok bool\n\t\tmt, __ok = resp.(error)\n\t\tif !__ok {\n\t\t\tt.Fatalf(\"invalid response media: got variable of type %T, value %+v, expected instance of error\", resp, resp)\n\t\t}\n\t}\n\n\t// Return results\n\treturn rw, mt\n}", "func handleRepo(ctx context.Context, client *github.Client, repo *github.Repository) error {\n\topt := &github.ListOptions{\n\t\tPerPage: 100,\n\t}\n\n\tbranches, resp, err := client.Repositories.ListBranches(ctx, *repo.Owner.Login, *repo.Name, opt)\n\tif resp.StatusCode == http.StatusNotFound || resp.StatusCode == http.StatusForbidden {\n\t\treturn nil\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, branch := range branches {\n\t\tif branch.GetName() == \"master\" && in(orgs, *repo.Owner.Login) {\n\t\t\t// we must get the individual branch for the branch protection to work\n\t\t\tb, _, err := client.Repositories.GetBranch(ctx, *repo.Owner.Login, *repo.Name, branch.GetName())\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\t// return early if it is already protected\n\t\t\tif b.GetProtected() {\n\t\t\t\tfmt.Printf(\"[OK] %s:%s is already protected\\n\", *repo.FullName, b.GetName())\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\t\tfmt.Printf(\"[UPDATE] %s:%s will be changed to protected\\n\", *repo.FullName, b.GetName())\n\t\t\tif dryrun {\n\t\t\t\t// return early\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\t\t// set the branch to be protected\n\t\t\tif _, _, err := client.Repositories.UpdateBranchProtection(ctx, *repo.Owner.Login, *repo.Name, b.GetName(), &github.ProtectionRequest{\n\t\t\t\tRequiredStatusChecks: &github.RequiredStatusChecks{\n\t\t\t\t\tStrict: false,\n\t\t\t\t\tContexts: []string{},\n\t\t\t\t},\n\t\t\t}); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}", "func UpdateCompanyBranchHyCompanybranchNotFound(t goatest.TInterface, ctx context.Context, service *goa.Service, ctrl app.HyCompanybranchController, id int, payload *app.UpdateCompanyBranchHyCompanybranchPayload) http.ResponseWriter {\n\t// Setup service\n\tvar (\n\t\tlogBuf bytes.Buffer\n\n\t\trespSetter goatest.ResponseSetterFunc = func(r interface{}) {}\n\t)\n\tif service == nil {\n\t\tservice = goatest.Service(&logBuf, respSetter)\n\t} else {\n\t\tlogger := log.New(&logBuf, \"\", log.Ltime)\n\t\tservice.WithLogger(goa.NewLogger(logger))\n\t\tnewEncoder := func(io.Writer) goa.Encoder { return respSetter }\n\t\tservice.Encoder = goa.NewHTTPEncoder() // Make sure the code ends up using this decoder\n\t\tservice.Encoder.Register(newEncoder, \"*/*\")\n\t}\n\n\t// Validate payload\n\terr := payload.Validate()\n\tif err != 
nil {\n\t\te, ok := err.(goa.ServiceError)\n\t\tif !ok {\n\t\t\tpanic(err) // bug\n\t\t}\n\t\tt.Errorf(\"unexpected payload validation error: %+v\", e)\n\t\treturn nil\n\t}\n\n\t// Setup request context\n\trw := httptest.NewRecorder()\n\tu := &url.URL{\n\t\tPath: fmt.Sprintf(\"/api/company/branch/%v\", id),\n\t}\n\treq, _err := http.NewRequest(\"PUT\", u.String(), nil)\n\tif _err != nil {\n\t\tpanic(\"invalid test \" + _err.Error()) // bug\n\t}\n\tprms := url.Values{}\n\tprms[\"ID\"] = []string{fmt.Sprintf(\"%v\", id)}\n\tif ctx == nil {\n\t\tctx = context.Background()\n\t}\n\tgoaCtx := goa.NewContext(goa.WithAction(ctx, \"HyCompanybranchTest\"), rw, req, prms)\n\tupdateCompanyBranchCtx, __err := app.NewUpdateCompanyBranchHyCompanybranchContext(goaCtx, req, service)\n\tif __err != nil {\n\t\t_e, _ok := __err.(goa.ServiceError)\n\t\tif !_ok {\n\t\t\tpanic(\"invalid test data \" + __err.Error()) // bug\n\t\t}\n\t\tt.Errorf(\"unexpected parameter validation error: %+v\", _e)\n\t\treturn nil\n\t}\n\tupdateCompanyBranchCtx.Payload = payload\n\n\t// Perform action\n\t__err = ctrl.UpdateCompanyBranch(updateCompanyBranchCtx)\n\n\t// Validate response\n\tif __err != nil {\n\t\tt.Fatalf(\"controller returned %+v, logs:\\n%s\", __err, logBuf.String())\n\t}\n\tif rw.Code != 404 {\n\t\tt.Errorf(\"invalid response status code: got %+v, expected 404\", rw.Code)\n\t}\n\n\t// Return results\n\treturn rw\n}", "func DeleteBranchProtection(ctx *context.APIContext) {\n\t// swagger:operation DELETE /repos/{owner}/{repo}/branch_protections/{name} repository repoDeleteBranchProtection\n\t// ---\n\t// summary: Delete a specific branch protection for the repository\n\t// produces:\n\t// - application/json\n\t// parameters:\n\t// - name: owner\n\t// in: path\n\t// description: owner of the repo\n\t// type: string\n\t// required: true\n\t// - name: repo\n\t// in: path\n\t// description: name of the repo\n\t// type: string\n\t// required: true\n\t// - name: name\n\t// in: path\n\t// description: name of protected branch\n\t// type: string\n\t// required: true\n\t// responses:\n\t// \"204\":\n\t// \"$ref\": \"#/responses/empty\"\n\t// \"404\":\n\t// \"$ref\": \"#/responses/notFound\"\n\n\trepo := ctx.Repo.Repository\n\tbpName := ctx.Params(\":name\")\n\tbp, err := git_model.GetProtectedBranchRuleByName(ctx, repo.ID, bpName)\n\tif err != nil {\n\t\tctx.Error(http.StatusInternalServerError, \"GetProtectedBranchByID\", err)\n\t\treturn\n\t}\n\tif bp == nil || bp.RepoID != repo.ID {\n\t\tctx.NotFound()\n\t\treturn\n\t}\n\n\tif err := git_model.DeleteProtectedBranch(ctx, ctx.Repo.Repository.ID, bp.ID); err != nil {\n\t\tctx.Error(http.StatusInternalServerError, \"DeleteProtectedBranch\", err)\n\t\treturn\n\t}\n\n\tctx.Status(http.StatusNoContent)\n}", "func (a *Client) UpdateBranch(params *UpdateBranchParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*UpdateBranchOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewUpdateBranchParams()\n\t}\n\top := &runtime.ClientOperation{\n\t\tID: \"updateBranch\",\n\t\tMethod: \"PUT\",\n\t\tPathPattern: \"/vcs/branch/{branchID}\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\", \"https\"},\n\t\tParams: params,\n\t\tReader: &UpdateBranchReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t}\n\tfor _, opt := range opts {\n\t\topt(op)\n\t}\n\n\tresult, err := 
a.transport.Submit(op)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsuccess, ok := result.(*UpdateBranchOK)\n\tif ok {\n\t\treturn success, nil\n\t}\n\t// unexpected success response\n\t// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue\n\tmsg := fmt.Sprintf(\"unexpected success response for updateBranch: API contract not enforced by server. Client expected to get an error, but got: %T\", result)\n\tpanic(msg)\n}", "func UpdateCompanyBranchHyCompanybranchOKID(t goatest.TInterface, ctx context.Context, service *goa.Service, ctrl app.HyCompanybranchController, id int, payload *app.UpdateCompanyBranchHyCompanybranchPayload) (http.ResponseWriter, *app.CompanyID) {\n\t// Setup service\n\tvar (\n\t\tlogBuf bytes.Buffer\n\t\tresp interface{}\n\n\t\trespSetter goatest.ResponseSetterFunc = func(r interface{}) { resp = r }\n\t)\n\tif service == nil {\n\t\tservice = goatest.Service(&logBuf, respSetter)\n\t} else {\n\t\tlogger := log.New(&logBuf, \"\", log.Ltime)\n\t\tservice.WithLogger(goa.NewLogger(logger))\n\t\tnewEncoder := func(io.Writer) goa.Encoder { return respSetter }\n\t\tservice.Encoder = goa.NewHTTPEncoder() // Make sure the code ends up using this decoder\n\t\tservice.Encoder.Register(newEncoder, \"*/*\")\n\t}\n\n\t// Validate payload\n\terr := payload.Validate()\n\tif err != nil {\n\t\te, ok := err.(goa.ServiceError)\n\t\tif !ok {\n\t\t\tpanic(err) // bug\n\t\t}\n\t\tt.Errorf(\"unexpected payload validation error: %+v\", e)\n\t\treturn nil, nil\n\t}\n\n\t// Setup request context\n\trw := httptest.NewRecorder()\n\tu := &url.URL{\n\t\tPath: fmt.Sprintf(\"/api/company/branch/%v\", id),\n\t}\n\treq, _err := http.NewRequest(\"PUT\", u.String(), nil)\n\tif _err != nil {\n\t\tpanic(\"invalid test \" + _err.Error()) // bug\n\t}\n\tprms := url.Values{}\n\tprms[\"ID\"] = []string{fmt.Sprintf(\"%v\", id)}\n\tif ctx == nil {\n\t\tctx = context.Background()\n\t}\n\tgoaCtx := goa.NewContext(goa.WithAction(ctx, \"HyCompanybranchTest\"), rw, req, prms)\n\tupdateCompanyBranchCtx, __err := app.NewUpdateCompanyBranchHyCompanybranchContext(goaCtx, req, service)\n\tif __err != nil {\n\t\t_e, _ok := __err.(goa.ServiceError)\n\t\tif !_ok {\n\t\t\tpanic(\"invalid test data \" + __err.Error()) // bug\n\t\t}\n\t\tt.Errorf(\"unexpected parameter validation error: %+v\", _e)\n\t\treturn nil, nil\n\t}\n\tupdateCompanyBranchCtx.Payload = payload\n\n\t// Perform action\n\t__err = ctrl.UpdateCompanyBranch(updateCompanyBranchCtx)\n\n\t// Validate response\n\tif __err != nil {\n\t\tt.Fatalf(\"controller returned %+v, logs:\\n%s\", __err, logBuf.String())\n\t}\n\tif rw.Code != 200 {\n\t\tt.Errorf(\"invalid response status code: got %+v, expected 200\", rw.Code)\n\t}\n\tvar mt *app.CompanyID\n\tif resp != nil {\n\t\tvar __ok bool\n\t\tmt, __ok = resp.(*app.CompanyID)\n\t\tif !__ok {\n\t\t\tt.Fatalf(\"invalid response media: got variable of type %T, value %+v, expected instance of app.CompanyID\", resp, resp)\n\t\t}\n\t\t__err = mt.Validate()\n\t\tif __err != nil {\n\t\t\tt.Errorf(\"invalid response media type: %s\", __err)\n\t\t}\n\t}\n\n\t// Return results\n\treturn rw, mt\n}", "func UpdateCompanyBranchHyCompanybranchOK(t goatest.TInterface, ctx context.Context, service *goa.Service, ctrl app.HyCompanybranchController, id int, payload *app.UpdateCompanyBranchHyCompanybranchPayload) (http.ResponseWriter, *app.Company) {\n\t// Setup service\n\tvar (\n\t\tlogBuf bytes.Buffer\n\t\tresp interface{}\n\n\t\trespSetter goatest.ResponseSetterFunc = 
func(r interface{}) { resp = r }\n\t)\n\tif service == nil {\n\t\tservice = goatest.Service(&logBuf, respSetter)\n\t} else {\n\t\tlogger := log.New(&logBuf, \"\", log.Ltime)\n\t\tservice.WithLogger(goa.NewLogger(logger))\n\t\tnewEncoder := func(io.Writer) goa.Encoder { return respSetter }\n\t\tservice.Encoder = goa.NewHTTPEncoder() // Make sure the code ends up using this decoder\n\t\tservice.Encoder.Register(newEncoder, \"*/*\")\n\t}\n\n\t// Validate payload\n\terr := payload.Validate()\n\tif err != nil {\n\t\te, ok := err.(goa.ServiceError)\n\t\tif !ok {\n\t\t\tpanic(err) // bug\n\t\t}\n\t\tt.Errorf(\"unexpected payload validation error: %+v\", e)\n\t\treturn nil, nil\n\t}\n\n\t// Setup request context\n\trw := httptest.NewRecorder()\n\tu := &url.URL{\n\t\tPath: fmt.Sprintf(\"/api/company/branch/%v\", id),\n\t}\n\treq, _err := http.NewRequest(\"PUT\", u.String(), nil)\n\tif _err != nil {\n\t\tpanic(\"invalid test \" + _err.Error()) // bug\n\t}\n\tprms := url.Values{}\n\tprms[\"ID\"] = []string{fmt.Sprintf(\"%v\", id)}\n\tif ctx == nil {\n\t\tctx = context.Background()\n\t}\n\tgoaCtx := goa.NewContext(goa.WithAction(ctx, \"HyCompanybranchTest\"), rw, req, prms)\n\tupdateCompanyBranchCtx, __err := app.NewUpdateCompanyBranchHyCompanybranchContext(goaCtx, req, service)\n\tif __err != nil {\n\t\t_e, _ok := __err.(goa.ServiceError)\n\t\tif !_ok {\n\t\t\tpanic(\"invalid test data \" + __err.Error()) // bug\n\t\t}\n\t\tt.Errorf(\"unexpected parameter validation error: %+v\", _e)\n\t\treturn nil, nil\n\t}\n\tupdateCompanyBranchCtx.Payload = payload\n\n\t// Perform action\n\t__err = ctrl.UpdateCompanyBranch(updateCompanyBranchCtx)\n\n\t// Validate response\n\tif __err != nil {\n\t\tt.Fatalf(\"controller returned %+v, logs:\\n%s\", __err, logBuf.String())\n\t}\n\tif rw.Code != 200 {\n\t\tt.Errorf(\"invalid response status code: got %+v, expected 200\", rw.Code)\n\t}\n\tvar mt *app.Company\n\tif resp != nil {\n\t\tvar __ok bool\n\t\tmt, __ok = resp.(*app.Company)\n\t\tif !__ok {\n\t\t\tt.Fatalf(\"invalid response media: got variable of type %T, value %+v, expected instance of app.Company\", resp, resp)\n\t\t}\n\t\t__err = mt.Validate()\n\t\tif __err != nil {\n\t\t\tt.Errorf(\"invalid response media type: %s\", __err)\n\t\t}\n\t}\n\n\t// Return results\n\treturn rw, mt\n}", "func GetProtectBranchesByRepoID(repoID int64) ([]*ProtectBranch, error) {\n\tprotectBranches := make([]*ProtectBranch, 0, 2)\n\treturn protectBranches, x.Where(\"repo_id = ? 
and protected = ?\", repoID, true).Asc(\"name\").Find(&protectBranches)\n}", "func (s *SmartContract) UpdateOrgLogin(ctx contractapi.TransactionContextInterface, userName string, password string) error {\n\tuserLogin := UserLogin{\n\t\tPassword: password,\n\t}\n\n\tinfoAsBytes, _ := json.Marshal(userLogin)\n\n\treturn ctx.GetStub().PutState(\"login-org-\" + userName, infoAsBytes)\n}", "func (mr *MockSendUpdateQueryMockRecorder) UpdateBranch(arg0, arg1, arg2 interface{}) *gomock.Call {\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"UpdateBranch\", reflect.TypeOf((*MockSendUpdateQuery)(nil).UpdateBranch), arg0, arg1, arg2)\n}", "func (service *Service) UpdateProtectionMode(d9SecurityGroupID, protectionMode string) (*CloudSecurityGroupResponse, *http.Response, error) {\n\tif protectionMode != \"FullManage\" && protectionMode != \"ReadOnly\" {\n\t\treturn nil, nil, fmt.Errorf(\"protection mode can be FullManage or ReadOnly\")\n\t}\n\n\tv := new(CloudSecurityGroupResponse)\n\trelativeURL := fmt.Sprintf(\"%s/%s/%s\", awsSgResourcePath, d9SecurityGroupID, awsSgResourceProtectionMode)\n\tbody := UpdateProtectionModeQueryParameters{\n\t\tProtectionMode: protectionMode,\n\t}\n\n\tresp, err := service.Client.NewRequestDo(\"POST\", relativeURL, nil, body, v)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treturn v, resp, nil\n}", "func (mr *MockPullRequestClientMockRecorder) UpdatePullRequestBranch(org, repo, number, expectedHeadSha interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"UpdatePullRequestBranch\", reflect.TypeOf((*MockPullRequestClient)(nil).UpdatePullRequestBranch), org, repo, number, expectedHeadSha)\n}", "func NewBranchProtection(ctx *pulumi.Context,\n\tname string, args *BranchProtectionArgs, opts ...pulumi.ResourceOption) (*BranchProtection, error) {\n\tif args == nil {\n\t\treturn nil, errors.New(\"missing one or more required arguments\")\n\t}\n\n\tif args.Branch == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'Branch'\")\n\t}\n\tif args.Project == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'Project'\")\n\t}\n\topts = internal.PkgResourceDefaultOpts(opts)\n\tvar resource BranchProtection\n\terr := ctx.RegisterResource(\"gitlab:index/branchProtection:BranchProtection\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func (m *MarkerIndexBranchIDMapping) Update(other objectstorage.StorableObject) {\n\tpanic(\"updates disabled\")\n}", "func (mr *MockRepositoryInterfaceMockRecorder) GitHubUpdateClaGroupID(ctx, repositoryID, claGroupID interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"GitHubUpdateClaGroupID\", reflect.TypeOf((*MockRepositoryInterface)(nil).GitHubUpdateClaGroupID), ctx, repositoryID, claGroupID)\n}", "func UpdateAccount(w http.ResponseWriter, r *http.Request) {\n\tlogin := mux.Vars(r)[\"login\"]\n\toauth, ok := OAuthToken(r)\n\tif !ok {\n\t\tpanic(\"Request was authorized but no OAuth token is available!\") // this should never happen\n\t}\n\n\taccount, ok := data.GetAccountByLogin(login)\n\tif !ok {\n\t\tPrintErrorJSON(w, r, \"The requested account does not exist\", http.StatusNotFound)\n\t\treturn\n\t}\n\n\tif oauth.Token.AccountUUID.String != account.UUID || !oauth.Match.Contains(\"account-write\") && !oauth.Match.Contains(\"account-admin\") {\n\t\tPrintErrorJSON(w, r, \"Access to requested account forbidden\", 
http.StatusUnauthorized)\n\t\treturn\n\t}\n\n\tmarshal := &data.AccountMarshaler{WithMail: true, WithAffiliation: true, Account: account}\n\n\tdec := json.NewDecoder(r.Body)\n\terr := dec.Decode(marshal)\n\tif err != nil {\n\t\tPrintErrorJSON(w, r, \"Error while processing account\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\terr = account.Update()\n\tif err != nil {\n\t\tPrintErrorJSON(w, r, \"Error while processing account\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tw.Header().Add(\"Cache-Control\", \"no-cache\")\n\tw.Header().Add(\"Content-Type\", \"application/json\")\n\tenc := json.NewEncoder(w)\n\terr = enc.Encode(marshal)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}", "func (_PermInterface *PermInterfaceTransactor) ApproveOrg(opts *bind.TransactOpts, _orgId string, _enodeId string, _ip string, _port uint16, _raftport uint16, _account common.Address) (*types.Transaction, error) {\n\treturn _PermInterface.contract.Transact(opts, \"approveOrg\", _orgId, _enodeId, _ip, _port, _raftport, _account)\n}", "func (repo PostgresPartnershipRequestBlacklistRepository) UpdatePartnershipRequestBlacklist(blacklist businesslogic.PartnershipRequestBlacklistEntry) error {\n\tif repo.Database == nil {\n\t\treturn errors.New(dalutil.DataSourceNotSpecifiedError(repo))\n\t}\n\tstmt := repo.SqlBuilder.Update(\"\").Table(DasPartnershipRequestBlacklistTable)\n\tif blacklist.ID > 0 {\n\t\tstmt = stmt.Set(DAS_PARTNERSHIP_REQUEST_BLACKLIST_COL_WHITELISTED_IND, blacklist.Whitelisted)\n\n\t\tvar err error\n\t\tif tx, txErr := repo.Database.Begin(); txErr != nil {\n\t\t\treturn txErr\n\t\t} else {\n\t\t\t_, err = stmt.RunWith(repo.Database).Exec()\n\t\t\ttx.Commit()\n\t\t}\n\t\treturn err\n\t} else {\n\t\treturn errors.New(\"blacklist is not specified\")\n\t}\n}", "func (s *Server) updateOrgDeliveryFees(w http.ResponseWriter, r *http.Request) (interface{}, error) {\n\torg, allowed, err := s.orgModAllowed(w, r, false)\n\tif !allowed {\n\t\treturn nil, err\n\t}\n\n\treturn s.updateDeliveryFees(w, r, org.ID)\n}", "func (s *GitlabSCM) UpdateOrgMembership(ctx context.Context, opt *OrgMembershipOptions) error {\n\t// TODO no implementation provided yet\n\treturn ErrNotSupported{\n\t\tSCM: \"gitlab\",\n\t\tMethod: \"UpdateOrgMembership\",\n\t}\n}", "func (mr *MockClientMockRecorder) UpdatePullRequestBranch(org, repo, number, expectedHeadSha interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"UpdatePullRequestBranch\", reflect.TypeOf((*MockClient)(nil).UpdatePullRequestBranch), org, repo, number, expectedHeadSha)\n}", "func (b *BranchDAG) SetBranchConfirmed(branchID BranchID) (err error) {\n\tif b.InclusionState(branchID) == Confirmed {\n\t\treturn\n\t}\n\n\tif _, branchErr := b.SetBranchMonotonicallyLiked(branchID, true); branchErr != nil {\n\t\terr = errors.Errorf(\"failed to set Branch with %s to be monotonically liked: %w\", branchID, branchErr)\n\t\treturn\n\t}\n\n\tif _, branchErr := b.SetBranchFinalized(branchID, true); branchErr != nil {\n\t\terr = errors.Errorf(\"failed to set Branch with %s to be finalized: %w\", branchID, branchErr)\n\t\treturn\n\t}\n\n\treturn\n}", "func NewUpdateOrganizationBillingAddressMethodNotAllowed() *UpdateOrganizationBillingAddressMethodNotAllowed {\n\treturn &UpdateOrganizationBillingAddressMethodNotAllowed{}\n}", "func (c *client) EditOrgHook(org string, id int, req HookRequest) error {\n\tc.log(\"EditOrgHook\", org, id)\n\treturn c.editHook(org, nil, id, req)\n}", "func (s service) UpdateFundingAgreement(ctx 
context.Context, docID, fundingID []byte, data *Data) (documents.Model, jobs.JobID, error) {\n\tmodel, err := s.docSrv.GetCurrentVersion(ctx, docID)\n\tif err != nil {\n\t\tlog.Error(err)\n\t\treturn nil, jobs.NilJobID(), documents.ErrDocumentNotFound\n\t}\n\n\tdata.AgreementID = hexutil.Encode(fundingID)\n\tidx, err := extensions.FindAttributeSetIDX(model, data.AgreementID, AttrFundingLabel, agreementIDLabel, fundingFieldKey)\n\tif err != nil {\n\t\treturn nil, jobs.NilJobID(), err\n\t}\n\n\tvar collabs []identity.DID\n\tfor _, id := range []string{data.BorrowerID, data.FunderID} {\n\t\tdid, err := identity.NewDIDFromString(id)\n\t\tif err != nil {\n\t\t\treturn nil, jobs.NilJobID(), err\n\t\t}\n\n\t\tcollabs = append(collabs, did)\n\t}\n\n\t// overwriting is not enough because it is not required that\n\t// the funding payload contains all funding attributes\n\tmodel, err = extensions.DeleteAttributesSet(model, Data{}, idx, fundingFieldKey)\n\tif err != nil {\n\t\treturn nil, jobs.NilJobID(), err\n\t}\n\n\tattributes, err := extensions.FillAttributeList(*data, idx, fundingFieldKey)\n\tif err != nil {\n\t\treturn nil, jobs.NilJobID(), err\n\t}\n\n\terr = model.AddAttributes(\n\t\tdocuments.CollaboratorsAccess{\n\t\t\tReadWriteCollaborators: collabs,\n\t\t},\n\t\ttrue,\n\t\tattributes...,\n\t)\n\tif err != nil {\n\t\treturn nil, jobs.NilJobID(), err\n\t}\n\n\tmodel, jobID, _, err := s.docSrv.Update(ctx, model)\n\tif err != nil {\n\t\treturn nil, jobs.NilJobID(), err\n\t}\n\n\treturn model, jobID, nil\n}", "func (vcd *TestVCD) Test_UpdateOrg(check *C) {\n\tif vcd.skipAdminTests {\n\t\tcheck.Skip(fmt.Sprintf(TestRequiresSysAdminPrivileges, check.TestName()))\n\t}\n\torg, _ := GetAdminOrgByName(vcd.client, TestUpdateOrg)\n\tif org != (AdminOrg{}) {\n\t\terr := org.Delete(true, true)\n\t\tcheck.Assert(err, IsNil)\n\t}\n\ttask, err := CreateOrg(vcd.client, TestUpdateOrg, TestUpdateOrg, TestUpdateOrg, &types.OrgSettings{\n\t\tOrgLdapSettings: &types.OrgLdapSettingsType{OrgLdapMode: \"NONE\"},\n\t}, true)\n\tcheck.Assert(err, IsNil)\n\terr = task.WaitTaskCompletion()\n\tcheck.Assert(err, IsNil)\n\tAddToCleanupList(TestUpdateOrg, \"org\", \"\", \"TestUpdateOrg\")\n\t// fetch newly created org\n\torg, err = GetAdminOrgByName(vcd.client, TestUpdateOrg)\n\tcheck.Assert(err, IsNil)\n\tcheck.Assert(org.AdminOrg.Name, Equals, TestUpdateOrg)\n\tcheck.Assert(org.AdminOrg.Description, Equals, TestUpdateOrg)\n\torg.AdminOrg.OrgSettings.OrgGeneralSettings.DeployedVMQuota = 100\n\ttask, err = org.Update()\n\tcheck.Assert(err, IsNil)\n\t// Wait until update is complete\n\terr = task.WaitTaskCompletion()\n\tcheck.Assert(err, IsNil)\n\t// Refresh\n\terr = org.Refresh()\n\tcheck.Assert(err, IsNil)\n\tcheck.Assert(org.AdminOrg.OrgSettings.OrgGeneralSettings.DeployedVMQuota, Equals, 100)\n\t// Delete, with force and recursive true\n\terr = org.Delete(true, true)\n\tcheck.Assert(err, IsNil)\n\tdoesOrgExist(check, vcd)\n}", "func UpdateLimitAccount(currentLimit float64) {\n\n\terr := utils.CheckTempFile(PathFileC)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tjsonFile, err := os.Open(PathFileC)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\tdefer jsonFile.Close()\n\n\taccountJSON, err := ioutil.ReadAll(jsonFile)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\n\tvar account models.Account\n\n\terr = json.Unmarshal(accountJSON, &account)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\n\taccount.Accounts.AvailableLimit = currentLimit\n\n\taccountJSON, err = json.Marshal(account)\n\tif err != nil 
{\n\t\tfmt.Println(err)\n\t}\n\n\terr = ioutil.WriteFile(PathFileC, accountJSON, 0644)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n}", "func (r *SettingRepository) EditBranchByID(branch *models.Branch) error {\n\terr := r.C.Update(bson.M{\"_id\": branch.ID},\n\t\tbson.M{\"$set\": bson.M{\n\t\t\t\"name\": branch.Name,\n\t\t\t\"updatedat\": time.Now(),\n\t\t\t\"status\": branch.Status,\n\t\t}})\n\treturn err\n}", "func (c *client) UpdatePullRequestBranch(org, repo string, number int, expectedHeadSha *string) error {\n\tdurationLogger := c.log(\"UpdatePullRequestBranch\", org, repo)\n\tdefer durationLogger()\n\n\tdata := struct {\n\t\t// The expected SHA of the pull request's HEAD ref. This is the most recent commit on the pull request's branch.\n\t\t// If the expected SHA does not match the pull request's HEAD, you will receive a 422 Unprocessable Entity status.\n\t\t// You can use the \"List commits\" endpoint to find the most recent commit SHA. Default: SHA of the pull request's current HEAD ref.\n\t\tExpectedHeadSha *string `json:\"expected_head_sha,omitempty\"`\n\t}{\n\t\tExpectedHeadSha: expectedHeadSha,\n\t}\n\n\tcode, err := c.request(&request{\n\t\tmethod: http.MethodPut,\n\t\tpath: fmt.Sprintf(\"/repos/%s/%s/pulls/%d/update-branch\", org, repo, number),\n\t\taccept: \"application/vnd.github.lydian-preview+json\",\n\t\torg: org,\n\t\trequestBody: &data,\n\t\texitCodes: []int{202, 422},\n\t}, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif code == http.StatusUnprocessableEntity {\n\t\tmsg := \"mismatch expected head sha\"\n\t\tif expectedHeadSha != nil {\n\t\t\tmsg = fmt.Sprintf(\"%s: %s\", msg, *expectedHeadSha)\n\t\t}\n\t\treturn errors.New(msg)\n\t}\n\n\treturn nil\n}", "func UpdateCompanyBranchHyCompanybranchOKIdname(t goatest.TInterface, ctx context.Context, service *goa.Service, ctrl app.HyCompanybranchController, id int, payload *app.UpdateCompanyBranchHyCompanybranchPayload) (http.ResponseWriter, *app.CompanyIdname) {\n\t// Setup service\n\tvar (\n\t\tlogBuf bytes.Buffer\n\t\tresp interface{}\n\n\t\trespSetter goatest.ResponseSetterFunc = func(r interface{}) { resp = r }\n\t)\n\tif service == nil {\n\t\tservice = goatest.Service(&logBuf, respSetter)\n\t} else {\n\t\tlogger := log.New(&logBuf, \"\", log.Ltime)\n\t\tservice.WithLogger(goa.NewLogger(logger))\n\t\tnewEncoder := func(io.Writer) goa.Encoder { return respSetter }\n\t\tservice.Encoder = goa.NewHTTPEncoder() // Make sure the code ends up using this decoder\n\t\tservice.Encoder.Register(newEncoder, \"*/*\")\n\t}\n\n\t// Validate payload\n\terr := payload.Validate()\n\tif err != nil {\n\t\te, ok := err.(goa.ServiceError)\n\t\tif !ok {\n\t\t\tpanic(err) // bug\n\t\t}\n\t\tt.Errorf(\"unexpected payload validation error: %+v\", e)\n\t\treturn nil, nil\n\t}\n\n\t// Setup request context\n\trw := httptest.NewRecorder()\n\tu := &url.URL{\n\t\tPath: fmt.Sprintf(\"/api/company/branch/%v\", id),\n\t}\n\treq, _err := http.NewRequest(\"PUT\", u.String(), nil)\n\tif _err != nil {\n\t\tpanic(\"invalid test \" + _err.Error()) // bug\n\t}\n\tprms := url.Values{}\n\tprms[\"ID\"] = []string{fmt.Sprintf(\"%v\", id)}\n\tif ctx == nil {\n\t\tctx = context.Background()\n\t}\n\tgoaCtx := goa.NewContext(goa.WithAction(ctx, \"HyCompanybranchTest\"), rw, req, prms)\n\tupdateCompanyBranchCtx, __err := app.NewUpdateCompanyBranchHyCompanybranchContext(goaCtx, req, service)\n\tif __err != nil {\n\t\t_e, _ok := __err.(goa.ServiceError)\n\t\tif !_ok {\n\t\t\tpanic(\"invalid test data \" + __err.Error()) // bug\n\t\t}\n\t\tt.Errorf(\"unexpected 
parameter validation error: %+v\", _e)\n\t\treturn nil, nil\n\t}\n\tupdateCompanyBranchCtx.Payload = payload\n\n\t// Perform action\n\t__err = ctrl.UpdateCompanyBranch(updateCompanyBranchCtx)\n\n\t// Validate response\n\tif __err != nil {\n\t\tt.Fatalf(\"controller returned %+v, logs:\\n%s\", __err, logBuf.String())\n\t}\n\tif rw.Code != 200 {\n\t\tt.Errorf(\"invalid response status code: got %+v, expected 200\", rw.Code)\n\t}\n\tvar mt *app.CompanyIdname\n\tif resp != nil {\n\t\tvar __ok bool\n\t\tmt, __ok = resp.(*app.CompanyIdname)\n\t\tif !__ok {\n\t\t\tt.Fatalf(\"invalid response media: got variable of type %T, value %+v, expected instance of app.CompanyIdname\", resp, resp)\n\t\t}\n\t\t__err = mt.Validate()\n\t\tif __err != nil {\n\t\t\tt.Errorf(\"invalid response media type: %s\", __err)\n\t\t}\n\t}\n\n\t// Return results\n\treturn rw, mt\n}", "func (_PermInterface *PermInterfaceSession) ApproveOrg(_orgId string, _enodeId string, _ip string, _port uint16, _raftport uint16, _account common.Address) (*types.Transaction, error) {\n\treturn _PermInterface.Contract.ApproveOrg(&_PermInterface.TransactOpts, _orgId, _enodeId, _ip, _port, _raftport, _account)\n}", "func (p *perm) CommitChange() {\n\tp.enforcer.SavePolicy()\n}", "func (m *MarkerBranchIDMappingManager) SetBranchID(marker *markers.Marker, branchID ledgerstate.BranchID) {\n\tm.tangle.Storage.MarkerIndexBranchIDMapping(marker.SequenceID(), NewMarkerIndexBranchIDMapping).Consume(func(markerIndexBranchIDMapping *MarkerIndexBranchIDMapping) {\n\t\tmarkerIndexBranchIDMapping.SetBranchID(marker.Index(), branchID)\n\t})\n}", "func (db *DB) UpdateCipher(newData bw.Cipher, owner string, ciphID string) error {\n\tiowner, err := strconv.ParseInt(owner, 10, 64)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ticiphID, err := strconv.ParseInt(ciphID, 10, 64)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfavorite := 0\n\tif newData.Favorite {\n\t\tfavorite = 1\n\t}\n\n\tstmt, err := db.db.Prepare(\"UPDATE ciphers SET type=$1, revisiondate=$2, data=$3, folderid=$4, favorite=$5 WHERE id=$6 AND owner=$7\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tbdata, err := newData.Data.Bytes()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = stmt.Exec(newData.Type, time.Now().Unix(), bdata, newData.FolderId, favorite, iciphID, iowner)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (_PermInterface *PermInterfaceTransactorSession) ApproveOrg(_orgId string, _enodeId string, _ip string, _port uint16, _raftport uint16, _account common.Address) (*types.Transaction, error) {\n\treturn _PermInterface.Contract.ApproveOrg(&_PermInterface.TransactOpts, _orgId, _enodeId, _ip, _port, _raftport, _account)\n}", "func (_PermInterface *PermInterfaceTransactor) UpdateOrgStatus(opts *bind.TransactOpts, _orgId string, _action *big.Int) (*types.Transaction, error) {\n\treturn _PermInterface.contract.Transact(opts, \"updateOrgStatus\", _orgId, _action)\n}", "func (m *MockSendUpdateQuery) UpdateBranch(arg0 context.Context, arg1 interfaces.NewBranch, arg2 bool) error {\n\tret := m.ctrl.Call(m, \"UpdateBranch\", arg0, arg1, arg2)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (r *DeviceCompliancePolicyGroupAssignmentRequest) Update(ctx context.Context, reqObj *DeviceCompliancePolicyGroupAssignment) error {\n\treturn r.JSONRequest(ctx, \"PATCH\", \"\", reqObj, nil)\n}", "func (_PermInterface *PermInterfaceSession) ApproveBlacklistedAccountRecovery(_orgId string, _account common.Address) (*types.Transaction, error) {\n\treturn 
_PermInterface.Contract.ApproveBlacklistedAccountRecovery(&_PermInterface.TransactOpts, _orgId, _account)\n}", "func (mr *MockRepositoryClientMockRecorder) GetBranchProtection(org, repo, branch interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"GetBranchProtection\", reflect.TypeOf((*MockRepositoryClient)(nil).GetBranchProtection), org, repo, branch)\n}", "func (_PermInterface *PermInterfaceTransactor) ApproveBlacklistedAccountRecovery(opts *bind.TransactOpts, _orgId string, _account common.Address) (*types.Transaction, error) {\n\treturn _PermInterface.contract.Transact(opts, \"approveBlacklistedAccountRecovery\", _orgId, _account)\n}", "func (c *client) UpdateOrgMembership(org, user string, admin bool) (*OrgMembership, error) {\n\tc.log(\"UpdateOrgMembership\", org, user, admin)\n\tom := OrgMembership{}\n\tif admin {\n\t\tom.Role = RoleAdmin\n\t} else {\n\t\tom.Role = RoleMember\n\t}\n\tif c.dry {\n\t\treturn &om, nil\n\t}\n\n\t_, err := c.request(&request{\n\t\tmethod: http.MethodPut,\n\t\tpath: fmt.Sprintf(\"/orgs/%s/memberships/%s\", org, user),\n\t\torg: org,\n\t\trequestBody: &om,\n\t\texitCodes: []int{200},\n\t}, &om)\n\treturn &om, err\n}", "func (c *Client) UpdateBarcodeRule(br *BarcodeRule) error {\n\treturn c.UpdateBarcodeRules([]int64{br.Id.Get()}, br)\n}", "func (operator *AccessOperator) UpdateCommit(cxt context.Context, option *UpdateCommitOption) error {\n\t//business first\n\tbusiness, err := operator.GetBusiness(cxt, operator.Business)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif business == nil {\n\t\tlogger.V(3).Infof(\"UpdateCommit: found no relative Business %s\", operator.Business)\n\t\treturn fmt.Errorf(\"No relative Business %s\", operator.Business)\n\t}\n\n\trequest := &accessserver.UpdateCommitReq{\n\t\tSeq: pkgcommon.Sequence(),\n\t\tBid: business.Bid,\n\t\tCommitid: option.CommitID,\n\t\tTemplateid: option.TemplateID,\n\t\tTemplate: option.Template,\n\t\tConfigs: option.Configs,\n\t\tChanges: option.Changes,\n\t\tOperator: operator.User,\n\t}\n\tgrpcOptions := []grpc.CallOption{\n\t\tgrpc.WaitForReady(true),\n\t}\n\tresponse, err := operator.Client.UpdateCommit(cxt, request, grpcOptions...)\n\tif err != nil {\n\t\tlogger.V(3).Infof(\"UpdateCommit %s failed, %s\", option.CommitID, err.Error())\n\t\treturn err\n\t}\n\tif response.ErrCode != common.ErrCode_E_OK {\n\t\tlogger.V(3).Infof(\"UpdateCommit %s successfully, but response Err, %s\", option.CommitID, response.ErrMsg)\n\t\treturn fmt.Errorf(\"%s\", response.ErrMsg)\n\t}\n\treturn nil\n\n}", "func (o *AuthUserUserPermission) UpdateGP(whitelist ...string) {\n\tif err := o.Update(boil.GetDB(), whitelist...); err != nil {\n\t\tpanic(boil.WrapErr(err))\n\t}\n}", "func (r *WorkbookWorksheetProtectionRequest) Update(ctx context.Context, reqObj *WorkbookWorksheetProtection) error {\n\treturn r.JSONRequest(ctx, \"PATCH\", \"\", reqObj, nil)\n}", "func (db *MySQLDB) UpdateTenant(ctx context.Context, tenant *Tenant) error {\n\tfLog := mysqlLog.WithField(\"func\", \"UpdateTenant\").WithField(\"RequestID\", ctx.Value(constants.RequestID))\n\n\texist, err := db.IsTenantRecIDExist(ctx, tenant.RecID)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif !exist {\n\t\treturn ErrNotFound\n\t}\n\n\torigin, err := db.GetTenantByRecID(ctx, tenant.RecID)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdomainChanged := origin.Domain != tenant.Domain\n\n\tq := \"UPDATE HANSIP_TENANT SET TENANT_NAME=?, TENANT_DOMAIN=?, DESCRIPTION=? 
WHERE REC_ID=?\"\n\t_, err = db.instance.ExecContext(ctx, q,\n\t\ttenant.Name, tenant.Domain, tenant.Description, tenant.RecID)\n\tif err != nil {\n\t\tfLog.Errorf(\"db.instance.ExecContext got %s. SQL = %s\", err.Error(), q)\n\t\treturn &ErrDBExecuteError{\n\t\t\tWrapped: err,\n\t\t\tMessage: \"Error UpdateTenant\",\n\t\t\tSQL: q,\n\t\t}\n\t}\n\n\tif domainChanged {\n\t\tq = \"UPDATE HANSIP_ROLE SET ROLE_DOMAIN=? WHERE ROLE_DOMAIN=?\"\n\t\t_, err = db.instance.ExecContext(ctx, q,\n\t\t\ttenant.Domain, origin.Domain)\n\t\tif err != nil {\n\t\t\tfLog.Errorf(\"db.instance.ExecContext got %s. SQL = %s\", err.Error(), q)\n\t\t\treturn &ErrDBExecuteError{\n\t\t\t\tWrapped: err,\n\t\t\t\tMessage: \"Error UpdateTenant\",\n\t\t\t\tSQL: q,\n\t\t\t}\n\t\t}\n\n\t\tq = \"UPDATE HANSIP_GROUP SET GROUP_DOMAIN=? WHERE GROUP_DOMAIN=?\"\n\t\t_, err = db.instance.ExecContext(ctx, q,\n\t\t\ttenant.Domain, origin.Domain)\n\t\tif err != nil {\n\t\t\tfLog.Errorf(\"db.instance.ExecContext got %s. SQL = %s\", err.Error(), q)\n\t\t\treturn &ErrDBExecuteError{\n\t\t\t\tWrapped: err,\n\t\t\t\tMessage: \"Error UpdateTenant\",\n\t\t\t\tSQL: q,\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}", "func addBranchRestrictions(ft *factsTable, b *Block, br branch) {\n\tc := b.Controls[0]\n\tswitch br {\n\tcase negative:\n\t\taddRestrictions(b, ft, boolean, nil, c, eq)\n\tcase positive:\n\t\taddRestrictions(b, ft, boolean, nil, c, lt|gt)\n\tdefault:\n\t\tpanic(\"unknown branch\")\n\t}\n\tif tr, has := domainRelationTable[c.Op]; has {\n\t\t// When we branched from parent we learned a new set of\n\t\t// restrictions. Update the factsTable accordingly.\n\t\td := tr.d\n\t\tif d == signed && ft.isNonNegative(c.Args[0]) && ft.isNonNegative(c.Args[1]) {\n\t\t\td |= unsigned\n\t\t}\n\t\tswitch c.Op {\n\t\tcase OpIsInBounds, OpIsSliceInBounds:\n\t\t\t// 0 <= a0 < a1 (or 0 <= a0 <= a1)\n\t\t\t//\n\t\t\t// On the positive branch, we learn:\n\t\t\t// signed: 0 <= a0 < a1 (or 0 <= a0 <= a1)\n\t\t\t// unsigned: a0 < a1 (or a0 <= a1)\n\t\t\t//\n\t\t\t// On the negative branch, we learn (0 > a0 ||\n\t\t\t// a0 >= a1). 
In the unsigned domain, this is\n\t\t\t// simply a0 >= a1 (which is the reverse of the\n\t\t\t// positive branch, so nothing surprising).\n\t\t\t// But in the signed domain, we can't express the ||\n\t\t\t// condition, so check if a0 is non-negative instead,\n\t\t\t// to be able to learn something.\n\t\t\tswitch br {\n\t\t\tcase negative:\n\t\t\t\td = unsigned\n\t\t\t\tif ft.isNonNegative(c.Args[0]) {\n\t\t\t\t\td |= signed\n\t\t\t\t}\n\t\t\t\taddRestrictions(b, ft, d, c.Args[0], c.Args[1], tr.r^(lt|gt|eq))\n\t\t\tcase positive:\n\t\t\t\taddRestrictions(b, ft, signed, ft.zero, c.Args[0], lt|eq)\n\t\t\t\taddRestrictions(b, ft, d, c.Args[0], c.Args[1], tr.r)\n\t\t\t}\n\t\tdefault:\n\t\t\tswitch br {\n\t\t\tcase negative:\n\t\t\t\taddRestrictions(b, ft, d, c.Args[0], c.Args[1], tr.r^(lt|gt|eq))\n\t\t\tcase positive:\n\t\t\t\taddRestrictions(b, ft, d, c.Args[0], c.Args[1], tr.r)\n\t\t\t}\n\t\t}\n\n\t}\n}", "func (mr *MockClientMockRecorder) GetBranchProtection(org, repo, branch interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"GetBranchProtection\", reflect.TypeOf((*MockClient)(nil).GetBranchProtection), org, repo, branch)\n}", "func (fbo *folderBranchOps) MigrateToImplicitTeam(\n\tctx context.Context, id tlf.ID) (err error) {\n\t// Only MasterBranch FBOs may be migrated.\n\tfb := data.FolderBranch{Tlf: id, Branch: data.MasterBranch}\n\tif fb != fbo.folderBranch {\n\t\t// TODO: log instead of panic?\n\t\tpanic(WrongOpsError{fbo.folderBranch, fb})\n\t}\n\n\tfbo.log.CDebugf(ctx, \"Starting migration of TLF %s\", id)\n\tdefer func() {\n\t\tfbo.deferLog.CDebugf(\n\t\t\tctx, \"Finished migration of TLF %s, err=%+v\", id, err)\n\t}()\n\n\tif id.Type() != tlf.Private && id.Type() != tlf.Public {\n\t\treturn errors.Errorf(\"Cannot migrate a TLF of type: %s\", id.Type())\n\t}\n\n\tlState := makeFBOLockState()\n\tfbo.mdWriterLock.Lock(lState)\n\tdefer fbo.mdWriterLock.Unlock(lState)\n\n\tmd, err := fbo.getMDForMigrationLocked(ctx, lState)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif md == (ImmutableRootMetadata{}) {\n\t\tfbo.log.CDebugf(ctx, \"Nothing to upgrade\")\n\t\treturn nil\n\t}\n\n\tif md.IsFinal() {\n\t\tfbo.log.CDebugf(ctx, \"No need to upgrade a finalized TLF\")\n\t\treturn nil\n\t}\n\n\tif md.TypeForKeying() == tlf.TeamKeying {\n\t\tfbo.log.CDebugf(ctx, \"Already migrated\")\n\t\treturn nil\n\t}\n\n\tname := string(md.GetTlfHandle().GetCanonicalName())\n\tfbo.log.CDebugf(ctx, \"Looking up implicit team for %s\", name)\n\tnewHandle, err := tlfhandle.ParseHandle(\n\t\tctx, fbo.config.KBPKI(), fbo.config.MDOps(), fbo.config,\n\t\tname, id.Type())\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Make sure the new handle contains just a team.\n\tif newHandle.TypeForKeying() != tlf.TeamKeying {\n\t\treturn errors.New(\"No corresponding implicit team yet\")\n\t}\n\n\tsession, err := fbo.config.KBPKI().GetCurrentSession(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tisWriter := true // getMDForMigrationLocked already checked this.\n\tnewMD, err := md.MakeSuccessorWithNewHandle(\n\t\tctx, newHandle, fbo.config.MetadataVersion(), fbo.config.Codec(),\n\t\tfbo.config.KeyManager(), fbo.config.KBPKI(), fbo.config.KBPKI(),\n\t\tfbo.config, md.mdID, isWriter)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif newMD.TypeForKeying() != tlf.TeamKeying {\n\t\treturn errors.New(\"Migration failed\")\n\t}\n\n\t// Add an empty operation to satisfy assumptions elsewhere.\n\tnewMD.AddOp(newRekeyOp())\n\n\treturn 
fbo.finalizeMDRekeyWriteLocked(\n\t\tctx, lState, newMD, session.VerifyingKey)\n}", "func (_PermInterface *PermInterfaceSession) UpdateOrgStatus(_orgId string, _action *big.Int) (*types.Transaction, error) {\n\treturn _PermInterface.Contract.UpdateOrgStatus(&_PermInterface.TransactOpts, _orgId, _action)\n}", "func (c *client) GetBranchProtection(org, repo, branch string) (*BranchProtection, error) {\n\tdurationLogger := c.log(\"GetBranchProtection\", org, repo, branch)\n\tdefer durationLogger()\n\n\tcode, body, err := c.requestRaw(&request{\n\t\tmethod: http.MethodGet,\n\t\tpath: fmt.Sprintf(\"/repos/%s/%s/branches/%s/protection\", org, repo, branch),\n\t\torg: org,\n\t\t// GitHub returns 404 for this call if either:\n\t\t// - The branch is not protected\n\t\t// - The access token used does not have sufficient privileges\n\t\t// We therefore need to introspect the response body.\n\t\texitCodes: []int{200, 404},\n\t})\n\n\tswitch {\n\tcase err != nil:\n\t\treturn nil, err\n\tcase code == 200:\n\t\tvar bp BranchProtection\n\t\tif err := json.Unmarshal(body, &bp); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn &bp, nil\n\tcase code == 404:\n\t\t// continue\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"unexpected status code: %d\", code)\n\t}\n\n\tvar ge githubError\n\tif err := json.Unmarshal(body, &ge); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// If the error was because the branch is not protected, we return a\n\t// nil pointer to indicate this.\n\tif ge.Message == \"Branch not protected\" {\n\t\treturn nil, nil\n\t}\n\n\t// Otherwise we got some other 404 error.\n\treturn nil, fmt.Errorf(\"getting branch protection 404: %s\", ge.Message)\n}", "func (m *MarkerIndexBranchIDMapping) SetBranchID(index markers.Index, branchID ledgerstate.BranchID) {\n\tm.mappingMutex.Lock()\n\tdefer m.mappingMutex.Unlock()\n\n\tm.mapping.Set(index, branchID)\n}", "func IsUserInProtectBranchWhitelist(repoID, userID int64, branch string) bool {\n\thas, err := x.Where(\"repo_id = ?\", repoID).And(\"user_id = ?\", userID).And(\"name = ?\", branch).Get(new(ProtectBranchWhitelist))\n\treturn has && err == nil\n}", "func (m *MockRepositoryClient) RemoveBranchProtection(org, repo, branch string) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"RemoveBranchProtection\", org, repo, branch)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (m *MockClient) RemoveBranchProtection(org, repo, branch string) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"RemoveBranchProtection\", org, repo, branch)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (_PermInterface *PermInterfaceTransactorSession) ApproveBlacklistedAccountRecovery(_orgId string, _account common.Address) (*types.Transaction, error) {\n\treturn _PermInterface.Contract.ApproveBlacklistedAccountRecovery(&_PermInterface.TransactOpts, _orgId, _account)\n}", "func UpdateAccountEmail(w http.ResponseWriter, r *http.Request) {\n\n\tlogin := mux.Vars(r)[\"login\"]\n\toauth, ok := OAuthToken(r)\n\tif !ok {\n\t\tpanic(\"Missing OAuth token\")\n\t}\n\n\tacc, ok := data.GetAccountByLogin(login)\n\tif !ok {\n\t\tPrintErrorJSON(w, r, \"The requested account does not exist\", http.StatusNotFound)\n\t\treturn\n\t}\n\n\tif oauth.Token.AccountUUID.String != acc.UUID || !oauth.Match.Contains(\"account-write\") {\n\t\tPrintErrorJSON(w, r, \"Unauthorized account access\", http.StatusUnauthorized)\n\t\treturn\n\t}\n\n\tcred := &struct {\n\t\tPassword string `json:\"password\"`\n\t\tEmail string `json:\"email\"`\n\t}{}\n\n\tdec := 
json.NewDecoder(r.Body)\n\t_ = dec.Decode(cred)\n\n\tif !acc.VerifyPassword(cred.Password) {\n\t\tvalErr := &util.ValidationError{\n\t\t\tMessage: \"Invalid password\",\n\t\t\tFieldErrors: map[string]string{\"password\": \"Invalid password\"}}\n\t\tPrintErrorJSON(w, r, valErr, http.StatusBadRequest)\n\t\treturn\n\t}\n\n\terr := acc.UpdateEmail(cred.Email)\n\tif err != nil {\n\t\tPrintErrorJSON(w, r, err, http.StatusBadRequest)\n\t\treturn\n\t}\n\n\ttmplFields := &struct {\n\t\tFrom string\n\t\tTo string\n\t\tSubject string\n\t\tBody string\n\t}{}\n\ttmplFields.From = conf.GetSmtpCredentials().From\n\ttmplFields.To = cred.Email\n\ttmplFields.Subject = \"GIN account confirmation\"\n\ttmplFields.Body = \"The e-mail address of your GIN account has been successfully changed.\"\n\n\tcontent := util.MakeEmailTemplate(\"emailplain.txt\", tmplFields)\n\temail := &data.Email{}\n\terr = email.Create(util.NewStringSet(cred.Email), content.Bytes())\n\tif err != nil {\n\t\tmsg := \"An error occurred trying to create change e-mail address confirmation.\"\n\t\tPrintErrorJSON(w, r, msg, http.StatusInternalServerError)\n\t\treturn\n\t}\n}", "func NewUpdateOrganizationBillingAddressForbidden() *UpdateOrganizationBillingAddressForbidden {\n\treturn &UpdateOrganizationBillingAddressForbidden{}\n}", "func (m *MockPullRequestClient) UpdatePullRequestBranch(org, repo string, number int, expectedHeadSha *string) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"UpdatePullRequestBranch\", org, repo, number, expectedHeadSha)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func Update(w http.ResponseWriter, r *http.Request) {\n\tauthUser, err := auth.GetUserFromJWT(w, r)\n\tif err != nil {\n\t\tresponse.FormatStandardResponse(false, \"error-auth\", \"\", err.Error(), w)\n\t\treturn\n\t}\n\n\tdefer r.Body.Close()\n\n\t// Decode the JSON body\n\tacct := datastore.Account{}\n\terr = json.NewDecoder(r.Body).Decode(&acct)\n\tswitch {\n\t// Check we have some data\n\tcase err == io.EOF:\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tresponse.FormatStandardResponse(false, \"error-account-data\", \"\", \"No account data supplied\", w)\n\t\treturn\n\t\t// Check for parsing errors\n\tcase err != nil:\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tresponse.FormatStandardResponse(false, \"error-decode-json\", \"\", err.Error(), w)\n\t\treturn\n\t}\n\n\tupdateHandler(w, authUser, false, acct)\n}", "func (m *MockClient) UpdatePullRequestBranch(org, repo string, number int, expectedHeadSha *string) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"UpdatePullRequestBranch\", org, repo, number, expectedHeadSha)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (s *BucketService) UpdateBucket(ctx context.Context, id influxdb.ID, upd influxdb.BucketUpdate) (*influxdb.Bucket, error) {\n\tb, err := s.s.FindBucketByID(ctx, id)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err := authorizeWriteBucket(ctx, b.OrgID, id); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn s.s.UpdateBucket(ctx, id, upd)\n}", "func (fbo *folderBranchOps) TeamAbandoned(\n\tctx context.Context, tid keybase1.TeamID) {\n\tctx, cancelFunc := fbo.newCtxWithFBOIDWithCtx(ctx)\n\tdefer cancelFunc()\n\tfbo.log.CDebugf(ctx, \"Abandoning team %s\", tid)\n\tfbo.locallyFinalizeTLF(ctx)\n}", "func UpdateAccountPassword(w http.ResponseWriter, r *http.Request) {\n\tlogin := mux.Vars(r)[\"login\"]\n\toauth, ok := OAuthToken(r)\n\tif !ok {\n\t\tpanic(\"Request was authorized but no OAuth token is available!\") // this should never 
happen\n\t}\n\n\taccount, ok := data.GetAccountByLogin(login)\n\tif !ok {\n\t\tPrintErrorJSON(w, r, \"The requested account does not exist\", http.StatusNotFound)\n\t\treturn\n\t}\n\n\tif oauth.Token.AccountUUID.String != account.UUID || !oauth.Match.Contains(\"account-write\") {\n\t\tPrintErrorJSON(w, r, \"Access to requested account forbidden\", http.StatusUnauthorized)\n\t\treturn\n\t}\n\n\tpwData := &struct {\n\t\tPasswordOld string `json:\"password_old\"`\n\t\tPasswordNew string `json:\"password_new\"`\n\t\tPasswordNewRepeat string `json:\"password_new_repeat\"`\n\t}{}\n\tdec := json.NewDecoder(r.Body)\n\t_ = dec.Decode(pwData)\n\n\tif !account.VerifyPassword(pwData.PasswordOld) {\n\t\terr := &util.ValidationError{\n\t\t\tMessage: \"Unable to set password\",\n\t\t\tFieldErrors: map[string]string{\"password_old\": \"Wrong password\"}}\n\t\tPrintErrorJSON(w, r, err, http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tif len(pwData.PasswordNew) < 6 {\n\t\terr := &util.ValidationError{\n\t\t\tMessage: \"Unable to set password\",\n\t\t\tFieldErrors: map[string]string{\"password_new\": \"Password must be at least 6 characters long\"}}\n\t\tPrintErrorJSON(w, r, err, http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tif pwData.PasswordNew != pwData.PasswordNewRepeat {\n\t\terr := &util.ValidationError{\n\t\t\tMessage: \"Unable to set password\",\n\t\t\tFieldErrors: map[string]string{\"password_new_repeat\": \"Repeated password does not match\"}}\n\t\tPrintErrorJSON(w, r, err, http.StatusBadRequest)\n\t\treturn\n\t}\n\n\terr := account.UpdatePassword(pwData.PasswordNew)\n\tif err != nil {\n\t\tPrintErrorJSON(w, r, err, http.StatusInternalServerError)\n\t\treturn\n\t}\n}", "func (r *DeviceCompliancePolicyAssignmentRequest) Update(ctx context.Context, reqObj *DeviceCompliancePolicyAssignment) error {\n\treturn r.JSONRequest(ctx, \"PATCH\", \"\", reqObj, nil)\n}", "func (mr *MockOrganizationClientMockRecorder) UpdateOrgMembership(org, user, admin interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"UpdateOrgMembership\", reflect.TypeOf((*MockOrganizationClient)(nil).UpdateOrgMembership), org, user, admin)\n}", "func (c *AuthenticationComponent) UpdateAccount(ctx context.Context, Id uint32, email string) error {\n\tif err, _ := c.isValidID(Id); err != nil {\n\t\tc.Logger.Error(err.Error())\n\t\treturn err\n\t}\n\n\tif err, _ := c.isValidEmail(email); err != nil {\n\t\tc.Logger.Error(err.Error())\n\t\treturn err\n\t}\n\n\taccountId := strconv.Itoa(int(Id))\n\tif err := c.Client.Update(accountId, email); err != nil {\n\t\tc.Logger.Error(err.Error())\n\t\treturn err\n\t}\n\n\tc.Logger.Info(\"Successfully updated user account\", zap.Int(\"Id\", int(Id)))\n\treturn nil\n}", "func (_PermInterface *PermInterfaceTransactorSession) UpdateOrgStatus(_orgId string, _action *big.Int) (*types.Transaction, error) {\n\treturn _PermInterface.Contract.UpdateOrgStatus(&_PermInterface.TransactOpts, _orgId, _action)\n}", "func (mr *MockClientMockRecorder) UpdateOrgMembership(org, user, admin interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"UpdateOrgMembership\", reflect.TypeOf((*MockClient)(nil).UpdateOrgMembership), org, user, admin)\n}", "func UpdateVLANGroupMembership(w http.ResponseWriter, r *http.Request) {\n\thr := HTTPResponse{w}\n\tvlan, ok := getVLANHelper(hr, r)\n\tif !ok {\n\t\treturn\n\t}\n\tvar newGroups []string\n\tif err := json.NewDecoder(r.Body).Decode(&newGroups); err != nil 
{\n\t\thr.JSONMsg(http.StatusBadRequest, err.Error())\n\t\treturn\n\t}\n\n\t// Determine group modifications necessary\n\t// -1 : Removed\n\t// 0 : No change\n\t// 1 : Added\n\tctx := GetContext(r)\n\tgroupModifications := make(map[string]int)\n\tfor _, oldGroup := range vlan.VLANGroups() {\n\t\tgroupModifications[oldGroup] = -1\n\t}\n\tfor _, newGroup := range newGroups {\n\t\tgroupModifications[newGroup]++\n\t}\n\t// Make group modifications\n\tfor groupID, action := range groupModifications {\n\t\tvlanGroup, err := ctx.VLANGroup(groupID)\n\t\tif err != nil {\n\t\t\tif ctx.IsKeyNotFound(err) {\n\t\t\t\thr.JSONMsg(http.StatusBadRequest, \"group not found\")\n\t\t\t} else {\n\t\t\t\thr.JSONMsg(http.StatusInternalServerError, err.Error())\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\tswitch action {\n\t\tcase -1:\n\t\t\tif err := vlanGroup.RemoveVLAN(vlan); err != nil {\n\t\t\t\thr.JSONError(http.StatusInternalServerError, err)\n\t\t\t\treturn\n\t\t\t}\n\t\tcase 0:\n\t\t\tbreak\n\t\tcase 1:\n\t\t\tif err := vlanGroup.AddVLAN(vlan); err != nil {\n\t\t\t\thr.JSONError(http.StatusInternalServerError, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\n\thr.JSON(http.StatusOK, vlan.VLANGroups())\n}", "func UpdateAccount(w http.ResponseWriter, r *http.Request) {\n\tw.WriteHeader(http.StatusNotImplemented)\n}", "func (oo *OnuDeviceEntry) ModifySwImageActiveCommit(ctx context.Context, aCommitted uint8) {\n\too.mutexOnuSwImageIndications.Lock()\n\tdefer oo.mutexOnuSwImageIndications.Unlock()\n\tlogger.Debugw(ctx, \"software-image set active entity commit flag\", log.Fields{\n\t\t\"device-id\": oo.deviceID, \"committed\": aCommitted})\n\too.onuSwImageIndications.ActiveEntityEntry.IsCommitted = aCommitted\n\t//commit flag is not part of persistency data (yet) - no need to update that\n}", "func (o *Auth) UpdateGP(whitelist ...string) {\n\tif err := o.Update(boil.GetDB(), whitelist...); err != nil {\n\t\tpanic(boil.WrapErr(err))\n\t}\n}", "func (a *Client) MergeBranch(params *MergeBranchParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*MergeBranchOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewMergeBranchParams()\n\t}\n\top := &runtime.ClientOperation{\n\t\tID: \"mergeBranch\",\n\t\tMethod: \"POST\",\n\t\tPathPattern: \"/vcs/branch/{branchID}/merge\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\", \"https\"},\n\t\tParams: params,\n\t\tReader: &MergeBranchReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t}\n\tfor _, opt := range opts {\n\t\topt(op)\n\t}\n\n\tresult, err := a.transport.Submit(op)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsuccess, ok := result.(*MergeBranchOK)\n\tif ok {\n\t\treturn success, nil\n\t}\n\t// unexpected success response\n\t// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue\n\tmsg := fmt.Sprintf(\"unexpected success response for mergeBranch: API contract not enforced by server. 
Client expected to get an error, but got: %T\", result)\n\tpanic(msg)\n}", "func UpdateCgroupPermission(CgroupBase string, device *types.Device, isAddDevice bool) error {\n\tvar path string\n\n\tif isAddDevice {\n\t\tpath = filepath.Join(CgroupBase, \"devices.allow\")\n\t} else {\n\t\tpath = filepath.Join(CgroupBase, \"devices.deny\")\n\t}\n\tvalue := device.CgroupString()\n\tif err := ioutil.WriteFile(path, []byte(value), 0600); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (mr *MockClientMockRecorder) UpdateTeamRepo(id, org, repo, permission interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"UpdateTeamRepo\", reflect.TypeOf((*MockClient)(nil).UpdateTeamRepo), id, org, repo, permission)\n}", "func (o BranchProtectionOutput) BranchProtectionId() pulumi.IntOutput {\n\treturn o.ApplyT(func(v *BranchProtection) pulumi.IntOutput { return v.BranchProtectionId }).(pulumi.IntOutput)\n}", "func (m *MockClient) UpdateOrgMembership(org, user string, admin bool) (*github.OrgMembership, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"UpdateOrgMembership\", org, user, admin)\n\tret0, _ := ret[0].(*github.OrgMembership)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (c *client) UpdateTeamRepo(id int, org, repo string, permission TeamPermission) error {\n\tc.logger.WithField(\"methodName\", \"UpdateTeamRepo\").\n\t\tWarn(\"method is deprecated, and will result in multiple api calls to achieve result\")\n\tdurationLogger := c.log(\"UpdateTeamRepo\", id, org, repo, permission)\n\tdefer durationLogger()\n\n\tif c.fake || c.dry {\n\t\treturn nil\n\t}\n\n\torganization, err := c.GetOrg(org)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdata := struct {\n\t\tPermission string `json:\"permission\"`\n\t}{\n\t\tPermission: string(permission),\n\t}\n\n\t_, err = c.request(&request{\n\t\tmethod: http.MethodPut,\n\t\tpath: fmt.Sprintf(\"/organizations/%d/team/%d/repos/%s/%s\", organization.Id, id, org, repo),\n\t\torg: org,\n\t\trequestBody: &data,\n\t\texitCodes: []int{204},\n\t}, nil)\n\treturn err\n}", "func (m *MockOrganizationClient) UpdateOrgMembership(org, user string, admin bool) (*github.OrgMembership, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"UpdateOrgMembership\", org, user, admin)\n\tret0, _ := ret[0].(*github.OrgMembership)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (b Branch) Fix(ctx context.Context, c *github.Client, owner, repo string) error {\n\tlog.Warn().\n\t\tStr(\"org\", owner).\n\t\tStr(\"repo\", repo).\n\t\tStr(\"area\", polName).\n\t\tMsg(\"Action fix is configured, but not implemented.\")\n\treturn nil\n}", "func (o *AuthUser) UpdateGP(whitelist ...string) {\n\tif err := o.Update(boil.GetDB(), whitelist...); err != nil {\n\t\tpanic(boil.WrapErr(err))\n\t}\n}", "func (b *BranchDAG) updateInclusionState(branchID BranchID, inclusionState InclusionState) (err error) {\n\t// initialize stack for iteration\n\tbranchStack := list.New()\n\tbranchStack.PushBack(branchID)\n\n\t// iterate through stack\nProcessStack:\n\tfor branchStack.Len() >= 1 {\n\t\t// retrieve first element from the stack\n\t\tcurrentStackElement := branchStack.Front()\n\t\tbranchStack.Remove(currentStackElement)\n\n\t\t// load Branch\n\t\tcurrentCachedBranch := b.Branch(currentStackElement.Value.(BranchID))\n\n\t\t// unwrap current CachedBranch\n\t\tcurrentBranch := currentCachedBranch.Unwrap()\n\t\tif currentBranch == nil {\n\t\t\tcurrentCachedBranch.Release()\n\t\t\terr = errors.Errorf(\"failed to 
load Branch with %s: %w\", currentCachedBranch.ID(), cerrors.ErrFatal)\n\t\t\treturn\n\t\t}\n\n\t\t// execute case dependent logic\n\t\tswitch inclusionState {\n\t\tcase Confirmed:\n\t\t\t// abort if the current Branch is not liked or not finalized\n\t\t\tif !currentBranch.Liked() || !currentBranch.Finalized() {\n\t\t\t\tcurrentCachedBranch.Release()\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// abort if any parent Branch is not confirmed\n\t\t\tfor parentBranchID := range currentBranch.Parents() {\n\t\t\t\t// load parent Branch\n\t\t\t\tcachedParentBranch := b.Branch(parentBranchID)\n\n\t\t\t\t// unwrap parent Branch\n\t\t\t\tparentBranch := cachedParentBranch.Unwrap()\n\t\t\t\tif parentBranch == nil {\n\t\t\t\t\tcurrentCachedBranch.Release()\n\t\t\t\t\tcachedParentBranch.Release()\n\t\t\t\t\terr = errors.Errorf(\"failed to load parent Branch with %s: %w\", parentBranchID, cerrors.ErrFatal)\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\t// abort if parent Branch is not confirmed\n\t\t\t\tif parentBranch.InclusionState() != Confirmed {\n\t\t\t\t\tcurrentCachedBranch.Release()\n\t\t\t\t\tcachedParentBranch.Release()\n\t\t\t\t\tcontinue ProcessStack\n\t\t\t\t}\n\n\t\t\t\t// release parent CachedBranch\n\t\t\t\tcachedParentBranch.Release()\n\t\t\t}\n\n\t\t\t// abort if the Branch is already confirmed\n\t\t\tif !currentBranch.SetInclusionState(Confirmed) {\n\t\t\t\tcurrentCachedBranch.Release()\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// trigger event\n\t\t\tb.Events.BranchConfirmed.Trigger(NewBranchDAGEvent(currentCachedBranch))\n\t\tcase Rejected:\n\t\t\t// abort if the current Branch is not confirmed already\n\t\t\tif !currentBranch.SetInclusionState(Rejected) {\n\t\t\t\tcurrentCachedBranch.Release()\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// trigger event\n\t\t\tb.Events.BranchRejected.Trigger(NewBranchDAGEvent(currentCachedBranch))\n\t\tcase Pending:\n\t\t\t// abort if the current Branch is not confirmed already\n\t\t\tif !currentBranch.SetInclusionState(Pending) {\n\t\t\t\tcurrentCachedBranch.Release()\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// trigger event\n\t\t\tb.Events.BranchPending.Trigger(NewBranchDAGEvent(currentCachedBranch))\n\t\t}\n\n\t\t// iterate through ChildBranch references and queue found Branches for propagation\n\t\tcachedChildBranchReferences := b.ChildBranches(currentBranch.ID())\n\t\tfor _, cachedChildBranchReference := range cachedChildBranchReferences {\n\t\t\t// unwrap ChildBranch reference\n\t\t\tchildBranchReference := cachedChildBranchReference.Unwrap()\n\t\t\tif childBranchReference == nil {\n\t\t\t\tcurrentCachedBranch.Release()\n\t\t\t\tcachedChildBranchReferences.Release()\n\t\t\t\terr = errors.Errorf(\"failed to load ChildBranch reference: %w\", cerrors.ErrFatal)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// queue child Branch for propagation\n\t\t\tbranchStack.PushBack(childBranchReference.ChildBranchID())\n\t\t}\n\t\tcachedChildBranchReferences.Release()\n\n\t\t// release current CachedBranch\n\t\tcurrentCachedBranch.Release()\n\t}\n\n\treturn\n}", "func (r *RBAC) UpdateWhiteList(system, uid string, whitelist ...string) error {\n\tr.Cache.RemoveUser(system, uid)\n\treturn r.User.UpdateWhiteList(system, uid, whitelist...)\n}" ]
[ "0.6461851", "0.6423092", "0.6416864", "0.63546205", "0.61288905", "0.5720693", "0.5681801", "0.53321487", "0.5272768", "0.48651224", "0.4766242", "0.47318128", "0.45781916", "0.45240498", "0.45019257", "0.4427853", "0.4406283", "0.4384094", "0.43788534", "0.4349381", "0.43306437", "0.42248893", "0.42192593", "0.41811898", "0.41733363", "0.41718102", "0.417088", "0.41558424", "0.41506955", "0.41321653", "0.41203552", "0.41159227", "0.41019717", "0.40998563", "0.40937644", "0.40817487", "0.4080539", "0.4060079", "0.4054799", "0.4052626", "0.40225667", "0.40176925", "0.40074202", "0.399842", "0.39811942", "0.39528945", "0.3928695", "0.39006966", "0.38763925", "0.38722143", "0.38699263", "0.38648373", "0.38373187", "0.38314694", "0.38301647", "0.38275024", "0.38209575", "0.38156772", "0.38138902", "0.38106024", "0.3799447", "0.37932268", "0.3788386", "0.37852642", "0.3778877", "0.37767273", "0.37735566", "0.37731063", "0.37706372", "0.37658823", "0.37622204", "0.37590364", "0.3752378", "0.3747476", "0.37431014", "0.3736712", "0.3735243", "0.37256756", "0.37224832", "0.37195218", "0.3717617", "0.37172323", "0.37145248", "0.37094778", "0.37057728", "0.3691134", "0.368982", "0.3688388", "0.36744207", "0.36706236", "0.3670572", "0.3665798", "0.36581868", "0.36563718", "0.3652379", "0.36516804", "0.36483437", "0.36480847", "0.36432683", "0.36404896" ]
0.758683
0
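The three trailing fields of each row — the score list, the lone float, and the final integer — read as per-negative similarity scores, the positive document's own score, and that document's rank among all candidates; rank 0 here is consistent with 0.758683 exceeding every negative score (max ≈ 0.646). That interpretation is inferred from the field names and values in the dump, not documented anywhere in it. A minimal Go sketch of recomputing the rank under that assumption:

```go
// rankDocument counts how many candidate scores strictly exceed the
// positive document's score; rank 0 means the document scored highest.
// The semantics (scores as retrieval similarities, rank as a count of
// higher-scoring negatives) are an assumption inferred from the field
// names, not stated in the dump itself.
func rankDocument(documentScore float64, negativeScores []float64) int {
	rank := 0
	for _, s := range negativeScores {
		if s > documentScore {
			rank++
		}
	}
	return rank
}
```

For the row above, `rankDocument(0.758683, scores)` returns 0, matching the recorded rank.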
GetProtectBranchesByRepoID returns a list of ProtectBranch in a given repository.
func GetProtectBranchesByRepoID(repoID int64) ([]*ProtectBranch, error) { protectBranches := make([]*ProtectBranch, 0, 2) return protectBranches, x.Where("repo_id = ? and protected = ?", repoID, true).Asc("name").Find(&protectBranches) }
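The document field above is an xorm-style accessor built on a package-level engine `x`; the `ProtectBranch` model's `RepoID` and `Name` fields are visible in the `GetProtectBranchOfRepoByName` snippet among the negatives further down. A minimal caller sketch under those assumptions — the helper name is hypothetical, not part of the dataset row:

```go
// listProtectedBranchNames is a hypothetical caller of the accessor in the
// document field above. It assumes the same model package: a ProtectBranch
// struct with a Name field (as seen in GetProtectBranchOfRepoByName among
// the negatives) and an initialized package-level xorm engine behind x.
func listProtectedBranchNames(repoID int64) ([]string, error) {
	branches, err := GetProtectBranchesByRepoID(repoID)
	if err != nil {
		return nil, err
	}
	names := make([]string, 0, len(branches))
	for _, b := range branches {
		names = append(names, b.Name)
	}
	return names, nil
}
```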
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (c *client) GetBranches(org, repo string, onlyProtected bool) ([]Branch, error) {\n\tdurationLogger := c.log(\"GetBranches\", org, repo, onlyProtected)\n\tdefer durationLogger()\n\n\tvar branches []Branch\n\terr := c.readPaginatedResultsWithValues(\n\t\tfmt.Sprintf(\"/repos/%s/%s/branches\", org, repo),\n\t\turl.Values{\n\t\t\t\"protected\": []string{strconv.FormatBool(onlyProtected)},\n\t\t\t\"per_page\": []string{\"100\"},\n\t\t},\n\t\tacceptNone,\n\t\torg,\n\t\tfunc() interface{} { // newObj\n\t\t\treturn &[]Branch{}\n\t\t},\n\t\tfunc(obj interface{}) {\n\t\t\tbranches = append(branches, *(obj.(*[]Branch))...)\n\t\t},\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn branches, nil\n}", "func (r *SettingRepository) GetBranchesByOrgID(orgID string) []models.Branch {\n\tvar branches []models.Branch\n\torgid := bson.ObjectIdHex(orgID)\n\titer := r.C.Find(bson.M{\"orgid\": orgid}).Iter()\n\tresult := models.Branch{}\n\tfor iter.Next(&result) {\n\t\tbranches = append(branches, result)\n\t}\n\treturn branches\n}", "func ListBranchProtections(ctx *context.APIContext) {\n\t// swagger:operation GET /repos/{owner}/{repo}/branch_protections repository repoListBranchProtection\n\t// ---\n\t// summary: List branch protections for a repository\n\t// produces:\n\t// - application/json\n\t// parameters:\n\t// - name: owner\n\t// in: path\n\t// description: owner of the repo\n\t// type: string\n\t// required: true\n\t// - name: repo\n\t// in: path\n\t// description: name of the repo\n\t// type: string\n\t// required: true\n\t// responses:\n\t// \"200\":\n\t// \"$ref\": \"#/responses/BranchProtectionList\"\n\n\trepo := ctx.Repo.Repository\n\tbps, err := git_model.FindRepoProtectedBranchRules(ctx, repo.ID)\n\tif err != nil {\n\t\tctx.Error(http.StatusInternalServerError, \"GetProtectedBranches\", err)\n\t\treturn\n\t}\n\tapiBps := make([]*api.BranchProtection, len(bps))\n\tfor i := range bps {\n\t\tapiBps[i] = convert.ToBranchProtection(bps[i])\n\t}\n\n\tctx.JSON(http.StatusOK, apiBps)\n}", "func (c *Client) ListRepoBranches(user, repo string) ([]*Branch, error) {\n\tbranches := make([]*Branch, 0, 10)\n\treturn branches, c.getParsedResponse(\"GET\", fmt.Sprintf(\"/repos/%s/%s/branches\", user, repo), nil, nil, &branches)\n}", "func GetBranches(repo *models.Repository, skip, limit int) ([]*git.Branch, int, error) {\n\treturn git.GetBranchesByPath(repo.RepoPath(), skip, limit)\n}", "func (g *GitLab) Branches(ctx context.Context, user *model.User, repo *model.Repo, p *model.ListOptions) ([]string, error) {\n\ttoken := common.UserToken(ctx, repo, user)\n\tclient, err := newClient(g.url, token, g.SkipVerify)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t_repo, err := g.getProject(ctx, client, repo.Owner, repo.Name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tgitlabBranches, _, err := client.Branches.ListBranches(_repo.ID,\n\t\t&gitlab.ListBranchesOptions{ListOptions: gitlab.ListOptions{Page: p.Page, PerPage: p.PerPage}},\n\t\tgitlab.WithContext(ctx))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbranches := make([]string, 0)\n\tfor _, branch := range gitlabBranches {\n\t\tbranches = append(branches, branch.Name)\n\t}\n\treturn branches, nil\n}", "func handleRepo(ctx context.Context, client *github.Client, repo *github.Repository) error {\n\topt := &github.ListOptions{\n\t\tPerPage: 100,\n\t}\n\n\tbranches, resp, err := client.Repositories.ListBranches(ctx, *repo.Owner.Login, *repo.Name, opt)\n\tif resp.StatusCode == http.StatusNotFound || resp.StatusCode == http.StatusForbidden 
{\n\t\treturn nil\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, branch := range branches {\n\t\tif branch.GetName() == \"master\" && in(orgs, *repo.Owner.Login) {\n\t\t\t// we must get the individual branch for the branch protection to work\n\t\t\tb, _, err := client.Repositories.GetBranch(ctx, *repo.Owner.Login, *repo.Name, branch.GetName())\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\t// return early if it is already protected\n\t\t\tif b.GetProtected() {\n\t\t\t\tfmt.Printf(\"[OK] %s:%s is already protected\\n\", *repo.FullName, b.GetName())\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\t\tfmt.Printf(\"[UPDATE] %s:%s will be changed to protected\\n\", *repo.FullName, b.GetName())\n\t\t\tif dryrun {\n\t\t\t\t// return early\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\t\t// set the branch to be protected\n\t\t\tif _, _, err := client.Repositories.UpdateBranchProtection(ctx, *repo.Owner.Login, *repo.Name, b.GetName(), &github.ProtectionRequest{\n\t\t\t\tRequiredStatusChecks: &github.RequiredStatusChecks{\n\t\t\t\t\tStrict: false,\n\t\t\t\t\tContexts: []string{},\n\t\t\t\t},\n\t\t\t}); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}", "func GetBranches(token, owner, repo string) ([]*github.Branch, error) {\n\tclient, ctx := getOauthClient(token)\n\tres, _, err := client.Repositories.ListBranches(ctx, owner, repo, nil)\n\tif res == nil || err != nil {\n\t\treturn nil, err\n\t}\n\treturn res, nil\n}", "func GetBranchProtection(ctx *context.APIContext) {\n\t// swagger:operation GET /repos/{owner}/{repo}/branch_protections/{name} repository repoGetBranchProtection\n\t// ---\n\t// summary: Get a specific branch protection for the repository\n\t// produces:\n\t// - application/json\n\t// parameters:\n\t// - name: owner\n\t// in: path\n\t// description: owner of the repo\n\t// type: string\n\t// required: true\n\t// - name: repo\n\t// in: path\n\t// description: name of the repo\n\t// type: string\n\t// required: true\n\t// - name: name\n\t// in: path\n\t// description: name of protected branch\n\t// type: string\n\t// required: true\n\t// responses:\n\t// \"200\":\n\t// \"$ref\": \"#/responses/BranchProtection\"\n\t// \"404\":\n\t// \"$ref\": \"#/responses/notFound\"\n\n\trepo := ctx.Repo.Repository\n\tbpName := ctx.Params(\":name\")\n\tbp, err := git_model.GetProtectedBranchRuleByName(ctx, repo.ID, bpName)\n\tif err != nil {\n\t\tctx.Error(http.StatusInternalServerError, \"GetProtectedBranchByID\", err)\n\t\treturn\n\t}\n\tif bp == nil || bp.RepoID != repo.ID {\n\t\tctx.NotFound()\n\t\treturn\n\t}\n\n\tctx.JSON(http.StatusOK, convert.ToBranchProtection(bp))\n}", "func (client *Client) Branches(issueID, repositoryType string) (branches []*Branch, err error) {\n\tpath := fmt.Sprintf(\"/rest/dev-status/latest/issue/detail?issueId=%s&applicationType=%s&dataType=branch\", issueID, repositoryType)\n\tres, err := client.getRequest(path, http.StatusOK)\n\tif err != nil {\n\t\treturn []*Branch{}, fmt.Errorf(\"Branches failed request: %w\", err)\n\t}\n\tdefer res.Body.Close()\n\n\tvar result DevStatus\n\tbodyBytes, err := ioutil.ReadAll(res.Body)\n\tif err != nil {\n\t\treturn []*Branch{}, fmt.Errorf(\"Branches failed to read response body %w\", err)\n\t}\n\n\tif err := json.Unmarshal(bodyBytes, &result); err != nil {\n\t\treturn []*Branch{}, fmt.Errorf(\"Branches failed unmarshal response body: %w\", err)\n\t}\n\n\tif len(result.Errors) > 0 {\n\t\treturn []*Branch{}, fmt.Errorf(\"Branches found unexpected errors: %s\", result.Errors)\n\t}\n\tbranches = make([]*Branch, 
0)\n\tfor _, detail := range result.Details {\n\t\tbranches = append(branches, detail.Branches...)\n\t}\n\treturn branches, nil\n}", "func (server *RepositoriesService) ListBranches(ctx context.Context, project string, repo string, opt *ListOpts) (*Branches, *http.Response, error) {\n\tu := fmt.Sprintf(\"rest/api/1.0/projects/%s/repos/%s/branches\", project, repo)\n\treq, err := server.v1Client.NewRequest(http.MethodGet, u, nil, opt)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tvar branches Branches\n\tresp, err := server.v1Client.Do(req, &branches)\n\tlog.Infof(\"branch: %+v, error: %+v\", branches, err)\n\treturn &branches, resp, err\n}", "func (m *MockRepositoryClient) GetBranches(org, repo string, onlyProtected bool) ([]github.Branch, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"GetBranches\", org, repo, onlyProtected)\n\tret0, _ := ret[0].([]github.Branch)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func ListBranches(ctx *context.APIContext) {\n\t// swagger:operation GET /repos/{owner}/{repo}/branches repository repoListBranches\n\t// ---\n\t// summary: List a repository's branches\n\t// produces:\n\t// - application/json\n\t// parameters:\n\t// - name: owner\n\t// in: path\n\t// description: owner of the repo\n\t// type: string\n\t// required: true\n\t// - name: repo\n\t// in: path\n\t// description: name of the repo\n\t// type: string\n\t// required: true\n\t// - name: page\n\t// in: query\n\t// description: page number of results to return (1-based)\n\t// type: integer\n\t// - name: limit\n\t// in: query\n\t// description: page size of results\n\t// type: integer\n\t// responses:\n\t// \"200\":\n\t// \"$ref\": \"#/responses/BranchList\"\n\n\tvar totalNumOfBranches int64\n\tvar apiBranches []*api.Branch\n\n\tlistOptions := utils.GetListOptions(ctx)\n\n\tif !ctx.Repo.Repository.IsEmpty {\n\t\tif ctx.Repo.GitRepo == nil {\n\t\t\tctx.Error(http.StatusInternalServerError, \"Load git repository failed\", nil)\n\t\t\treturn\n\t\t}\n\n\t\tbranchOpts := git_model.FindBranchOptions{\n\t\t\tListOptions: listOptions,\n\t\t\tRepoID: ctx.Repo.Repository.ID,\n\t\t\tIsDeletedBranch: util.OptionalBoolFalse,\n\t\t}\n\t\tvar err error\n\t\ttotalNumOfBranches, err = git_model.CountBranches(ctx, branchOpts)\n\t\tif err != nil {\n\t\t\tctx.Error(http.StatusInternalServerError, \"CountBranches\", err)\n\t\t\treturn\n\t\t}\n\t\tif totalNumOfBranches == 0 { // sync branches immediately because non-empty repository should have at least 1 branch\n\t\t\ttotalNumOfBranches, err = repo_module.SyncRepoBranches(ctx, ctx.Repo.Repository.ID, 0)\n\t\t\tif err != nil {\n\t\t\t\tctx.ServerError(\"SyncRepoBranches\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\trules, err := git_model.FindRepoProtectedBranchRules(ctx, ctx.Repo.Repository.ID)\n\t\tif err != nil {\n\t\t\tctx.Error(http.StatusInternalServerError, \"FindMatchedProtectedBranchRules\", err)\n\t\t\treturn\n\t\t}\n\n\t\tbranches, err := git_model.FindBranches(ctx, branchOpts)\n\t\tif err != nil {\n\t\t\tctx.Error(http.StatusInternalServerError, \"GetBranches\", err)\n\t\t\treturn\n\t\t}\n\n\t\tapiBranches = make([]*api.Branch, 0, len(branches))\n\t\tfor i := range branches {\n\t\t\tc, err := ctx.Repo.GitRepo.GetBranchCommit(branches[i].Name)\n\t\t\tif err != nil {\n\t\t\t\t// Skip if this branch doesn't exist anymore.\n\t\t\t\tif git.IsErrNotExist(err) {\n\t\t\t\t\ttotalNumOfBranches--\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tctx.Error(http.StatusInternalServerError, \"GetCommit\", 
err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tbranchProtection := rules.GetFirstMatched(branches[i].Name)\n\t\t\tapiBranch, err := convert.ToBranch(ctx, ctx.Repo.Repository, branches[i].Name, c, branchProtection, ctx.Doer, ctx.Repo.IsAdmin())\n\t\t\tif err != nil {\n\t\t\t\tctx.Error(http.StatusInternalServerError, \"convert.ToBranch\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tapiBranches = append(apiBranches, apiBranch)\n\t\t}\n\t}\n\n\tctx.SetLinkHeader(int(totalNumOfBranches), listOptions.PageSize)\n\tctx.SetTotalCountHeader(totalNumOfBranches)\n\tctx.JSON(http.StatusOK, apiBranches)\n}", "func (m *MockClient) GetBranches(org, repo string, onlyProtected bool) ([]github.Branch, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"GetBranches\", org, repo, onlyProtected)\n\tret0, _ := ret[0].([]github.Branch)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func GetBranches(db *sqlx.DB) ([]Branch, error) {\n\n\tobjects := []Branch{}\n\n\terr := db.Select(&objects, \"SELECT * FROM branches\")\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn objects, nil\n}", "func GetBranches() []string {\n\treturn branches\n}", "func GetProtectBranchOfRepoByName(repoID int64, name string) (*ProtectBranch, error) {\n\tprotectBranch := &ProtectBranch{\n\t\tRepoID: repoID,\n\t\tName: name,\n\t}\n\thas, err := x.Get(protectBranch)\n\tif err != nil {\n\t\treturn nil, err\n\t} else if !has {\n\t\treturn nil, ErrBranchNotExist{args: map[string]any{\"name\": name}}\n\t}\n\treturn protectBranch, nil\n}", "func (g *Gitlab) ListBranches(scm *api.SCMConfig, repo string) ([]string, error) {\n\tclient, err := newGitlabClient(scm.Server, scm.Username, scm.Token)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbranches, _, err := client.Branches.ListBranches(repo)\n\tif err != nil {\n\t\tlog.Errorf(\"Fail to list branches for %s\", repo)\n\t\treturn nil, err\n\t}\n\n\tbranchNames := make([]string, len(branches))\n\tfor i, branch := range branches {\n\t\tbranchNames[i] = branch.Name\n\t}\n\n\treturn branchNames, nil\n}", "func (r *repoImpl) getFilteredBranches(ctx context.Context) ([]*git.Branch, error) {\n\tgitilesBranches, err := r.gitiles.Branches(ctx)\n\tif err != nil {\n\t\treturn nil, skerr.Wrap(err)\n\t}\n\t// Filter by includeBranches.\n\tnumBranches := len(gitilesBranches)\n\tif len(r.includeBranches) > 0 {\n\t\tnumBranches = len(r.includeBranches)\n\t}\n\tbranches := make([]*git.Branch, 0, numBranches)\n\tfor _, branch := range gitilesBranches {\n\t\tif (len(r.includeBranches) == 0 || util.In(branch.Name, r.includeBranches)) && (len(r.excludeBranches) == 0 || !util.In(branch.Name, r.excludeBranches)) {\n\t\t\tbranches = append(branches, branch)\n\t\t}\n\t}\n\treturn branches, nil\n}", "func (g *V3) ListBranches(repo string) ([]string, error) {\n\tbranches, resp, err := g.client.Branches.ListBranches(repo)\n\tif err != nil {\n\t\tlog.Errorf(\"Fail to list branches for %s\", repo)\n\t\treturn nil, convertGitlabError(err, resp)\n\t}\n\n\tbranchNames := make([]string, len(branches))\n\tfor i, branch := range branches {\n\t\tbranchNames[i] = branch.Name\n\t}\n\n\treturn branchNames, nil\n}", "func GetBranches(dbOwner, dbFolder, dbName string) (branches map[string]BranchEntry, err error) {\n\tdbQuery := `\n\t\tSELECT db.branch_heads\n\t\tFROM sqlite_databases AS db\n\t\tWHERE db.user_id = (\n\t\t\t\tSELECT user_id\n\t\t\t\tFROM users\n\t\t\t\tWHERE lower(user_name) = lower($1)\n\t\t\t)\n\t\t\tAND db.folder = $2\n\t\t\tAND db.db_name = $3`\n\terr = pdb.QueryRow(dbQuery, dbOwner, dbFolder, 
dbName).Scan(&branches)\n\tif err != nil {\n\t\tlog.Printf(\"Error when retrieving branch heads for database '%s%s%s': %v\\n\", dbOwner, dbFolder, dbName,\n\t\t\terr)\n\t\treturn nil, err\n\t}\n\treturn branches, nil\n}", "func Branches(etcdClient *etcd.Client, etcdPrefix string, repo string) col.Collection {\n\treturn col.NewCollection(\n\t\tetcdClient,\n\t\tpath.Join(etcdPrefix, branchesPrefix, repo),\n\t\tnil,\n\t\t&pfs.BranchInfo{},\n\t\tfunc(key string) error {\n\t\t\tif uuid.IsUUIDWithoutDashes(key) {\n\t\t\t\treturn fmt.Errorf(\"branch name cannot be a UUID V4\")\n\t\t\t}\n\t\t\treturn nil\n\t\t},\n\t\tnil,\n\t)\n}", "func (handler *InitHandler) handleBranches(c Community, r Repository) error {\n\t// if the branches are defined in the repositories, it means that\n\t// all the branches defined in the community will not inherited by repositories\n\tmapBranches := make(map[string]string)\n\n\tif len(r.ProtectedBranches) > 0 {\n\t\t// using repository branches\n\t\tglog.Infof(\"using repository branches: %s\", *r.Name)\n\t\tfor _, b := range r.ProtectedBranches {\n\t\t\tmapBranches[b] = b\n\t\t}\n\t} else {\n\t\t// using community branches\n\t\tglog.Infof(\"using community branches: %s\", *r.Name)\n\t\tfor _, b := range c.ProtectedBranches {\n\t\t\tmapBranches[b] = b\n\t\t}\n\t}\n\n\t// get branches from DB\n\tvar bs []database.Branches\n\terr := database.DBConnection.Model(&database.Branches{}).\n\t\tWhere(\"owner = ? and repo = ?\", c.Name, r.Name).Find(&bs).Error\n\tif err != nil {\n\t\tglog.Errorf(\"unable to get branches: %v\", err)\n\t\treturn err\n\t}\n\tmapBranchesInDB := make(map[string]string)\n\tfor _, b := range bs {\n\t\tmapBranchesInDB[b.Name] = strconv.Itoa(int(b.ID))\n\t}\n\n\t// un-protected branches\n\terr = handler.removeBranchProtections(c, r, mapBranches, mapBranchesInDB)\n\tif err != nil {\n\t\tglog.Errorf(\"unable to un-protected branches: %v\", err)\n\t}\n\n\t// protected branches\n\terr = handler.addBranchProtections(c, r, mapBranches, mapBranchesInDB)\n\tif err != nil {\n\t\tglog.Errorf(\"unable to protected branches: %v\", err)\n\t}\n\n\treturn nil\n}", "func CreateBranchProtection(ctx *context.APIContext) {\n\t// swagger:operation POST /repos/{owner}/{repo}/branch_protections repository repoCreateBranchProtection\n\t// ---\n\t// summary: Create a branch protections for a repository\n\t// consumes:\n\t// - application/json\n\t// produces:\n\t// - application/json\n\t// parameters:\n\t// - name: owner\n\t// in: path\n\t// description: owner of the repo\n\t// type: string\n\t// required: true\n\t// - name: repo\n\t// in: path\n\t// description: name of the repo\n\t// type: string\n\t// required: true\n\t// - name: body\n\t// in: body\n\t// schema:\n\t// \"$ref\": \"#/definitions/CreateBranchProtectionOption\"\n\t// responses:\n\t// \"201\":\n\t// \"$ref\": \"#/responses/BranchProtection\"\n\t// \"403\":\n\t// \"$ref\": \"#/responses/forbidden\"\n\t// \"404\":\n\t// \"$ref\": \"#/responses/notFound\"\n\t// \"422\":\n\t// \"$ref\": \"#/responses/validationError\"\n\n\tform := web.GetForm(ctx).(*api.CreateBranchProtectionOption)\n\trepo := ctx.Repo.Repository\n\n\truleName := form.RuleName\n\tif ruleName == \"\" {\n\t\truleName = form.BranchName //nolint\n\t}\n\tif len(ruleName) == 0 {\n\t\tctx.Error(http.StatusBadRequest, \"both rule_name and branch_name are empty\", \"both rule_name and branch_name are empty\")\n\t\treturn\n\t}\n\n\tisPlainRule := !git_model.IsRuleNameSpecial(ruleName)\n\tvar isBranchExist bool\n\tif isPlainRule {\n\t\tisBranchExist = 
git.IsBranchExist(ctx.Req.Context(), ctx.Repo.Repository.RepoPath(), ruleName)\n\t}\n\n\tprotectBranch, err := git_model.GetProtectedBranchRuleByName(ctx, repo.ID, ruleName)\n\tif err != nil {\n\t\tctx.Error(http.StatusInternalServerError, \"GetProtectBranchOfRepoByName\", err)\n\t\treturn\n\t} else if protectBranch != nil {\n\t\tctx.Error(http.StatusForbidden, \"Create branch protection\", \"Branch protection already exist\")\n\t\treturn\n\t}\n\n\tvar requiredApprovals int64\n\tif form.RequiredApprovals > 0 {\n\t\trequiredApprovals = form.RequiredApprovals\n\t}\n\n\twhitelistUsers, err := user_model.GetUserIDsByNames(ctx, form.PushWhitelistUsernames, false)\n\tif err != nil {\n\t\tif user_model.IsErrUserNotExist(err) {\n\t\t\tctx.Error(http.StatusUnprocessableEntity, \"User does not exist\", err)\n\t\t\treturn\n\t\t}\n\t\tctx.Error(http.StatusInternalServerError, \"GetUserIDsByNames\", err)\n\t\treturn\n\t}\n\tmergeWhitelistUsers, err := user_model.GetUserIDsByNames(ctx, form.MergeWhitelistUsernames, false)\n\tif err != nil {\n\t\tif user_model.IsErrUserNotExist(err) {\n\t\t\tctx.Error(http.StatusUnprocessableEntity, \"User does not exist\", err)\n\t\t\treturn\n\t\t}\n\t\tctx.Error(http.StatusInternalServerError, \"GetUserIDsByNames\", err)\n\t\treturn\n\t}\n\tapprovalsWhitelistUsers, err := user_model.GetUserIDsByNames(ctx, form.ApprovalsWhitelistUsernames, false)\n\tif err != nil {\n\t\tif user_model.IsErrUserNotExist(err) {\n\t\t\tctx.Error(http.StatusUnprocessableEntity, \"User does not exist\", err)\n\t\t\treturn\n\t\t}\n\t\tctx.Error(http.StatusInternalServerError, \"GetUserIDsByNames\", err)\n\t\treturn\n\t}\n\tvar whitelistTeams, mergeWhitelistTeams, approvalsWhitelistTeams []int64\n\tif repo.Owner.IsOrganization() {\n\t\twhitelistTeams, err = organization.GetTeamIDsByNames(repo.OwnerID, form.PushWhitelistTeams, false)\n\t\tif err != nil {\n\t\t\tif organization.IsErrTeamNotExist(err) {\n\t\t\t\tctx.Error(http.StatusUnprocessableEntity, \"Team does not exist\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tctx.Error(http.StatusInternalServerError, \"GetTeamIDsByNames\", err)\n\t\t\treturn\n\t\t}\n\t\tmergeWhitelistTeams, err = organization.GetTeamIDsByNames(repo.OwnerID, form.MergeWhitelistTeams, false)\n\t\tif err != nil {\n\t\t\tif organization.IsErrTeamNotExist(err) {\n\t\t\t\tctx.Error(http.StatusUnprocessableEntity, \"Team does not exist\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tctx.Error(http.StatusInternalServerError, \"GetTeamIDsByNames\", err)\n\t\t\treturn\n\t\t}\n\t\tapprovalsWhitelistTeams, err = organization.GetTeamIDsByNames(repo.OwnerID, form.ApprovalsWhitelistTeams, false)\n\t\tif err != nil {\n\t\t\tif organization.IsErrTeamNotExist(err) {\n\t\t\t\tctx.Error(http.StatusUnprocessableEntity, \"Team does not exist\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tctx.Error(http.StatusInternalServerError, \"GetTeamIDsByNames\", err)\n\t\t\treturn\n\t\t}\n\t}\n\n\tprotectBranch = &git_model.ProtectedBranch{\n\t\tRepoID: ctx.Repo.Repository.ID,\n\t\tRuleName: ruleName,\n\t\tCanPush: form.EnablePush,\n\t\tEnableWhitelist: form.EnablePush && form.EnablePushWhitelist,\n\t\tEnableMergeWhitelist: form.EnableMergeWhitelist,\n\t\tWhitelistDeployKeys: form.EnablePush && form.EnablePushWhitelist && form.PushWhitelistDeployKeys,\n\t\tEnableStatusCheck: form.EnableStatusCheck,\n\t\tStatusCheckContexts: form.StatusCheckContexts,\n\t\tEnableApprovalsWhitelist: form.EnableApprovalsWhitelist,\n\t\tRequiredApprovals: requiredApprovals,\n\t\tBlockOnRejectedReviews: 
form.BlockOnRejectedReviews,\n\t\tBlockOnOfficialReviewRequests: form.BlockOnOfficialReviewRequests,\n\t\tDismissStaleApprovals: form.DismissStaleApprovals,\n\t\tRequireSignedCommits: form.RequireSignedCommits,\n\t\tProtectedFilePatterns: form.ProtectedFilePatterns,\n\t\tUnprotectedFilePatterns: form.UnprotectedFilePatterns,\n\t\tBlockOnOutdatedBranch: form.BlockOnOutdatedBranch,\n\t}\n\n\terr = git_model.UpdateProtectBranch(ctx, ctx.Repo.Repository, protectBranch, git_model.WhitelistOptions{\n\t\tUserIDs: whitelistUsers,\n\t\tTeamIDs: whitelistTeams,\n\t\tMergeUserIDs: mergeWhitelistUsers,\n\t\tMergeTeamIDs: mergeWhitelistTeams,\n\t\tApprovalsUserIDs: approvalsWhitelistUsers,\n\t\tApprovalsTeamIDs: approvalsWhitelistTeams,\n\t})\n\tif err != nil {\n\t\tctx.Error(http.StatusInternalServerError, \"UpdateProtectBranch\", err)\n\t\treturn\n\t}\n\n\tif isBranchExist {\n\t\tif err = pull_service.CheckPRsForBaseBranch(ctx, ctx.Repo.Repository, ruleName); err != nil {\n\t\t\tctx.Error(http.StatusInternalServerError, \"CheckPRsForBaseBranch\", err)\n\t\t\treturn\n\t\t}\n\t} else {\n\t\tif !isPlainRule {\n\t\t\tif ctx.Repo.GitRepo == nil {\n\t\t\t\tctx.Repo.GitRepo, err = git.OpenRepository(ctx, ctx.Repo.Repository.RepoPath())\n\t\t\t\tif err != nil {\n\t\t\t\t\tctx.Error(http.StatusInternalServerError, \"OpenRepository\", err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tdefer func() {\n\t\t\t\t\tctx.Repo.GitRepo.Close()\n\t\t\t\t\tctx.Repo.GitRepo = nil\n\t\t\t\t}()\n\t\t\t}\n\t\t\t// FIXME: since we only need to recheck files protected rules, we could improve this\n\t\t\tmatchedBranches, err := git_model.FindAllMatchedBranches(ctx, ctx.Repo.Repository.ID, ruleName)\n\t\t\tif err != nil {\n\t\t\t\tctx.Error(http.StatusInternalServerError, \"FindAllMatchedBranches\", err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tfor _, branchName := range matchedBranches {\n\t\t\t\tif err = pull_service.CheckPRsForBaseBranch(ctx, ctx.Repo.Repository, branchName); err != nil {\n\t\t\t\t\tctx.Error(http.StatusInternalServerError, \"CheckPRsForBaseBranch\", err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t// Reload from db to get all whitelists\n\tbp, err := git_model.GetProtectedBranchRuleByName(ctx, ctx.Repo.Repository.ID, ruleName)\n\tif err != nil {\n\t\tctx.Error(http.StatusInternalServerError, \"GetProtectedBranchByID\", err)\n\t\treturn\n\t}\n\tif bp == nil || bp.RepoID != ctx.Repo.Repository.ID {\n\t\tctx.Error(http.StatusInternalServerError, \"New branch protection not found\", err)\n\t\treturn\n\t}\n\n\tctx.JSON(http.StatusCreated, convert.ToBranchProtection(bp))\n}", "func (repo *HelmRepoRepository) ListHelmReposByProjectID(\n\tprojectID uint,\n) ([]*models.HelmRepo, error) {\n\thrs := []*models.HelmRepo{}\n\n\tif err := repo.db.Preload(\"TokenCache\").Where(\"project_id = ?\", projectID).Find(&hrs).Error; err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, hr := range hrs {\n\t\trepo.DecryptHelmRepoData(hr, repo.key)\n\t}\n\n\treturn hrs, nil\n}", "func (c *client) GetBranchProtection(org, repo, branch string) (*BranchProtection, error) {\n\tdurationLogger := c.log(\"GetBranchProtection\", org, repo, branch)\n\tdefer durationLogger()\n\n\tcode, body, err := c.requestRaw(&request{\n\t\tmethod: http.MethodGet,\n\t\tpath: fmt.Sprintf(\"/repos/%s/%s/branches/%s/protection\", org, repo, branch),\n\t\torg: org,\n\t\t// GitHub returns 404 for this call if either:\n\t\t// - The branch is not protected\n\t\t// - The access token used does not have sufficient privileges\n\t\t// We therefore need to introspect the 
response body.\n\t\texitCodes: []int{200, 404},\n\t})\n\n\tswitch {\n\tcase err != nil:\n\t\treturn nil, err\n\tcase code == 200:\n\t\tvar bp BranchProtection\n\t\tif err := json.Unmarshal(body, &bp); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn &bp, nil\n\tcase code == 404:\n\t\t// continue\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"unexpected status code: %d\", code)\n\t}\n\n\tvar ge githubError\n\tif err := json.Unmarshal(body, &ge); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// If the error was because the branch is not protected, we return a\n\t// nil pointer to indicate this.\n\tif ge.Message == \"Branch not protected\" {\n\t\treturn nil, nil\n\t}\n\n\t// Otherwise we got some other 404 error.\n\treturn nil, fmt.Errorf(\"getting branch protection 404: %s\", ge.Message)\n}", "func (a *RepoAPI) getBranches(params interface{}) (resp *rpc.Response) {\n\tm := objx.New(cast.ToStringMap(params))\n\treturn rpc.Success(util.Map{\"branches\": a.mods.Repo.GetBranches(m.Get(\"name\").Str())})\n}", "func FindCommitsByRepoID(ctx context.Context, db DB, value string) ([]*Commit, error) {\n\tq := \"SELECT `commit`.`id`,`commit`.`checksum`,`commit`.`repo_id`,`commit`.`sha`,`commit`.`branch`,`commit`.`message`,`commit`.`mergecommit`,`commit`.`excluded`,`commit`.`parent`,`commit`.`parent_id`,`commit`.`date`,`commit`.`author_user_id`,`commit`.`committer_user_id`,`commit`.`ordinal`,`commit`.`customer_id`,`commit`.`ref_type`,`commit`.`ref_id`,`commit`.`metadata` FROM `commit` WHERE `repo_id` = ? LIMIT 1\"\n\trows, err := db.QueryContext(ctx, q, orm.ToSQLString(value))\n\tif err == sql.ErrNoRows {\n\t\treturn nil, nil\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer rows.Close()\n\tresults := make([]*Commit, 0)\n\tfor rows.Next() {\n\t\tvar _ID sql.NullString\n\t\tvar _Checksum sql.NullString\n\t\tvar _RepoID sql.NullString\n\t\tvar _Sha sql.NullString\n\t\tvar _Branch sql.NullString\n\t\tvar _Message sql.NullString\n\t\tvar _Mergecommit sql.NullBool\n\t\tvar _Excluded sql.NullBool\n\t\tvar _Parent sql.NullString\n\t\tvar _ParentID sql.NullString\n\t\tvar _Date sql.NullInt64\n\t\tvar _AuthorUserID sql.NullString\n\t\tvar _CommitterUserID sql.NullString\n\t\tvar _Ordinal sql.NullInt64\n\t\tvar _CustomerID sql.NullString\n\t\tvar _RefType sql.NullString\n\t\tvar _RefID sql.NullString\n\t\tvar _Metadata sql.NullString\n\t\terr := rows.Scan(\n\t\t\t&_ID,\n\t\t\t&_Checksum,\n\t\t\t&_RepoID,\n\t\t\t&_Sha,\n\t\t\t&_Branch,\n\t\t\t&_Message,\n\t\t\t&_Mergecommit,\n\t\t\t&_Excluded,\n\t\t\t&_Parent,\n\t\t\t&_ParentID,\n\t\t\t&_Date,\n\t\t\t&_AuthorUserID,\n\t\t\t&_CommitterUserID,\n\t\t\t&_Ordinal,\n\t\t\t&_CustomerID,\n\t\t\t&_RefType,\n\t\t\t&_RefID,\n\t\t\t&_Metadata,\n\t\t)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tt := &Commit{}\n\t\tif _ID.Valid {\n\t\t\tt.SetID(_ID.String)\n\t\t}\n\t\tif _Checksum.Valid {\n\t\t\tt.SetChecksum(_Checksum.String)\n\t\t}\n\t\tif _RepoID.Valid {\n\t\t\tt.SetRepoID(_RepoID.String)\n\t\t}\n\t\tif _Sha.Valid {\n\t\t\tt.SetSha(_Sha.String)\n\t\t}\n\t\tif _Branch.Valid {\n\t\t\tt.SetBranch(_Branch.String)\n\t\t}\n\t\tif _Message.Valid {\n\t\t\tt.SetMessage(_Message.String)\n\t\t}\n\t\tif _Mergecommit.Valid {\n\t\t\tt.SetMergecommit(_Mergecommit.Bool)\n\t\t}\n\t\tif _Excluded.Valid {\n\t\t\tt.SetExcluded(_Excluded.Bool)\n\t\t}\n\t\tif _Parent.Valid {\n\t\t\tt.SetParent(_Parent.String)\n\t\t}\n\t\tif _ParentID.Valid {\n\t\t\tt.SetParentID(_ParentID.String)\n\t\t}\n\t\tif _Date.Valid {\n\t\t\tt.SetDate(_Date.Int64)\n\t\t}\n\t\tif _AuthorUserID.Valid 
{\n\t\t\tt.SetAuthorUserID(_AuthorUserID.String)\n\t\t}\n\t\tif _CommitterUserID.Valid {\n\t\t\tt.SetCommitterUserID(_CommitterUserID.String)\n\t\t}\n\t\tif _Ordinal.Valid {\n\t\t\tt.SetOrdinal(int32(_Ordinal.Int64))\n\t\t}\n\t\tif _CustomerID.Valid {\n\t\t\tt.SetCustomerID(_CustomerID.String)\n\t\t}\n\t\tif _RefType.Valid {\n\t\t\tt.SetRefType(_RefType.String)\n\t\t}\n\t\tif _RefID.Valid {\n\t\t\tt.SetRefID(_RefID.String)\n\t\t}\n\t\tif _Metadata.Valid {\n\t\t\tt.SetMetadata(_Metadata.String)\n\t\t}\n\t\tresults = append(results, t)\n\t}\n\treturn results, nil\n}", "func (c APIClient) ListBranch(repoName string) ([]*pfs.BranchInfo, error) {\n\tbranchInfos, err := c.PfsAPIClient.ListBranch(\n\t\tc.Ctx(),\n\t\t&pfs.ListBranchRequest{\n\t\t\tRepo: NewRepo(repoName),\n\t\t},\n\t)\n\tif err != nil {\n\t\treturn nil, grpcutil.ScrubGRPC(err)\n\t}\n\treturn branchInfos.BranchInfo, nil\n}", "func d4getBranches(node *d4nodeT, branch *d4branchT, parVars *d4partitionVarsT) {\n\t// Load the branch buffer\n\tfor index := 0; index < d4maxNodes; index++ {\n\t\tparVars.branchBuf[index] = node.branch[index]\n\t}\n\tparVars.branchBuf[d4maxNodes] = *branch\n\tparVars.branchCount = d4maxNodes + 1\n\n\t// Calculate rect containing all in the set\n\tparVars.coverSplit = parVars.branchBuf[0].rect\n\tfor index := 1; index < d4maxNodes+1; index++ {\n\t\tparVars.coverSplit = d4combineRect(&parVars.coverSplit, &parVars.branchBuf[index].rect)\n\t}\n\tparVars.coverSplitArea = d4calcRectVolume(&parVars.coverSplit)\n}", "func FindCommitsByRepoIDTx(ctx context.Context, tx Tx, value string) ([]*Commit, error) {\n\tq := \"SELECT `commit`.`id`,`commit`.`checksum`,`commit`.`repo_id`,`commit`.`sha`,`commit`.`branch`,`commit`.`message`,`commit`.`mergecommit`,`commit`.`excluded`,`commit`.`parent`,`commit`.`parent_id`,`commit`.`date`,`commit`.`author_user_id`,`commit`.`committer_user_id`,`commit`.`ordinal`,`commit`.`customer_id`,`commit`.`ref_type`,`commit`.`ref_id`,`commit`.`metadata` FROM `commit` WHERE `repo_id` = ? 
LIMIT 1\"\n\trows, err := tx.QueryContext(ctx, q, orm.ToSQLString(value))\n\tif err == sql.ErrNoRows {\n\t\treturn nil, nil\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer rows.Close()\n\tresults := make([]*Commit, 0)\n\tfor rows.Next() {\n\t\tvar _ID sql.NullString\n\t\tvar _Checksum sql.NullString\n\t\tvar _RepoID sql.NullString\n\t\tvar _Sha sql.NullString\n\t\tvar _Branch sql.NullString\n\t\tvar _Message sql.NullString\n\t\tvar _Mergecommit sql.NullBool\n\t\tvar _Excluded sql.NullBool\n\t\tvar _Parent sql.NullString\n\t\tvar _ParentID sql.NullString\n\t\tvar _Date sql.NullInt64\n\t\tvar _AuthorUserID sql.NullString\n\t\tvar _CommitterUserID sql.NullString\n\t\tvar _Ordinal sql.NullInt64\n\t\tvar _CustomerID sql.NullString\n\t\tvar _RefType sql.NullString\n\t\tvar _RefID sql.NullString\n\t\tvar _Metadata sql.NullString\n\t\terr := rows.Scan(\n\t\t\t&_ID,\n\t\t\t&_Checksum,\n\t\t\t&_RepoID,\n\t\t\t&_Sha,\n\t\t\t&_Branch,\n\t\t\t&_Message,\n\t\t\t&_Mergecommit,\n\t\t\t&_Excluded,\n\t\t\t&_Parent,\n\t\t\t&_ParentID,\n\t\t\t&_Date,\n\t\t\t&_AuthorUserID,\n\t\t\t&_CommitterUserID,\n\t\t\t&_Ordinal,\n\t\t\t&_CustomerID,\n\t\t\t&_RefType,\n\t\t\t&_RefID,\n\t\t\t&_Metadata,\n\t\t)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tt := &Commit{}\n\t\tif _ID.Valid {\n\t\t\tt.SetID(_ID.String)\n\t\t}\n\t\tif _Checksum.Valid {\n\t\t\tt.SetChecksum(_Checksum.String)\n\t\t}\n\t\tif _RepoID.Valid {\n\t\t\tt.SetRepoID(_RepoID.String)\n\t\t}\n\t\tif _Sha.Valid {\n\t\t\tt.SetSha(_Sha.String)\n\t\t}\n\t\tif _Branch.Valid {\n\t\t\tt.SetBranch(_Branch.String)\n\t\t}\n\t\tif _Message.Valid {\n\t\t\tt.SetMessage(_Message.String)\n\t\t}\n\t\tif _Mergecommit.Valid {\n\t\t\tt.SetMergecommit(_Mergecommit.Bool)\n\t\t}\n\t\tif _Excluded.Valid {\n\t\t\tt.SetExcluded(_Excluded.Bool)\n\t\t}\n\t\tif _Parent.Valid {\n\t\t\tt.SetParent(_Parent.String)\n\t\t}\n\t\tif _ParentID.Valid {\n\t\t\tt.SetParentID(_ParentID.String)\n\t\t}\n\t\tif _Date.Valid {\n\t\t\tt.SetDate(_Date.Int64)\n\t\t}\n\t\tif _AuthorUserID.Valid {\n\t\t\tt.SetAuthorUserID(_AuthorUserID.String)\n\t\t}\n\t\tif _CommitterUserID.Valid {\n\t\t\tt.SetCommitterUserID(_CommitterUserID.String)\n\t\t}\n\t\tif _Ordinal.Valid {\n\t\t\tt.SetOrdinal(int32(_Ordinal.Int64))\n\t\t}\n\t\tif _CustomerID.Valid {\n\t\t\tt.SetCustomerID(_CustomerID.String)\n\t\t}\n\t\tif _RefType.Valid {\n\t\t\tt.SetRefType(_RefType.String)\n\t\t}\n\t\tif _RefID.Valid {\n\t\t\tt.SetRefID(_RefID.String)\n\t\t}\n\t\tif _Metadata.Valid {\n\t\t\tt.SetMetadata(_Metadata.String)\n\t\t}\n\t\tresults = append(results, t)\n\t}\n\treturn results, nil\n}", "func d12getBranches(node *d12nodeT, branch *d12branchT, parVars *d12partitionVarsT) {\n\t// Load the branch buffer\n\tfor index := 0; index < d12maxNodes; index++ {\n\t\tparVars.branchBuf[index] = node.branch[index]\n\t}\n\tparVars.branchBuf[d12maxNodes] = *branch\n\tparVars.branchCount = d12maxNodes + 1\n\n\t// Calculate rect containing all in the set\n\tparVars.coverSplit = parVars.branchBuf[0].rect\n\tfor index := 1; index < d12maxNodes+1; index++ {\n\t\tparVars.coverSplit = d12combineRect(&parVars.coverSplit, &parVars.branchBuf[index].rect)\n\t}\n\tparVars.coverSplitArea = d12calcRectVolume(&parVars.coverSplit)\n}", "func (r *Bitbucket) GetRepos(user *model.User) ([]*model.Repo, error) {\n\tvar repos []*model.Repo\n\tvar client = bitbucket.New(\n\t\tr.Client,\n\t\tr.Secret,\n\t\tuser.Access,\n\t\tuser.Secret,\n\t)\n\tvar list, err = client.Repos.List()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar remote = 
r.GetKind()\n\tvar hostname = r.GetHost()\n\n\tfor _, item := range list {\n\t\t// for now we only support git repos\n\t\tif item.Scm != \"git\" {\n\t\t\tcontinue\n\t\t}\n\n\t\t// these are the urls required to clone the repository\n\t\t// TODO use the bitbucketurl.Host and bitbucketurl.Scheme instead of hardcoding\n\t\t// so that we can support Stash.\n\t\tvar html = fmt.Sprintf(\"https://bitbucket.org/%s/%s\", item.Owner, item.Slug)\n\t\tvar clone = fmt.Sprintf(\"https://bitbucket.org/%s/%s.git\", item.Owner, item.Slug)\n\t\tvar ssh = fmt.Sprintf(\"git@bitbucket.org:%s/%s.git\", item.Owner, item.Slug)\n\n\t\tvar repo = model.Repo{\n\t\t\tUserID: user.ID,\n\t\t\tRemote: remote,\n\t\t\tHost: hostname,\n\t\t\tOwner: item.Owner,\n\t\t\tName: item.Slug,\n\t\t\tPrivate: item.Private,\n\t\t\tURL: html,\n\t\t\tCloneURL: clone,\n\t\t\tGitURL: clone,\n\t\t\tSSHURL: ssh,\n\t\t\tRole: &model.Perm{\n\t\t\t\tAdmin: true,\n\t\t\t\tWrite: true,\n\t\t\t\tRead: true,\n\t\t\t},\n\t\t}\n\n\t\tif repo.Private {\n\t\t\trepo.CloneURL = repo.SSHURL\n\t\t}\n\n\t\trepos = append(repos, &repo)\n\t}\n\n\treturn repos, err\n}", "func (app *App) HandleGetBranches(w http.ResponseWriter, r *http.Request) {\n\n\tclient, err := app.githubAppClientFromRequest(r)\n\n\tif err != nil {\n\t\tapp.handleErrorInternal(err, w)\n\t\treturn\n\t}\n\n\towner := chi.URLParam(r, \"owner\")\n\tname := chi.URLParam(r, \"name\")\n\n\t// List all branches for a specified repo\n\tallBranches, resp, err := client.Repositories.ListBranches(context.Background(), owner, name, &github.ListOptions{\n\t\tPerPage: 100,\n\t})\n\n\tif err != nil {\n\t\tapp.handleErrorInternal(err, w)\n\t\treturn\n\t}\n\n\t// make workers to get branches concurrently\n\tconst WCOUNT = 5\n\tnumPages := resp.LastPage + 1\n\tvar workerErr error\n\tvar mu sync.Mutex\n\tvar wg sync.WaitGroup\n\n\tworker := func(cp int) {\n\t\tdefer wg.Done()\n\n\t\tfor cp < numPages {\n\t\t\topts := &github.ListOptions{\n\t\t\t\tPage: cp,\n\t\t\t\tPerPage: 100,\n\t\t\t}\n\n\t\t\tbranches, _, err := client.Repositories.ListBranches(context.Background(), owner, name, opts)\n\n\t\t\tif err != nil {\n\t\t\t\tmu.Lock()\n\t\t\t\tworkerErr = err\n\t\t\t\tmu.Unlock()\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tmu.Lock()\n\t\t\tallBranches = append(allBranches, branches...)\n\t\t\tmu.Unlock()\n\n\t\t\tcp += WCOUNT\n\t\t}\n\t}\n\n\tvar numJobs int\n\tif numPages > WCOUNT {\n\t\tnumJobs = WCOUNT\n\t} else {\n\t\tnumJobs = numPages\n\t}\n\n\twg.Add(numJobs)\n\n\t// page 1 is already loaded so we start with 2\n\tfor i := 1; i <= numJobs; i++ {\n\t\tgo worker(i + 1)\n\t}\n\n\twg.Wait()\n\n\tif workerErr != nil {\n\t\tapp.handleErrorInternal(workerErr, w)\n\t\treturn\n\t}\n\n\tres := make([]string, 0)\n\tfor _, b := range allBranches {\n\t\tres = append(res, b.GetName())\n\t}\n\n\tjson.NewEncoder(w).Encode(res)\n}", "func GetBranchProtection(ctx *pulumi.Context,\n\tname string, id pulumi.IDInput, state *BranchProtectionState, opts ...pulumi.ResourceOption) (*BranchProtection, error) {\n\tvar resource BranchProtection\n\terr := ctx.ReadResource(\"gitlab:index/branchProtection:BranchProtection\", name, id, state, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func (u *URL) SplitRepoPath() []string {\n\treturn strings.Split(u.RepoPath, \"/\")\n}", "func DeleteBranchProtection(ctx *context.APIContext) {\n\t// swagger:operation DELETE /repos/{owner}/{repo}/branch_protections/{name} repository repoDeleteBranchProtection\n\t// ---\n\t// summary: Delete a specific branch 
protection for the repository\n\t// produces:\n\t// - application/json\n\t// parameters:\n\t// - name: owner\n\t// in: path\n\t// description: owner of the repo\n\t// type: string\n\t// required: true\n\t// - name: repo\n\t// in: path\n\t// description: name of the repo\n\t// type: string\n\t// required: true\n\t// - name: name\n\t// in: path\n\t// description: name of protected branch\n\t// type: string\n\t// required: true\n\t// responses:\n\t// \"204\":\n\t// \"$ref\": \"#/responses/empty\"\n\t// \"404\":\n\t// \"$ref\": \"#/responses/notFound\"\n\n\trepo := ctx.Repo.Repository\n\tbpName := ctx.Params(\":name\")\n\tbp, err := git_model.GetProtectedBranchRuleByName(ctx, repo.ID, bpName)\n\tif err != nil {\n\t\tctx.Error(http.StatusInternalServerError, \"GetProtectedBranchRuleByName\", err)\n\t\treturn\n\t}\n\tif bp == nil || bp.RepoID != repo.ID {\n\t\tctx.NotFound()\n\t\treturn\n\t}\n\n\tif err := git_model.DeleteProtectedBranch(ctx, ctx.Repo.Repository.ID, bp.ID); err != nil {\n\t\tctx.Error(http.StatusInternalServerError, \"DeleteProtectedBranch\", err)\n\t\treturn\n\t}\n\n\tctx.Status(http.StatusNoContent)\n}", "func (c APIClient) ListCommitByRepo(repoName string) ([]*pfs.CommitInfo, error) {\n\treturn c.ListCommit(repoName, \"\", \"\", 0)\n}", "func EditBranchProtection(ctx *context.APIContext) {\n\t// swagger:operation PATCH /repos/{owner}/{repo}/branch_protections/{name} repository repoEditBranchProtection\n\t// ---\n\t// summary: Edit branch protection for a repository. Only fields that are set will be changed\n\t// consumes:\n\t// - application/json\n\t// produces:\n\t// - application/json\n\t// parameters:\n\t// - name: owner\n\t// in: path\n\t// description: owner of the repo\n\t// type: string\n\t// required: true\n\t// - name: repo\n\t// in: path\n\t// description: name of the repo\n\t// type: string\n\t// required: true\n\t// - name: name\n\t// in: path\n\t// description: name of protected branch\n\t// type: string\n\t// required: true\n\t// - name: body\n\t// in: body\n\t// schema:\n\t// \"$ref\": \"#/definitions/EditBranchProtectionOption\"\n\t// responses:\n\t// \"200\":\n\t// \"$ref\": \"#/responses/BranchProtection\"\n\t// \"404\":\n\t// \"$ref\": \"#/responses/notFound\"\n\t// \"422\":\n\t// \"$ref\": \"#/responses/validationError\"\n\tform := web.GetForm(ctx).(*api.EditBranchProtectionOption)\n\trepo := ctx.Repo.Repository\n\tbpName := ctx.Params(\":name\")\n\tprotectBranch, err := git_model.GetProtectedBranchRuleByName(ctx, repo.ID, bpName)\n\tif err != nil {\n\t\tctx.Error(http.StatusInternalServerError, \"GetProtectedBranchRuleByName\", err)\n\t\treturn\n\t}\n\tif protectBranch == nil || protectBranch.RepoID != repo.ID {\n\t\tctx.NotFound()\n\t\treturn\n\t}\n\n\tif form.EnablePush != nil {\n\t\tif !*form.EnablePush {\n\t\t\tprotectBranch.CanPush = false\n\t\t\tprotectBranch.EnableWhitelist = false\n\t\t\tprotectBranch.WhitelistDeployKeys = false\n\t\t} else {\n\t\t\tprotectBranch.CanPush = true\n\t\t\tif form.EnablePushWhitelist != nil {\n\t\t\t\tif !*form.EnablePushWhitelist {\n\t\t\t\t\tprotectBranch.EnableWhitelist = false\n\t\t\t\t\tprotectBranch.WhitelistDeployKeys = false\n\t\t\t\t} else {\n\t\t\t\t\tprotectBranch.EnableWhitelist = true\n\t\t\t\t\tif form.PushWhitelistDeployKeys != nil {\n\t\t\t\t\t\tprotectBranch.WhitelistDeployKeys = *form.PushWhitelistDeployKeys\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tif form.EnableMergeWhitelist != nil {\n\t\tprotectBranch.EnableMergeWhitelist = *form.EnableMergeWhitelist\n\t}\n\n\tif form.EnableStatusCheck != nil 
{\n\t\tprotectBranch.EnableStatusCheck = *form.EnableStatusCheck\n\t}\n\n\tif form.StatusCheckContexts != nil {\n\t\tprotectBranch.StatusCheckContexts = form.StatusCheckContexts\n\t}\n\n\tif form.RequiredApprovals != nil && *form.RequiredApprovals >= 0 {\n\t\tprotectBranch.RequiredApprovals = *form.RequiredApprovals\n\t}\n\n\tif form.EnableApprovalsWhitelist != nil {\n\t\tprotectBranch.EnableApprovalsWhitelist = *form.EnableApprovalsWhitelist\n\t}\n\n\tif form.BlockOnRejectedReviews != nil {\n\t\tprotectBranch.BlockOnRejectedReviews = *form.BlockOnRejectedReviews\n\t}\n\n\tif form.BlockOnOfficialReviewRequests != nil {\n\t\tprotectBranch.BlockOnOfficialReviewRequests = *form.BlockOnOfficialReviewRequests\n\t}\n\n\tif form.DismissStaleApprovals != nil {\n\t\tprotectBranch.DismissStaleApprovals = *form.DismissStaleApprovals\n\t}\n\n\tif form.RequireSignedCommits != nil {\n\t\tprotectBranch.RequireSignedCommits = *form.RequireSignedCommits\n\t}\n\n\tif form.ProtectedFilePatterns != nil {\n\t\tprotectBranch.ProtectedFilePatterns = *form.ProtectedFilePatterns\n\t}\n\n\tif form.UnprotectedFilePatterns != nil {\n\t\tprotectBranch.UnprotectedFilePatterns = *form.UnprotectedFilePatterns\n\t}\n\n\tif form.BlockOnOutdatedBranch != nil {\n\t\tprotectBranch.BlockOnOutdatedBranch = *form.BlockOnOutdatedBranch\n\t}\n\n\tvar whitelistUsers []int64\n\tif form.PushWhitelistUsernames != nil {\n\t\twhitelistUsers, err = user_model.GetUserIDsByNames(ctx, form.PushWhitelistUsernames, false)\n\t\tif err != nil {\n\t\t\tif user_model.IsErrUserNotExist(err) {\n\t\t\t\tctx.Error(http.StatusUnprocessableEntity, \"User does not exist\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tctx.Error(http.StatusInternalServerError, \"GetUserIDsByNames\", err)\n\t\t\treturn\n\t\t}\n\t} else {\n\t\twhitelistUsers = protectBranch.WhitelistUserIDs\n\t}\n\tvar mergeWhitelistUsers []int64\n\tif form.MergeWhitelistUsernames != nil {\n\t\tmergeWhitelistUsers, err = user_model.GetUserIDsByNames(ctx, form.MergeWhitelistUsernames, false)\n\t\tif err != nil {\n\t\t\tif user_model.IsErrUserNotExist(err) {\n\t\t\t\tctx.Error(http.StatusUnprocessableEntity, \"User does not exist\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tctx.Error(http.StatusInternalServerError, \"GetUserIDsByNames\", err)\n\t\t\treturn\n\t\t}\n\t} else {\n\t\tmergeWhitelistUsers = protectBranch.MergeWhitelistUserIDs\n\t}\n\tvar approvalsWhitelistUsers []int64\n\tif form.ApprovalsWhitelistUsernames != nil {\n\t\tapprovalsWhitelistUsers, err = user_model.GetUserIDsByNames(ctx, form.ApprovalsWhitelistUsernames, false)\n\t\tif err != nil {\n\t\t\tif user_model.IsErrUserNotExist(err) {\n\t\t\t\tctx.Error(http.StatusUnprocessableEntity, \"User does not exist\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tctx.Error(http.StatusInternalServerError, \"GetUserIDsByNames\", err)\n\t\t\treturn\n\t\t}\n\t} else {\n\t\tapprovalsWhitelistUsers = protectBranch.ApprovalsWhitelistUserIDs\n\t}\n\n\tvar whitelistTeams, mergeWhitelistTeams, approvalsWhitelistTeams []int64\n\tif repo.Owner.IsOrganization() {\n\t\tif form.PushWhitelistTeams != nil {\n\t\t\twhitelistTeams, err = organization.GetTeamIDsByNames(repo.OwnerID, form.PushWhitelistTeams, false)\n\t\t\tif err != nil {\n\t\t\t\tif organization.IsErrTeamNotExist(err) {\n\t\t\t\t\tctx.Error(http.StatusUnprocessableEntity, \"Team does not exist\", err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tctx.Error(http.StatusInternalServerError, \"GetTeamIDsByNames\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t} else {\n\t\t\twhitelistTeams = 
protectBranch.WhitelistTeamIDs\n\t\t}\n\t\tif form.MergeWhitelistTeams != nil {\n\t\t\tmergeWhitelistTeams, err = organization.GetTeamIDsByNames(repo.OwnerID, form.MergeWhitelistTeams, false)\n\t\t\tif err != nil {\n\t\t\t\tif organization.IsErrTeamNotExist(err) {\n\t\t\t\t\tctx.Error(http.StatusUnprocessableEntity, \"Team does not exist\", err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tctx.Error(http.StatusInternalServerError, \"GetTeamIDsByNames\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t} else {\n\t\t\tmergeWhitelistTeams = protectBranch.MergeWhitelistTeamIDs\n\t\t}\n\t\tif form.ApprovalsWhitelistTeams != nil {\n\t\t\tapprovalsWhitelistTeams, err = organization.GetTeamIDsByNames(repo.OwnerID, form.ApprovalsWhitelistTeams, false)\n\t\t\tif err != nil {\n\t\t\t\tif organization.IsErrTeamNotExist(err) {\n\t\t\t\t\tctx.Error(http.StatusUnprocessableEntity, \"Team does not exist\", err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tctx.Error(http.StatusInternalServerError, \"GetTeamIDsByNames\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t} else {\n\t\t\tapprovalsWhitelistTeams = protectBranch.ApprovalsWhitelistTeamIDs\n\t\t}\n\t}\n\n\terr = git_model.UpdateProtectBranch(ctx, ctx.Repo.Repository, protectBranch, git_model.WhitelistOptions{\n\t\tUserIDs: whitelistUsers,\n\t\tTeamIDs: whitelistTeams,\n\t\tMergeUserIDs: mergeWhitelistUsers,\n\t\tMergeTeamIDs: mergeWhitelistTeams,\n\t\tApprovalsUserIDs: approvalsWhitelistUsers,\n\t\tApprovalsTeamIDs: approvalsWhitelistTeams,\n\t})\n\tif err != nil {\n\t\tctx.Error(http.StatusInternalServerError, \"UpdateProtectBranch\", err)\n\t\treturn\n\t}\n\n\tisPlainRule := !git_model.IsRuleNameSpecial(bpName)\n\tvar isBranchExist bool\n\tif isPlainRule {\n\t\tisBranchExist = git.IsBranchExist(ctx.Req.Context(), ctx.Repo.Repository.RepoPath(), bpName)\n\t}\n\n\tif isBranchExist {\n\t\tif err = pull_service.CheckPRsForBaseBranch(ctx, ctx.Repo.Repository, bpName); err != nil {\n\t\t\tctx.Error(http.StatusInternalServerError, \"CheckPRsForBaseBranch\", err)\n\t\t\treturn\n\t\t}\n\t} else {\n\t\tif !isPlainRule {\n\t\t\tif ctx.Repo.GitRepo == nil {\n\t\t\t\tctx.Repo.GitRepo, err = git.OpenRepository(ctx, ctx.Repo.Repository.RepoPath())\n\t\t\t\tif err != nil {\n\t\t\t\t\tctx.Error(http.StatusInternalServerError, \"OpenRepository\", err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tdefer func() {\n\t\t\t\t\tctx.Repo.GitRepo.Close()\n\t\t\t\t\tctx.Repo.GitRepo = nil\n\t\t\t\t}()\n\t\t\t}\n\n\t\t\t// FIXME: since we only need to recheck file protection rules, we could improve this\n\t\t\tmatchedBranches, err := git_model.FindAllMatchedBranches(ctx, ctx.Repo.Repository.ID, protectBranch.RuleName)\n\t\t\tif err != nil {\n\t\t\t\tctx.Error(http.StatusInternalServerError, \"FindAllMatchedBranches\", err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tfor _, branchName := range matchedBranches {\n\t\t\t\tif err = pull_service.CheckPRsForBaseBranch(ctx, ctx.Repo.Repository, branchName); err != nil {\n\t\t\t\t\tctx.Error(http.StatusInternalServerError, \"CheckPRsForBaseBranch\", err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t// Reload from db to ensure we get all whitelists\n\tbp, err := git_model.GetProtectedBranchRuleByName(ctx, repo.ID, bpName)\n\tif err != nil {\n\t\tctx.Error(http.StatusInternalServerError, \"GetProtectedBranchRuleByName\", err)\n\t\treturn\n\t}\n\tif bp == nil || bp.RepoID != ctx.Repo.Repository.ID {\n\t\tctx.Error(http.StatusInternalServerError, \"New branch protection not found\", err)\n\t\treturn\n\t}\n\n\tctx.JSON(http.StatusOK, convert.ToBranchProtection(bp))\n}", "func (gc *GithubClient) 
ListCommits(org, repo string, ID int) ([]*github.RepositoryCommit, error) {\n\toptions := &github.ListOptions{}\n\tgenericList, err := gc.depaginate(\n\t\tfmt.Sprintf(\"listing commits in Pull Requests '%d'\", ID),\n\t\tmaxRetryCount,\n\t\toptions,\n\t\tfunc() ([]interface{}, *github.Response, error) {\n\t\t\tpage, resp, err := gc.Client.PullRequests.ListCommits(ctx, org, repo, ID, options)\n\t\t\tvar interfaceList []interface{}\n\t\t\tif nil == err {\n\t\t\t\tfor _, commit := range page {\n\t\t\t\t\tinterfaceList = append(interfaceList, commit)\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn interfaceList, resp, err\n\t\t},\n\t)\n\tres := make([]*github.RepositoryCommit, len(genericList))\n\tfor i, elem := range genericList {\n\t\tres[i] = elem.(*github.RepositoryCommit)\n\t}\n\treturn res, err\n}", "func (client *Client) FetchBranches(ctx context.Context, owner string, repo string) ([]model.Branch, error) {\n\topt := github.ListOptions{}\n\n\titems := []model.Branch{}\n\n\tfor {\n\t\tbranches, resp, err := client.github.Repositories.ListBranches(ctx, owner, repo, &opt)\n\t\tif err != nil {\n\t\t\treturn items, err\n\t\t}\n\n\t\tfor _, branch := range branches {\n\t\t\titems = append(items, model.ConvertBranch(owner, repo, branch))\n\t\t}\n\n\t\tif resp.NextPage == 0 {\n\t\t\treturn items, nil\n\t\t}\n\n\t\topt.Page = resp.NextPage\n\t}\n}", "func d16getBranches(node *d16nodeT, branch *d16branchT, parVars *d16partitionVarsT) {\n\t// Load the branch buffer\n\tfor index := 0; index < d16maxNodes; index++ {\n\t\tparVars.branchBuf[index] = node.branch[index]\n\t}\n\tparVars.branchBuf[d16maxNodes] = *branch\n\tparVars.branchCount = d16maxNodes + 1\n\n\t// Calculate rect containing all in the set\n\tparVars.coverSplit = parVars.branchBuf[0].rect\n\tfor index := 1; index < d16maxNodes+1; index++ {\n\t\tparVars.coverSplit = d16combineRect(&parVars.coverSplit, &parVars.branchBuf[index].rect)\n\t}\n\tparVars.coverSplitArea = d16calcRectVolume(&parVars.coverSplit)\n}", "func d2getBranches(node *d2nodeT, branch *d2branchT, parVars *d2partitionVarsT) {\n\t// Load the branch buffer\n\tfor index := 0; index < d2maxNodes; index++ {\n\t\tparVars.branchBuf[index] = node.branch[index]\n\t}\n\tparVars.branchBuf[d2maxNodes] = *branch\n\tparVars.branchCount = d2maxNodes + 1\n\n\t// Calculate rect containing all in the set\n\tparVars.coverSplit = parVars.branchBuf[0].rect\n\tfor index := 1; index < d2maxNodes+1; index++ {\n\t\tparVars.coverSplit = d2combineRect(&parVars.coverSplit, &parVars.branchBuf[index].rect)\n\t}\n\tparVars.coverSplitArea = d2calcRectVolume(&parVars.coverSplit)\n}", "func (b *BranchList) GetBranches() []*Branch {\n\treturn b.Branches\n}", "func GetBranchesMethods(w *wiki.WikiData) (r entities.Result) {\n\tr.WikiData, _ = wiki.FilterPagesRegs(w, wiki.MakeRegs(false, []string{`\\[\\[Kategorie:VSgS.*\\]\\]`, `\\[\\[Kategorie:Směry, školy, teorie a koncepce sociologického a sociálního myšlení\\]\\]`}))\n\tfor _, p := range r.WikiData.Page {\n\t\tr.Nodes = append(r.Nodes, entities.Node{\"Metody\", p.Title, p.Revision.Text, []string{}, GetLinks(string(p.Revision.Text)), \"\"})\n\t}\n\treturn r\n}", "func (c *singleClient) ListRepository(repo string) ([]string, error) {\n\treturn c.doList(repo, func(repo string, filter ListFilter) (\n\t\ttagmodels.ListResponse, error) {\n\n\t\treturn c.ListRepositoryWithPagination(repo, filter)\n\t})\n}", "func d9getBranches(node *d9nodeT, branch *d9branchT, parVars *d9partitionVarsT) {\n\t// Load the branch buffer\n\tfor index := 0; index < d9maxNodes; index++ 
{\n\t\tparVars.branchBuf[index] = node.branch[index]\n\t}\n\tparVars.branchBuf[d9maxNodes] = *branch\n\tparVars.branchCount = d9maxNodes + 1\n\n\t// Calculate rect containing all in the set\n\tparVars.coverSplit = parVars.branchBuf[0].rect\n\tfor index := 1; index < d9maxNodes+1; index++ {\n\t\tparVars.coverSplit = d9combineRect(&parVars.coverSplit, &parVars.branchBuf[index].rect)\n\t}\n\tparVars.coverSplitArea = d9calcRectVolume(&parVars.coverSplit)\n}", "func d10getBranches(node *d10nodeT, branch *d10branchT, parVars *d10partitionVarsT) {\n\t// Load the branch buffer\n\tfor index := 0; index < d10maxNodes; index++ {\n\t\tparVars.branchBuf[index] = node.branch[index]\n\t}\n\tparVars.branchBuf[d10maxNodes] = *branch\n\tparVars.branchCount = d10maxNodes + 1\n\n\t// Calculate rect containing all in the set\n\tparVars.coverSplit = parVars.branchBuf[0].rect\n\tfor index := 1; index < d10maxNodes+1; index++ {\n\t\tparVars.coverSplit = d10combineRect(&parVars.coverSplit, &parVars.branchBuf[index].rect)\n\t}\n\tparVars.coverSplitArea = d10calcRectVolume(&parVars.coverSplit)\n}", "func getRepoList(projectID int64) ([]string, error) {\n\t/*\n\t\tuiUser := os.Getenv(\"UI_USR\")\n\t\tif len(uiUser) == 0 {\n\t\t\tuiUser = \"admin\"\n\t\t}\n\t\tuiPwd := os.Getenv(\"UI_PWD\")\n\t\tif len(uiPwd) == 0 {\n\t\t\tuiPwd = \"Harbor12345\"\n\t\t}\n\t*/\n\tuiURL := config.LocalUIURL()\n\tclient := &http.Client{}\n\treq, err := http.NewRequest(\"GET\", uiURL+\"/api/repositories?project_id=\"+strconv.Itoa(int(projectID)), nil)\n\tif err != nil {\n\t\tlog.Errorf(\"Error when creating request: %v\", err)\n\t\treturn nil, err\n\t}\n\t//req.SetBasicAuth(uiUser, uiPwd)\n\treq.AddCookie(&http.Cookie{Name: models.UISecretCookie, Value: config.UISecret()})\n\t//dump, err := httputil.DumpRequest(req, true)\n\t//log.Debugf(\"req: %q\", dump)\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\tlog.Errorf(\"Error when calling UI api to get repositories, error: %v\", err)\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\tif resp.StatusCode != http.StatusOK {\n\t\tlog.Errorf(\"Unexpected status code: %d\", resp.StatusCode)\n\t\tdump, _ := httputil.DumpResponse(resp, true)\n\t\tlog.Debugf(\"response: %q\", dump)\n\t\treturn nil, fmt.Errorf(\"Unexpected status code when getting repository list: %d\", resp.StatusCode)\n\t}\n\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\tlog.Errorf(\"Failed to read the response body, error: %v\", err)\n\t\treturn nil, err\n\t}\n\tvar repoList []string\n\terr = json.Unmarshal(body, &repoList)\n\treturn repoList, err\n}", "func (b *BranchListBuilder) obtainReflogBranches() []*models.Branch {\n\tfoundBranchesMap := map[string]bool{}\n\tre := regexp.MustCompile(`checkout: moving from ([\\S]+) to ([\\S]+)`)\n\treflogBranches := make([]*models.Branch, 0, len(b.ReflogCommits))\n\tfor _, commit := range b.ReflogCommits {\n\t\tif match := re.FindStringSubmatch(commit.Name); len(match) == 3 {\n\t\t\trecency := utils.UnixToTimeAgo(commit.UnixTimestamp)\n\t\t\tfor _, branchName := range match[1:] {\n\t\t\t\tif !foundBranchesMap[branchName] {\n\t\t\t\t\tfoundBranchesMap[branchName] = true\n\t\t\t\t\treflogBranches = append(reflogBranches, &models.Branch{\n\t\t\t\t\t\tRecency: recency,\n\t\t\t\t\t\tName: branchName,\n\t\t\t\t\t})\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn reflogBranches\n}", "func (p GithubRepoHost) AddBranchProtection(repoID string) (BranchProtectionRule, error) {\n\tif isDebug() {\n\t\tfmt.Printf(\"Adding branch protection on %s\\n\", repoID)\n\t}\n\n\trules := 
fetchBranchProtectionRules()\n\tinput := githubv4.CreateBranchProtectionRuleInput{\n\t\tRepositoryID: repoID,\n\t\tPattern: *githubv4.NewString(githubv4.String(rules.Pattern)),\n\t\tDismissesStaleReviews: githubv4.NewBoolean(githubv4.Boolean(rules.DismissesStaleReviews)),\n\t\tIsAdminEnforced: githubv4.NewBoolean(githubv4.Boolean(rules.IsAdminEnforced)),\n\t\tRequiresApprovingReviews: githubv4.NewBoolean(githubv4.Boolean(rules.RequiresApprovingReviews)),\n\t\tRequiredApprovingReviewCount: githubv4.NewInt(githubv4.Int(rules.RequiredApprovingReviewCount)),\n\t\tRequiresStatusChecks: githubv4.NewBoolean(githubv4.Boolean(rules.RequiresStatusChecks)),\n\t}\n\n\tchecks := make([]githubv4.String, len(rules.RequiredStatusCheckContexts))\n\tfor i, name := range rules.RequiredStatusCheckContexts {\n\t\tchecks[i] = *githubv4.NewString(githubv4.String(name))\n\t}\n\tinput.RequiredStatusCheckContexts = &checks\n\n\tvar m CreateRuleMutation\n\tclient := buildClient()\n\terr := client.Mutate(context.Background(), &m, input, nil)\n\treturn m.CreateBranchProtectionRule.BranchProtectionRule, err\n}", "func (c *client) ListTeamRepos(org string, id int) ([]Repo, error) {\n\tc.logger.WithField(\"methodName\", \"ListTeamRepos\").\n\t\tWarn(\"method is deprecated, and will result in multiple api calls to achieve result\")\n\tdurationLogger := c.log(\"ListTeamRepos\", org, id)\n\tdefer durationLogger()\n\n\tif c.fake {\n\t\treturn nil, nil\n\t}\n\n\torganization, err := c.GetOrg(org)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tpath := fmt.Sprintf(\"/organizations/%d/team/%d/repos\", organization.Id, id)\n\tvar repos []Repo\n\terr = c.readPaginatedResultsWithValues(\n\t\tpath,\n\t\turl.Values{\n\t\t\t\"per_page\": []string{\"100\"},\n\t\t},\n\t\t\"application/vnd.github+json\",\n\t\torg,\n\t\tfunc() interface{} {\n\t\t\treturn &[]Repo{}\n\t\t},\n\t\tfunc(obj interface{}) {\n\t\t\tfor _, repo := range *obj.(*[]Repo) {\n\t\t\t\t// Currently, GitHub API returns false for all permission levels\n\t\t\t\t// for a repo on which the team has 'Maintain' or 'Triage' role.\n\t\t\t\t// This check is to avoid listing a repo under the team but\n\t\t\t\t// showing the permission level as none.\n\t\t\t\tif LevelFromPermissions(repo.Permissions) != None {\n\t\t\t\t\trepos = append(repos, repo)\n\t\t\t\t}\n\t\t\t}\n\t\t},\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn repos, nil\n}", "func repoBranchesDecode(b []byte) (map[string][]string, error) {\n\t// binaryReader returns strings pointing into b to avoid allocations. 
We\n\t// don't own b, so we create a copy of it.\n\tr := binaryReader{b: append([]byte{}, b...)}\n\n\t// Version\n\tif v := r.byt(); v != 1 {\n\t\treturn nil, fmt.Errorf(\"unsupported RepoBranches encoding version %d\", v)\n\t}\n\n\t// Length\n\tl := r.uvarint()\n\trepoBranches := make(map[string][]string, l)\n\n\tfor i := 0; i < l; i++ {\n\t\tname := r.str()\n\n\t\tbranchesLen := int(r.byt())\n\n\t\t// Special case \"HEAD\"\n\t\tif branchesLen == 0 {\n\t\t\trepoBranches[name] = head\n\t\t\tcontinue\n\t\t}\n\n\t\tbranches := make([]string, branchesLen)\n\t\tfor j := 0; j < branchesLen; j++ {\n\t\t\tbranches[j] = r.str()\n\t\t}\n\t\trepoBranches[name] = branches\n\t}\n\n\treturn repoBranches, r.err\n}", "func (g *GitRepo) sortBranches() []string {\n\tsorted := []string{\"master\"}\n\tfor _, branch := range g.branches {\n\t\tif branch == \"master\" || branch == \"origin/master\" {\n\t\t\tcontinue\n\t\t}\n\t\tsorted = append(sorted, branch)\n\t}\n\treturn sorted\n}", "func (c *client) ListRepoHooks(org, repo string) ([]Hook, error) {\n\tc.log(\"ListRepoHooks\", org, repo)\n\treturn c.listHooks(org, &repo)\n}", "func (g *GitLocal) RemoteBranches(dir string) ([]string, error) {\n\treturn g.GitCLI.RemoteBranches(dir)\n}", "func d14getBranches(node *d14nodeT, branch *d14branchT, parVars *d14partitionVarsT) {\n\t// Load the branch buffer\n\tfor index := 0; index < d14maxNodes; index++ {\n\t\tparVars.branchBuf[index] = node.branch[index]\n\t}\n\tparVars.branchBuf[d14maxNodes] = *branch\n\tparVars.branchCount = d14maxNodes + 1\n\n\t// Calculate rect containing all in the set\n\tparVars.coverSplit = parVars.branchBuf[0].rect\n\tfor index := 1; index < d14maxNodes+1; index++ {\n\t\tparVars.coverSplit = d14combineRect(&parVars.coverSplit, &parVars.branchBuf[index].rect)\n\t}\n\tparVars.coverSplitArea = d14calcRectVolume(&parVars.coverSplit)\n}", "func d18getBranches(node *d18nodeT, branch *d18branchT, parVars *d18partitionVarsT) {\n\t// Load the branch buffer\n\tfor index := 0; index < d18maxNodes; index++ {\n\t\tparVars.branchBuf[index] = node.branch[index]\n\t}\n\tparVars.branchBuf[d18maxNodes] = *branch\n\tparVars.branchCount = d18maxNodes + 1\n\n\t// Calculate rect containing all in the set\n\tparVars.coverSplit = parVars.branchBuf[0].rect\n\tfor index := 1; index < d18maxNodes+1; index++ {\n\t\tparVars.coverSplit = d18combineRect(&parVars.coverSplit, &parVars.branchBuf[index].rect)\n\t}\n\tparVars.coverSplitArea = d18calcRectVolume(&parVars.coverSplit)\n}", "func GetBranches(web *router.WebRequest) *model.Container {\n\tprojectName := web.GetQueryParam(\"project\")\n\tname := web.GetQueryParam(\"name\")\n\tif projectName == \"\" || name == \"\" {\n\t\treturn model.ErrorResponse(model.MessageItem{\n\t\t\tCode: \"invalid-request\",\n\t\t\tMessage: \"project name is required\",\n\t\t}, 500)\n\t}\n\tlist, err := factory.GetGitClient().ListBranches(projectName, name)\n\tif err != nil {\n\t\treturn model.ErrorResponse(model.MessageItem{\n\t\t\tCode: \"list-tag-error\",\n\t\t\tMessage: err.Error(),\n\t\t}, 500)\n\t}\n\tdata := make([]interface{}, 0)\n\tfor _, item := range list {\n\t\tdata = append(data, item)\n\t}\n\treturn model.ListResponse(data)\n}", "func UpdateBranchProtection() error {\n\tvar wg sync.WaitGroup\n\trequests, err := getBranchProtectionRequests()\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\twg.Add(len(requests))\n\towner, repo := getOwnerRepo()\n\n\tfor _, bp := range requests {\n\t\tgo func(bp BranchProtection) {\n\t\t\tdefer wg.Done()\n\t\t\t_, _, err := 
cli.Repositories.UpdateBranchProtection(ctx, owner, repo, bp.Branch, bp.Protection)\n\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\n\t\t\t_, err = fmt.Fprintln(writer, fmt.Sprintf(\"branch %v has been protected\", bp.Branch))\n\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\t\t}(bp)\n\t}\n\n\twg.Wait()\n\n\treturn nil\n}", "func (mr *MockRepositoryClientMockRecorder) GetBranches(org, repo, onlyProtected interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"GetBranches\", reflect.TypeOf((*MockRepositoryClient)(nil).GetBranches), org, repo, onlyProtected)\n}", "func GetBotsForRepo(repo string) ([]string, error) {\n\tcfg, err := GetProjectConfig(repo)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn GetBotsForConfig(cfg), nil\n}", "func d20getBranches(node *d20nodeT, branch *d20branchT, parVars *d20partitionVarsT) {\n\t// Load the branch buffer\n\tfor index := 0; index < d20maxNodes; index++ {\n\t\tparVars.branchBuf[index] = node.branch[index]\n\t}\n\tparVars.branchBuf[d20maxNodes] = *branch\n\tparVars.branchCount = d20maxNodes + 1\n\n\t// Calculate rect containing all in the set\n\tparVars.coverSplit = parVars.branchBuf[0].rect\n\tfor index := 1; index < d20maxNodes+1; index++ {\n\t\tparVars.coverSplit = d20combineRect(&parVars.coverSplit, &parVars.branchBuf[index].rect)\n\t}\n\tparVars.coverSplitArea = d20calcRectVolume(&parVars.coverSplit)\n}", "func d19getBranches(node *d19nodeT, branch *d19branchT, parVars *d19partitionVarsT) {\n\t// Load the branch buffer\n\tfor index := 0; index < d19maxNodes; index++ {\n\t\tparVars.branchBuf[index] = node.branch[index]\n\t}\n\tparVars.branchBuf[d19maxNodes] = *branch\n\tparVars.branchCount = d19maxNodes + 1\n\n\t// Calculate rect containing all in the set\n\tparVars.coverSplit = parVars.branchBuf[0].rect\n\tfor index := 1; index < d19maxNodes+1; index++ {\n\t\tparVars.coverSplit = d19combineRect(&parVars.coverSplit, &parVars.branchBuf[index].rect)\n\t}\n\tparVars.coverSplitArea = d19calcRectVolume(&parVars.coverSplit)\n}", "func UpdateOrgProtectBranch(repo *Repository, protectBranch *ProtectBranch, whitelistUserIDs, whitelistTeamIDs string) (err error) {\n\tif err = repo.GetOwner(); err != nil {\n\t\treturn fmt.Errorf(\"GetOwner: %v\", err)\n\t} else if !repo.Owner.IsOrganization() {\n\t\treturn fmt.Errorf(\"expect repository owner to be an organization\")\n\t}\n\n\thasUsersChanged := false\n\tvalidUserIDs := tool.StringsToInt64s(strings.Split(protectBranch.WhitelistUserIDs, \",\"))\n\tif protectBranch.WhitelistUserIDs != whitelistUserIDs {\n\t\thasUsersChanged = true\n\t\tuserIDs := tool.StringsToInt64s(strings.Split(whitelistUserIDs, \",\"))\n\t\tvalidUserIDs = make([]int64, 0, len(userIDs))\n\t\tfor _, userID := range userIDs {\n\t\t\tif !Perms.Authorize(context.TODO(), userID, repo.ID, AccessModeWrite,\n\t\t\t\tAccessModeOptions{\n\t\t\t\t\tOwnerID: repo.OwnerID,\n\t\t\t\t\tPrivate: repo.IsPrivate,\n\t\t\t\t},\n\t\t\t) {\n\t\t\t\tcontinue // Drop invalid user ID\n\t\t\t}\n\n\t\t\tvalidUserIDs = append(validUserIDs, userID)\n\t\t}\n\n\t\tprotectBranch.WhitelistUserIDs = strings.Join(tool.Int64sToStrings(validUserIDs), \",\")\n\t}\n\n\thasTeamsChanged := false\n\tvalidTeamIDs := tool.StringsToInt64s(strings.Split(protectBranch.WhitelistTeamIDs, \",\"))\n\tif protectBranch.WhitelistTeamIDs != whitelistTeamIDs {\n\t\thasTeamsChanged = true\n\t\tteamIDs := tool.StringsToInt64s(strings.Split(whitelistTeamIDs, \",\"))\n\t\tteams, err := 
GetTeamsHaveAccessToRepo(repo.OwnerID, repo.ID, AccessModeWrite)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"GetTeamsHaveAccessToRepo [org_id: %d, repo_id: %d]: %v\", repo.OwnerID, repo.ID, err)\n\t\t}\n\t\tvalidTeamIDs = make([]int64, 0, len(teams))\n\t\tfor i := range teams {\n\t\t\tif teams[i].HasWriteAccess() && com.IsSliceContainsInt64(teamIDs, teams[i].ID) {\n\t\t\t\tvalidTeamIDs = append(validTeamIDs, teams[i].ID)\n\t\t\t}\n\t\t}\n\n\t\tprotectBranch.WhitelistTeamIDs = strings.Join(tool.Int64sToStrings(validTeamIDs), \",\")\n\t}\n\n\t// Make sure protectBranch.ID is not 0 for whitelists\n\tif protectBranch.ID == 0 {\n\t\tif _, err = x.Insert(protectBranch); err != nil {\n\t\t\treturn fmt.Errorf(\"Insert: %v\", err)\n\t\t}\n\t}\n\n\t// Merge users and members of teams\n\tvar whitelists []*ProtectBranchWhitelist\n\tif hasUsersChanged || hasTeamsChanged {\n\t\tmergedUserIDs := make(map[int64]bool)\n\t\tfor _, userID := range validUserIDs {\n\t\t\t// An empty whitelist can yield a user ID of 0, so skip it\n\t\t\tif userID != 0 {\n\t\t\t\tmergedUserIDs[userID] = true\n\t\t\t}\n\t\t}\n\n\t\tfor _, teamID := range validTeamIDs {\n\t\t\tmembers, err := GetTeamMembers(teamID)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"GetTeamMembers [team_id: %d]: %v\", teamID, err)\n\t\t\t}\n\n\t\t\tfor i := range members {\n\t\t\t\tmergedUserIDs[members[i].ID] = true\n\t\t\t}\n\t\t}\n\n\t\twhitelists = make([]*ProtectBranchWhitelist, 0, len(mergedUserIDs))\n\t\tfor userID := range mergedUserIDs {\n\t\t\twhitelists = append(whitelists, &ProtectBranchWhitelist{\n\t\t\t\tProtectBranchID: protectBranch.ID,\n\t\t\t\tRepoID: repo.ID,\n\t\t\t\tName: protectBranch.Name,\n\t\t\t\tUserID: userID,\n\t\t\t})\n\t\t}\n\t}\n\n\tsess := x.NewSession()\n\tdefer sess.Close()\n\tif err = sess.Begin(); err != nil {\n\t\treturn err\n\t}\n\n\tif _, err = sess.ID(protectBranch.ID).AllCols().Update(protectBranch); err != nil {\n\t\treturn fmt.Errorf(\"Update: %v\", err)\n\t}\n\n\t// Refresh whitelists\n\tif hasUsersChanged || hasTeamsChanged {\n\t\tif _, err = sess.Delete(&ProtectBranchWhitelist{ProtectBranchID: protectBranch.ID}); err != nil {\n\t\t\treturn fmt.Errorf(\"delete old protect branch whitelists: %v\", err)\n\t\t} else if _, err = sess.Insert(whitelists); err != nil {\n\t\t\treturn fmt.Errorf(\"insert new protect branch whitelists: %v\", err)\n\t\t}\n\t}\n\n\treturn sess.Commit()\n}", "func handleRepo(ctx context.Context, client *github.Client, repo *github.Repository) error {\n\topt := &github.ListOptions{\n\t\tPerPage: 100,\n\t}\n\n\tteams, resp, err := client.Repositories.ListTeams(ctx, repo.GetOwner().GetLogin(), repo.GetName(), opt)\n\tif resp.StatusCode == http.StatusNotFound || resp.StatusCode == http.StatusForbidden || err != nil {\n\t\tif _, ok := err.(*github.RateLimitError); ok {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcollabs, resp, err := client.Repositories.ListCollaborators(ctx, repo.GetOwner().GetLogin(), repo.GetName(), &github.ListCollaboratorsOptions{ListOptions: *opt})\n\tif resp.StatusCode == http.StatusNotFound || resp.StatusCode == http.StatusForbidden || err != nil {\n\t\tif _, ok := err.(*github.RateLimitError); ok {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tkeys, resp, err := client.Repositories.ListKeys(ctx, repo.GetOwner().GetLogin(), repo.GetName(), opt)\n\tif resp.StatusCode == http.StatusNotFound || resp.StatusCode == http.StatusForbidden || err != nil {\n\t\tif _, ok := 
err.(*github.RateLimitError); ok {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\n\thooks, resp, err := client.Repositories.ListHooks(ctx, repo.GetOwner().GetLogin(), repo.GetName(), opt)\n\tif resp.StatusCode == http.StatusNotFound || resp.StatusCode == http.StatusForbidden || err != nil {\n\t\tif _, ok := err.(*github.RateLimitError); ok {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tbranches, _, err := client.Repositories.ListBranches(ctx, repo.GetOwner().GetLogin(), repo.GetName(), opt)\n\tif err != nil {\n\t\treturn err\n\t}\n\tprotectedBranches := []string{}\n\tunprotectedBranches := []string{}\n\tfor _, branch := range branches {\n\t\t// we must get the individual branch for the branch protection to work\n\t\tb, _, err := client.Repositories.GetBranch(ctx, repo.GetOwner().GetLogin(), repo.GetName(), branch.GetName())\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif b.GetProtected() {\n\t\t\tprotectedBranches = append(protectedBranches, b.GetName())\n\t\t} else {\n\t\t\tunprotectedBranches = append(unprotectedBranches, b.GetName())\n\t\t}\n\t}\n\n\t// only print whole status if we have more than one collaborator\n\tif len(collabs) <= 1 && len(keys) < 1 && len(hooks) < 1 && len(protectedBranches) < 1 && len(unprotectedBranches) < 1 {\n\t\treturn nil\n\t}\n\n\toutput := fmt.Sprintf(\"%s -> \\n\", repo.GetFullName())\n\n\tif len(collabs) > 1 {\n\t\tpush := []string{}\n\t\tpull := []string{}\n\t\tadmin := []string{}\n\t\tfor _, c := range collabs {\n\t\t\tuserTeams := []github.Team{}\n\t\t\tfor _, t := range teams {\n\t\t\t\tisMember, resp, err := client.Teams.GetTeamMembership(ctx, t.GetID(), c.GetLogin())\n\t\t\t\tif resp.StatusCode != http.StatusNotFound && resp.StatusCode != http.StatusForbidden && err == nil && isMember.GetState() == \"active\" {\n\t\t\t\t\tuserTeams = append(userTeams, *t)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tperms := c.GetPermissions()\n\n\t\t\tswitch {\n\t\t\tcase perms[\"admin\"]:\n\t\t\t\tpermTeams := []string{}\n\t\t\t\tfor _, t := range userTeams {\n\t\t\t\t\tif t.GetPermission() == \"admin\" {\n\t\t\t\t\t\tpermTeams = append(permTeams, t.GetName())\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tadmin = append(admin, fmt.Sprintf(\"\\t\\t\\t%s (teams: %s)\", c.GetLogin(), strings.Join(permTeams, \", \")))\n\t\t\tcase perms[\"push\"]:\n\t\t\t\tpush = append(push, fmt.Sprintf(\"\\t\\t\\t%s\", c.GetLogin()))\n\t\t\tcase perms[\"pull\"]:\n\t\t\t\tpull = append(pull, fmt.Sprintf(\"\\t\\t\\t%s\", c.GetLogin()))\n\t\t\t}\n\t\t}\n\t\toutput += fmt.Sprintf(\"\\tCollaborators (%d):\\n\", len(collabs))\n\t\toutput += fmt.Sprintf(\"\\t\\tAdmin (%d):\\n%s\\n\", len(admin), strings.Join(admin, \"\\n\"))\n\t\toutput += fmt.Sprintf(\"\\t\\tWrite (%d):\\n%s\\n\", len(push), strings.Join(push, \"\\n\"))\n\t\toutput += fmt.Sprintf(\"\\t\\tRead (%d):\\n%s\\n\", len(pull), strings.Join(pull, \"\\n\"))\n\t}\n\n\tif len(keys) > 0 {\n\t\tkstr := []string{}\n\t\tfor _, k := range keys {\n\t\t\tkstr = append(kstr, fmt.Sprintf(\"\\t\\t%s - ro:%t (%s)\", k.GetTitle(), k.GetReadOnly(), k.GetURL()))\n\t\t}\n\t\toutput += fmt.Sprintf(\"\\tKeys (%d):\\n%s\\n\", len(kstr), strings.Join(kstr, \"\\n\"))\n\t}\n\n\tif len(hooks) > 0 {\n\t\thstr := []string{}\n\t\tfor _, h := range hooks {\n\t\t\thstr = append(hstr, fmt.Sprintf(\"\\t\\t%s - active:%t (%s)\", h.GetName(), h.GetActive(), h.GetURL()))\n\t\t}\n\t\toutput += fmt.Sprintf(\"\\tHooks (%d):\\n%s\\n\", len(hstr), strings.Join(hstr, \"\\n\"))\n\t}\n\n\tif 
len(protectedBranches) > 0 {\n\t\toutput += fmt.Sprintf(\"\\tProtected Branches (%d): %s\\n\", len(protectedBranches), strings.Join(protectedBranches, \", \"))\n\t}\n\n\tif len(unprotectedBranches) > 0 {\n\t\toutput += fmt.Sprintf(\"\\tUnprotected Branches (%d): %s\\n\", len(unprotectedBranches), strings.Join(unprotectedBranches, \", \"))\n\t}\n\n\trepo, _, err = client.Repositories.Get(ctx, repo.GetOwner().GetLogin(), repo.GetName())\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tmergeMethods := \"\\tMerge Methods:\"\n\tif repo.GetAllowMergeCommit() {\n\t\tmergeMethods += \" mergeCommit\"\n\t}\n\tif repo.GetAllowSquashMerge() {\n\t\tmergeMethods += \" squash\"\n\t}\n\tif repo.GetAllowRebaseMerge() {\n\t\tmergeMethods += \" rebase\"\n\t}\n\toutput += mergeMethods + \"\\n\"\n\n\tfmt.Printf(\"%s--\\n\\n\", output)\n\n\treturn nil\n}", "func (t *Commit) GetRepoID() string {\n\treturn t.RepoID\n}", "func NewListRepoTokensForbidden() *ListRepoTokensForbidden {\n\treturn &ListRepoTokensForbidden{}\n}", "func (m *MockGitUseCaseI) GetBranchList(requestUserID *int64, userName, repoName string) (git.BranchSet, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"GetBranchList\", requestUserID, userName, repoName)\n\tret0, _ := ret[0].(git.BranchSet)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (m *MockRepositoryClient) GetBranchProtection(org, repo, branch string) (*github.BranchProtection, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"GetBranchProtection\", org, repo, branch)\n\tret0, _ := ret[0].(*github.BranchProtection)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func GetBranch(ctx *context.APIContext) {\n\t// swagger:operation GET /repos/{owner}/{repo}/branches/{branch} repository repoGetBranch\n\t// ---\n\t// summary: Retrieve a specific branch from a repository, including its effective branch protection\n\t// produces:\n\t// - application/json\n\t// parameters:\n\t// - name: owner\n\t// in: path\n\t// description: owner of the repo\n\t// type: string\n\t// required: true\n\t// - name: repo\n\t// in: path\n\t// description: name of the repo\n\t// type: string\n\t// required: true\n\t// - name: branch\n\t// in: path\n\t// description: branch to get\n\t// type: string\n\t// required: true\n\t// responses:\n\t// \"200\":\n\t// \"$ref\": \"#/responses/Branch\"\n\t// \"404\":\n\t// \"$ref\": \"#/responses/notFound\"\n\n\tbranchName := ctx.Params(\"*\")\n\n\tbranch, err := ctx.Repo.GitRepo.GetBranch(branchName)\n\tif err != nil {\n\t\tif git.IsErrBranchNotExist(err) {\n\t\t\tctx.NotFound(err)\n\t\t} else {\n\t\t\tctx.Error(http.StatusInternalServerError, \"GetBranch\", err)\n\t\t}\n\t\treturn\n\t}\n\n\tc, err := branch.GetCommit()\n\tif err != nil {\n\t\tctx.Error(http.StatusInternalServerError, \"GetCommit\", err)\n\t\treturn\n\t}\n\n\tbranchProtection, err := git_model.GetFirstMatchProtectedBranchRule(ctx, ctx.Repo.Repository.ID, branchName)\n\tif err != nil {\n\t\tctx.Error(http.StatusInternalServerError, \"GetBranchProtection\", err)\n\t\treturn\n\t}\n\n\tbr, err := convert.ToBranch(ctx, ctx.Repo.Repository, branch.Name, c, branchProtection, ctx.Doer, ctx.Repo.IsAdmin())\n\tif err != nil {\n\t\tctx.Error(http.StatusInternalServerError, \"convert.ToBranch\", err)\n\t\treturn\n\t}\n\n\tctx.JSON(http.StatusOK, br)\n}", "func (s *ActionsService) ListRepoSecrets(ctx context.Context, owner, repo string, opts *ListOptions) (*Secrets, *Response, error) {\n\turl := fmt.Sprintf(\"repos/%v/%v/actions/secrets\", owner, repo)\n\treturn s.listSecrets(ctx, url, 
opts)\n}", "func extractBranches(r *git.Repository) ([]string, error) {\n\titer, err := r.NewBranchIterator(git.BranchAll)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tbranchRef := []string{}\n\terr = iter.ForEach(func(branch *git.Branch, branchType git.BranchType) error {\n\t\tname, err := branch.Name()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tname = strings.TrimPrefix(name, originPrefix)\n\t\tbranchRef = append(branchRef, name)\n\t\treturn nil\n\t})\n\treturn branchRef, err\n}", "func (fn GetReposOwnerRepoBranchesHandlerFunc) Handle(params GetReposOwnerRepoBranchesParams) middleware.Responder {\n\treturn fn(params)\n}", "func (r *BetRepository) GetBySelectionID(ctx context.Context, id string) ([]domainmodels.Bet, error) {\n\tstorageBets, err := r.queryGetBetsBySelectionID(ctx, id)\n\tif err == sql.ErrNoRows {\n\t\treturn []domainmodels.Bet{}, nil\n\t}\n\tif err != nil {\n\t\treturn []domainmodels.Bet{}, errors.Wrap(err, \"bet repository failed to get bets with selection id \"+id)\n\t}\n\n\tvar domainBets []domainmodels.Bet\n\tfor _, storageBet := range storageBets {\n\t\tdomainBet := r.betMapper.MapStorageBetToDomainBet(storageBet)\n\t\tdomainBets = append(domainBets, domainBet)\n\t}\n\n\treturn domainBets, nil\n}", "func (mr *MockClientMockRecorder) GetBranches(org, repo, onlyProtected interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"GetBranches\", reflect.TypeOf((*MockClient)(nil).GetBranches), org, repo, onlyProtected)\n}", "func (b *BranchDAG) ChildBranches(branchID BranchID) (cachedChildBranches CachedChildBranches) {\n\tcachedChildBranches = make(CachedChildBranches, 0)\n\tb.childBranchStorage.ForEach(func(key []byte, cachedObject objectstorage.CachedObject) bool {\n\t\tcachedChildBranches = append(cachedChildBranches, &CachedChildBranch{CachedObject: cachedObject})\n\n\t\treturn true\n\t}, objectstorage.WithIteratorPrefix(branchID.Bytes()))\n\n\treturn\n}", "func getRepoForks(client *ginclient.Client, repo string) ([]gogs.Repository, error) {\n\treqpath := fmt.Sprintf(\"api/v1/repos/%s/forks\", repo)\n\tresp, err := client.Get(reqpath)\n\tif err != nil {\n\t\tlog.Printf(\"Failed get forks for %q: %s\", repo, err.Error())\n\t\treturn nil, err\n\t}\n\tdata, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\tlog.Printf(\"Failed read forks from response for %q: %s\", repo, err.Error())\n\t\treturn nil, err\n\t}\n\tforks := make([]gogs.Repository, 0)\n\terr = json.Unmarshal(data, &forks)\n\tif err != nil {\n\t\tlog.Printf(\"Failed to unmarshal forks for %q: %s\", repo, err.Error())\n\t}\n\treturn forks, err\n}", "func (r *Repository) GitHubGetRepositoriesByCLAGroupDisabled(ctx context.Context, claGroupID string) ([]*repoModels.RepositoryDBModel, error) {\n\tcondition := expression.Key(repoModels.RepositoryCLAGroupIDColumn).Equal(expression.Value(claGroupID))\n\tfilter := expression.Name(repoModels.RepositoryTypeColumn).Equal(expression.Value(utils.GitLabLower)).\n\t\tAnd(expression.Name(repoModels.RepositoryEnabledColumn).Equal(expression.Value(false)))\n\trecords, err := r.getRepositoriesWithConditionFilter(ctx, condition, filter, repoModels.RepositoryProjectIndex)\n\tif err != nil {\n\t\t// Catch the error - return the same error with the appropriate details\n\t\tif _, ok := err.(*utils.GitLabRepositoryNotFound); ok {\n\t\t\treturn nil, &utils.GitLabRepositoryNotFound{\n\t\t\t\tCLAGroupID: claGroupID,\n\t\t\t}\n\t\t}\n\n\t\t// Some other error\n\t\treturn nil, err\n\t}\n\n\treturn records, nil\n}", 
"func (m *MockClient) GetBranchProtection(org, repo, branch string) (*github.BranchProtection, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"GetBranchProtection\", org, repo, branch)\n\tret0, _ := ret[0].(*github.BranchProtection)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func d8getBranches(node *d8nodeT, branch *d8branchT, parVars *d8partitionVarsT) {\n\t// Load the branch buffer\n\tfor index := 0; index < d8maxNodes; index++ {\n\t\tparVars.branchBuf[index] = node.branch[index]\n\t}\n\tparVars.branchBuf[d8maxNodes] = *branch\n\tparVars.branchCount = d8maxNodes + 1\n\n\t// Calculate rect containing all in the set\n\tparVars.coverSplit = parVars.branchBuf[0].rect\n\tfor index := 1; index < d8maxNodes+1; index++ {\n\t\tparVars.coverSplit = d8combineRect(&parVars.coverSplit, &parVars.branchBuf[index].rect)\n\t}\n\tparVars.coverSplitArea = d8calcRectVolume(&parVars.coverSplit)\n}", "func d15getBranches(node *d15nodeT, branch *d15branchT, parVars *d15partitionVarsT) {\n\t// Load the branch buffer\n\tfor index := 0; index < d15maxNodes; index++ {\n\t\tparVars.branchBuf[index] = node.branch[index]\n\t}\n\tparVars.branchBuf[d15maxNodes] = *branch\n\tparVars.branchCount = d15maxNodes + 1\n\n\t// Calculate rect containing all in the set\n\tparVars.coverSplit = parVars.branchBuf[0].rect\n\tfor index := 1; index < d15maxNodes+1; index++ {\n\t\tparVars.coverSplit = d15combineRect(&parVars.coverSplit, &parVars.branchBuf[index].rect)\n\t}\n\tparVars.coverSplitArea = d15calcRectVolume(&parVars.coverSplit)\n}", "func (p GithubRepoHost) UpdateBranchProtection(repoID string, rule BranchProtectionRule) error {\n\tif isDebug() {\n\t\tfmt.Printf(\"Updating branch protection on %s\\n\", repoID)\n\t}\n\n\trules := fetchBranchProtectionRules()\n\tinput := githubv4.UpdateBranchProtectionRuleInput{\n\t\tBranchProtectionRuleID: rule.ID,\n\t\tPattern: githubv4.NewString(githubv4.String(rules.Pattern)),\n\t\tDismissesStaleReviews: githubv4.NewBoolean(githubv4.Boolean(rules.DismissesStaleReviews)),\n\t\tIsAdminEnforced: githubv4.NewBoolean(githubv4.Boolean(rules.IsAdminEnforced)),\n\t\tRequiresApprovingReviews: githubv4.NewBoolean(githubv4.Boolean(rules.RequiresApprovingReviews)),\n\t\tRequiredApprovingReviewCount: githubv4.NewInt(githubv4.Int(rules.RequiredApprovingReviewCount)),\n\t\tRequiresStatusChecks: githubv4.NewBoolean(githubv4.Boolean(rules.RequiresStatusChecks)),\n\t\tRequiredStatusCheckContexts: &[]githubv4.String{\n\t\t\t*githubv4.NewString(\"build\"),\n\t\t},\n\t}\n\n\tvar m UpdateBranchProtectionRuleMutation\n\tclient := buildClient()\n\terr := client.Mutate(context.Background(), &m, input, nil)\n\treturn err\n}", "func IsUserInProtectBranchWhitelist(repoID, userID int64, branch string) bool {\n\thas, err := x.Where(\"repo_id = ?\", repoID).And(\"user_id = ?\", userID).And(\"name = ?\", branch).Get(new(ProtectBranchWhitelist))\n\treturn has && err == nil\n}", "func (c APIClient) ListRepo() ([]*pfs.RepoInfo, error) {\n\trequest := &pfs.ListRepoRequest{}\n\trepoInfos, err := c.PfsAPIClient.ListRepo(\n\t\tc.Ctx(),\n\t\trequest,\n\t)\n\tif err != nil {\n\t\treturn nil, grpcutil.ScrubGRPC(err)\n\t}\n\treturn repoInfos.RepoInfo, nil\n}", "func (o *PostWebhook) GetBranchesToIgnore() string {\n\tif o == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\n\treturn o.BranchesToIgnore\n}", "func d1getBranches(node *d1nodeT, branch *d1branchT, parVars *d1partitionVarsT) {\n\t// Load the branch buffer\n\tfor index := 0; index < d1maxNodes; index++ {\n\t\tparVars.branchBuf[index] = 
node.branch[index]\n\t}\n\tparVars.branchBuf[d1maxNodes] = *branch\n\tparVars.branchCount = d1maxNodes + 1\n\n\t// Calculate rect containing all in the set\n\tparVars.coverSplit = parVars.branchBuf[0].rect\n\tfor index := 1; index < d1maxNodes+1; index++ {\n\t\tparVars.coverSplit = d1combineRect(&parVars.coverSplit, &parVars.branchBuf[index].rect)\n\t}\n\tparVars.coverSplitArea = d1calcRectVolume(&parVars.coverSplit)\n}", "func RetrieveGitRepositories(rootpath string) ([]string, error) {\n\tvar gitPaths []string\n\terr := filepath.Walk(rootpath, func(pathdir string, fileInfo os.FileInfo, err error) error {\n\t\tif fileInfo.IsDir() && filepath.Base(pathdir) == consts.GitFileName {\n\t\t\tfileDir := filepath.Dir(pathdir)\n\t\t\ttraces.DebugTracer.Printf(\"Just found in hard drive %s\\n\", fileDir)\n\t\t\tgitPaths = append(gitPaths, fileDir)\n\t\t}\n\t\treturn nil\n\t})\n\treturn gitPaths, err\n}", "func (h *branchesService) setGitBranchTips(repo *Repo) {\n\tvar invalidBranches []string\n\tfor _, b := range repo.Branches {\n\t\ttip, ok := repo.TryGetCommitByID(b.TipID)\n\t\tif !ok {\n\t\t\t// A branch tip id, which commit id does not exist in the repo\n\t\t\t// Store that branch name so it can be removed from the list below\n\t\t\tinvalidBranches = append(invalidBranches, b.Name)\n\t\t\tcontinue\n\t\t}\n\n\t\t// Adding the branch to the branch tip commit\n\t\ttip.addBranch(b)\n\t\ttip.BranchTipNames = append(tip.BranchTipNames, b.Name)\n\n\t\tif b.IsCurrent {\n\t\t\t// Mark the current branch tip commit as the current commit\n\t\t\ttip.IsCurrent = true\n\t\t}\n\t}\n\n\t// If some branches do not have existing tip commit id,\n\t// Remove them from the list of repo.Branches\n\tif len(invalidBranches) > 0 {\n\t\trepo.Branches = linq.Filter(repo.Branches, func(v *Branch) bool {\n\t\t\treturn !linq.Contains(invalidBranches, v.Name)\n\t\t})\n\t}\n}", "func (v *VersionHistory) GetBranchToken() []byte {\n\ttoken := make([]byte, len(v.BranchToken))\n\tcopy(token, v.BranchToken)\n\treturn token\n}", "func (c *Client) ListRepoAccounts(namespace, repoName string) ([]*api.Account, error) {\n\tout := []*api.Account{}\n\trawURL := fmt.Sprintf(pathRepoAccounts, c.base.String(), namespace, repoName)\n\terr := c.get(rawURL, true, &out)\n\treturn out, errio.Error(err)\n}", "func (o *VulnerabilitiesRequest) GetRepositoryList() []string {\n\tif o == nil || o.RepositoryList == nil {\n\t\tvar ret []string\n\t\treturn ret\n\t}\n\treturn *o.RepositoryList\n}", "func (g *GitHub) GetRepos(userID string) (Repos []api.Repo, username, avatarURL string, err error) {\n\tds := store.NewStore()\n\tdefer ds.Close()\n\n\ttok, err := ds.FindtokenByUserID(userID, \"github\")\n\tif err != nil {\n\t\tlog.ErrorWithFields(\"find token failed\", log.Fields{\"user_id\": userID, \"error\": err})\n\t\treturn Repos, username, avatarURL, err\n\t}\n\n\t// Use token to get repo list.\n\tts := oauth2.StaticTokenSource(\n\t\t&oauth2.Token{AccessToken: tok.Vsctoken.AccessToken},\n\t)\n\ttc := oauth2.NewClient(oauth2.NoContext, ts)\n\n\tclient := github.NewClient(tc)\n\n\t// List all repositories for the authenticated user.\n\topt := &github.RepositoryListOptions{\n\t\tListOptions: github.ListOptions{PerPage: 30},\n\t}\n\t// Get all pages of results.\n\tvar allRepos []*github.Repository\n\tfor {\n\t\trepos, resp, err := client.Repositories.List(\"\", opt)\n\t\tif err != nil {\n\t\t\tmessage := \"Unable to list repo by token\"\n\t\t\tlog.ErrorWithFields(message, log.Fields{\"user_id\": userID, \"token\": tok, \"error\": err})\n\t\t\treturn Repos, 
username, avatarURL, fmt.Errorf(\"Unable to list repo by token\")\n\t\t}\n\t\tallRepos = append(allRepos, repos...)\n\t\tif resp.NextPage == 0 {\n\t\t\tbreak\n\t\t}\n\t\topt.ListOptions.Page = resp.NextPage\n\t}\n\n\tRepos = make([]api.Repo, len(allRepos))\n\tfor i, repo := range allRepos {\n\t\tRepos[i].Name = *repo.Name\n\t\tRepos[i].URL = *repo.CloneURL\n\t\tRepos[i].Owner = *repo.Owner.Login\n\t}\n\n\tuser, _, err := client.Users.Get(\"\")\n\tif err != nil {\n\t\tlog.ErrorWithFields(\"Users.Get returned error\", log.Fields{\"user_id\": userID,\n\t\t\t\"token\": tok, \"error\": err})\n\t\treturn Repos, username, avatarURL, err\n\t}\n\tusername = *user.Login\n\tavatarURL = *user.AvatarURL\n\n\treturn Repos, username, avatarURL, nil\n}", "func NewPatchReposOwnerRepoReleasesIDForbidden() *PatchReposOwnerRepoReleasesIDForbidden {\n\treturn &PatchReposOwnerRepoReleasesIDForbidden{}\n}", "func (r *Repository) GitHubGetRepositoriesByProjectSFID(ctx context.Context, projectSFID string) ([]*repoModels.RepositoryDBModel, error) {\n\tcondition := expression.Key(repoModels.RepositoryProjectIDColumn).Equal(expression.Value(projectSFID))\n\tfilter := expression.Name(repoModels.RepositoryTypeColumn).Equal(expression.Value(utils.GitLabLower))\n\n\trecords, err := r.getRepositoriesWithConditionFilter(ctx, condition, filter, repoModels.RepositoryProjectSFIDIndex)\n\tif err != nil {\n\t\t// Catch the error - return the same error with the appropriate details\n\t\tif _, ok := err.(*utils.GitLabRepositoryNotFound); ok {\n\t\t\treturn nil, &utils.GitLabRepositoryNotFound{\n\t\t\t\tProjectSFID: projectSFID,\n\t\t\t}\n\t\t}\n\n\t\t// Some other error\n\t\treturn nil, err\n\t}\n\n\treturn records, nil\n}", "func (o *BranchingModelSettings) GetBranchTypes() []BranchingModelSettingsBranchTypes {\n\tif o == nil || o.BranchTypes == nil {\n\t\tvar ret []BranchingModelSettingsBranchTypes\n\t\treturn ret\n\t}\n\treturn *o.BranchTypes\n}", "func d3getBranches(node *d3nodeT, branch *d3branchT, parVars *d3partitionVarsT) {\n\t// Load the branch buffer\n\tfor index := 0; index < d3maxNodes; index++ {\n\t\tparVars.branchBuf[index] = node.branch[index]\n\t}\n\tparVars.branchBuf[d3maxNodes] = *branch\n\tparVars.branchCount = d3maxNodes + 1\n\n\t// Calculate rect containing all in the set\n\tparVars.coverSplit = parVars.branchBuf[0].rect\n\tfor index := 1; index < d3maxNodes+1; index++ {\n\t\tparVars.coverSplit = d3combineRect(&parVars.coverSplit, &parVars.branchBuf[index].rect)\n\t}\n\tparVars.coverSplitArea = d3calcRectVolume(&parVars.coverSplit)\n}", "func getAndMergeFetchedBranches(branches []Branch) []Branch {\n\trawString, err := runDirectCommand(\"git branch --sort=-committerdate --no-color\")\n\tif err != nil {\n\t\treturn branches\n\t}\n\tbranchLines := splitLines(rawString)\n\tfor _, line := range branchLines {\n\t\tline = strings.Replace(line, \"* \", \"\", -1)\n\t\tline = strings.TrimSpace(line)\n\t\tif branchAlreadyStored(line, branches) {\n\t\t\tcontinue\n\t\t}\n\t\tbranches = append(branches, constructBranch(\"\", line, len(branches)))\n\t}\n\treturn branches\n}", "func ListBranchesAndTags(branchType git.BranchType, repoLocation data.RepoLocation, repoIdentifier data.RepoIdentifier) (data.BranchesAndTags, error) {\n\tvar (\n\t\tremoteBranches = make([]data.Remotes, 0)\n\t\tremoteTags = make([]data.Remotes, 0)\n\t\trepoId = repoIdentifier.RepoId()\n\t)\n\trepo, err := git.OpenRepository(repoLocation.GetRepoPath(repoId))\n\tif err != nil {\n\t\treturn data.BranchesAndTags{}, err\n\t}\n\tbranchIterator, err := 
repo.NewBranchIterator(branchType)\n\tif err != nil {\n\t\treturn data.BranchesAndTags{}, err\n\t}\n\n\tbranchIterator.ForEach(func(b *git.Branch, bt git.BranchType) error {\n\t\tname, err := b.Name()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treg := regexp.MustCompile(\"^[A-Za-z0-9_-]+\\\\/\")\n\t\tname = reg.ReplaceAllString(name, \"\")\n\t\ttarget := b.Target()\n\t\tvar hash string\n\t\tif nil == target {\n\t\t\thash = \"\"\n\t\t} else {\n\t\t\thash = target.String()\n\t\t}\n\n\t\tremoteBranches = append(remoteBranches, data.Remotes{Type: \"branch\", Hash: hash, Value: name})\n\n\t\treturn nil\n\t})\n\n\trefIt, err := repo.NewReferenceIteratorGlob(\"refs/tags/*\")\n\n\tif err != nil {\n\t\treturn data.BranchesAndTags{}, err\n\t}\n\n\ttag, err := refIt.Next()\n\tfor err == nil {\n\t\tname := strings.Replace(tag.Name(), \"refs/tags/\", \"\", 1)\n\t\tremoteTags = append(remoteTags, data.Remotes{Type: \"tag\", Hash: tag.Target().String(), Value: name})\n\t\ttag, err = refIt.Next()\n\t}\n\n\treturn data.BranchesAndTags{Tags: remoteTags, Branches: remoteBranches}, nil\n}", "func (gc *GithubClient) ListFiles(org, repo string, ID int) ([]*github.CommitFile, error) {\n\toptions := &github.ListOptions{}\n\tgenericList, err := gc.depaginate(\n\t\tfmt.Sprintf(\"listing files in Pull Requests '%d'\", ID),\n\t\tmaxRetryCount,\n\t\toptions,\n\t\tfunc() ([]interface{}, *github.Response, error) {\n\t\t\tpage, resp, err := gc.Client.PullRequests.ListFiles(ctx, org, repo, ID, options)\n\t\t\tvar interfaceList []interface{}\n\t\t\tif nil == err {\n\t\t\t\tfor _, f := range page {\n\t\t\t\t\tinterfaceList = append(interfaceList, f)\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn interfaceList, resp, err\n\t\t},\n\t)\n\tres := make([]*github.CommitFile, len(genericList))\n\tfor i, elem := range genericList {\n\t\tres[i] = elem.(*github.CommitFile)\n\t}\n\treturn res, err\n}" ]
[ "0.63642013", "0.6320566", "0.5900391", "0.56682706", "0.5501149", "0.54667157", "0.54543006", "0.54134506", "0.51837456", "0.51545143", "0.4974663", "0.49698856", "0.496513", "0.494982", "0.49366364", "0.48883775", "0.48778513", "0.48493487", "0.48474577", "0.4751535", "0.4738013", "0.4727116", "0.46891257", "0.46279487", "0.46260476", "0.46001658", "0.4584099", "0.4443153", "0.43944657", "0.4355819", "0.4330614", "0.43177867", "0.42948732", "0.428859", "0.42869312", "0.42554495", "0.42474577", "0.42436764", "0.42427632", "0.42196605", "0.42130962", "0.42129242", "0.42037672", "0.4185289", "0.41805115", "0.41713718", "0.415691", "0.41369703", "0.41276246", "0.41234654", "0.41199735", "0.41189623", "0.4118698", "0.4107066", "0.41066894", "0.41031754", "0.40986046", "0.40965202", "0.40940282", "0.4078522", "0.40735498", "0.40721348", "0.40634027", "0.40613437", "0.40541816", "0.40349302", "0.40340856", "0.4020747", "0.40184954", "0.39898738", "0.39852864", "0.39805168", "0.39771998", "0.3975001", "0.39733595", "0.39705288", "0.39460677", "0.39407125", "0.3935295", "0.39310738", "0.39125556", "0.38983607", "0.3891863", "0.38907406", "0.3878355", "0.38698998", "0.38690582", "0.38684618", "0.38672388", "0.3834461", "0.38306448", "0.38193005", "0.38117662", "0.38116118", "0.37996015", "0.37980378", "0.37955904", "0.37910184", "0.37893963", "0.37875175" ]
0.90316087
0
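A minimal sketch of how the three score fields above could relate, assuming document_score is the positive document's similarity to the query and negative_scores are the similarities of the mined negatives; rankDocument is a hypothetical helper, not part of the dataset, and the recorded document_rank of 0 is consistent with it, since 0.90316087 beats the best negative (0.63642013).

func rankDocument(documentScore float64, negativeScores []float64) int {
	// Count how many negatives outscore the positive document;
	// a rank of 0 means the positive is the best match for the query.
	rank := 0
	for _, s := range negativeScores {
		if s > documentScore {
			rank++
		}
	}
	return rank
}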
the following function catches SIGUSR1 and dumps runtime statistics; there is no performance penalty for collecting these stats
func dumpInfo(c chan os.Signal) { for { <-c log.Println("Signal caught - dumping runtime stats") var m runtime.MemStats runtime.ReadMemStats(&m) s, _ := json.Marshal(m) log.Println("MemStats JSON follows") log.Printf("%s\n", s) var garC debug.GCStats debug.ReadGCStats(&garC) log.Printf("\nLastGC:\t%s", garC.LastGC) // time of last collection log.Printf("\nNumGC:\t%d", garC.NumGC) // number of garbage collections log.Printf("\nPauseTotal:\t%s", garC.PauseTotal) // total pause for all collections log.Printf("\nPause:\t%s", garC.Pause) // pause history, most recent first log.Println("debug.Stack: " + string(debug.Stack())) log.Printf("runtime.NumGoroutine: %d", runtime.NumGoroutine()) } }
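A minimal usage sketch for the document above, assuming it runs in a program that installs dumpInfo as a SIGUSR1 handler; signal.Notify and syscall.SIGUSR1 are standard library, everything else here is illustrative.

package main

import (
	"os"
	"os/signal"
	"syscall"
)

func main() {
	c := make(chan os.Signal, 1) // buffered so a pending signal is not dropped
	signal.Notify(c, syscall.SIGUSR1)
	go dumpInfo(c) // dumpInfo as defined in the document above
	select {}      // keep the process alive; `kill -USR1 <pid>` triggers a dump
}

The collection cost is only paid when a signal actually arrives, which is presumably what the query means by there being no performance penalty.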
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func handleDebugSignal(ctx context.Context) {\n\tsigCh := make(chan os.Signal, 1)\n\tsignal.Notify(sigCh, unix.SIGUSR2)\n\n\tfor {\n\t\tselect {\n\t\tcase <-sigCh:\n\t\t\tpprof.Lookup(\"goroutine\").WriteTo(os.Stderr, 1)\n\t\tcase <-ctx.Done():\n\t\t\treturn\n\t\t}\n\t}\n}", "func sys_prof(p *proc.Proc_t, ptype, _events, _pmflags, intperiod int) int {\n\ten := true\n\tif ptype&defs.PROF_DISABLE != 0 {\n\t\ten = false\n\t}\n\tpmflags := pmflag_t(_pmflags)\n\tswitch {\n\tcase ptype&defs.PROF_GOLANG != 0:\n\t\t_prof_go(en)\n\tcase ptype&defs.PROF_SAMPLE != 0:\n\t\tev := pmev_t{evid: pmevid_t(_events),\n\t\t\tpflags: pmflags}\n\t\t_prof_nmi(en, ev, intperiod)\n\tcase ptype&defs.PROF_COUNT != 0:\n\t\tif pmflags&EVF_BACKTRACE != 0 {\n\t\t\treturn int(-defs.EINVAL)\n\t\t}\n\t\tevs := make([]pmev_t, 0, 4)\n\t\tfor i := uint(0); i < 64; i++ {\n\t\t\tb := 1 << i\n\t\t\tif _events&b != 0 {\n\t\t\t\tn := pmev_t{}\n\t\t\t\tn.evid = pmevid_t(b)\n\t\t\t\tn.pflags = pmflags\n\t\t\t\tevs = append(evs, n)\n\t\t\t}\n\t\t}\n\t\t_prof_pmc(en, evs)\n\tcase ptype&defs.PROF_HACK != 0:\n\t\truntime.Setheap(_events << 20)\n\tcase ptype&defs.PROF_HACK2 != 0:\n\t\tif _events < 0 {\n\t\t\treturn int(-defs.EINVAL)\n\t\t}\n\t\tfmt.Printf(\"GOGC = %v\\n\", _events)\n\t\tdebug.SetGCPercent(_events)\n\tcase ptype&defs.PROF_HACK3 != 0:\n\t\tif _events < 0 {\n\t\t\treturn int(-defs.EINVAL)\n\t\t}\n\t\tbuf := make([]uint8, _events)\n\t\tif buf == nil {\n\t\t}\n\t\t//fakedur[duri] = buf\n\t\t//duri = (duri + 1) % len(fakedur)\n\t\t//for i := 0; i < _events/8; i++ {\n\t\t//fakeptr = proc\n\t\t//}\n\tcase ptype&defs.PROF_HACK4 != 0:\n\t\tmakefake(p)\n\tcase ptype&defs.PROF_HACK5 != 0:\n\t\tn := _events\n\t\tif n < 0 {\n\t\t\treturn int(-defs.EINVAL)\n\t\t}\n\t\truntime.SetMaxheap(n)\n\t\tfmt.Printf(\"remaining mem: %v\\n\",\n\t\t\tres.Human(runtime.Remain()))\n\tcase ptype&defs.PROF_HACK6 != 0:\n\t\tanum := float64(_events)\n\t\tadenom := float64(_pmflags)\n\t\tif adenom <= 0 || anum <= 0 {\n\t\t\treturn int(-defs.EINVAL)\n\t\t}\n\t\tfrac := anum / adenom\n\t\truntime.Assistfactor = frac\n\t\tfmt.Printf(\"assist factor = %v\\n\", frac)\n\tdefault:\n\t\treturn int(-defs.EINVAL)\n\t}\n\treturn 0\n}", "func CatchDebug() {\r\n\tsigchan := make(chan os.Signal, 1)\r\n\t//signal.Notify(sigchan, syscall.SIGUSR1)\r\n\tsignal.Notify(sigchan, syscall.Signal(0xa)) // SIGUSR1 = Signal(0xa)\r\n\tfor range sigchan {\r\n\t\tPrintProgramStatus()\r\n\t}\r\n}", "func catchDebug() {\n\tsigchan := make(chan os.Signal, 1)\n\t//signal.Notify(sigchan, syscall.SIGUSR1)\n\tsignal.Notify(sigchan, syscall.Signal(0xa)) // SIGUSR1 = Signal(0xa)\n\n\tfor {\n\t\tselect {\n\t\tcase <-sigchan:\n\t\t\tprintProgramStatus()\n\t\t}\n\t}\n}", "func sigHandler(s chan os.Signal) {\n\tfor received_signal := range s {\n\t\tswitch received_signal {\n\t\tcase syscall.SIGINT:\n\t\t\tlog.Warn(\"CAUGHT SIGINT: Shutting down!\")\n\t\t\tif *profile != \"\" {\n\t\t\t\tlog.Println(\"> Writing out profile info\")\n\t\t\t\tpprof.StopCPUProfile()\n\t\t\t}\n\t\t\tutil.CaptureMessage(\"Stopped tracker\")\n\t\t\tos.Exit(0)\n\t\tcase syscall.SIGUSR2:\n\t\t\tlog.Warn(\"CAUGHT SIGUSR2: Reloading config\")\n\t\t\t<-s\n\t\t\tconf.LoadConfig(*config_file, false)\n\t\t\tlog.Info(\"> Reloaded config\")\n\t\t\tutil.CaptureMessage(\"Reloaded configuration\")\n\t\t}\n\t}\n}", "func signalCatcher(server *server.Server, endpoint *http.HttpEndpoint) {\n\tsig_chan := make(chan os.Signal, 4)\n\tsignal.Notify(sig_chan, os.Interrupt, syscall.SIGTERM)\n\n\tvar s os.Signal\n\tselect {\n\tcase s = 
<-sig_chan:\n\t}\n\tif server.CpuProfile() != \"\" {\n\t\tlogging.Infop(\"Stopping CPU profile\")\n\t\tpprof.StopCPUProfile()\n\t}\n\tif server.MemProfile() != \"\" {\n\t\tf, err := os.Create(server.MemProfile())\n\t\tif err != nil {\n\t\t\tlogging.Errorp(\"Cannot create memory profile file\", logging.Pair{\"error\", err})\n\t\t} else {\n\n\t\t\tlogging.Infop(\"Writing Memory profile\")\n\t\t\tpprof.WriteHeapProfile(f)\n\t\t\tf.Close()\n\t\t}\n\t}\n\tif s == os.Interrupt {\n\t\t// Interrupt (ctrl-C) => Immediate (ungraceful) exit\n\t\tlogging.Infop(\"Shutting down immediately\")\n\t\tos.Exit(0)\n\t}\n\tlogging.Infop(\"Attempting graceful exit\")\n\t// Stop accepting new requests\n\terr := endpoint.Close()\n\tif err != nil {\n\t\tlogging.Errorp(\"error closing http listener\", logging.Pair{\"err\", err})\n\t}\n\terr = endpoint.CloseTLS()\n\tif err != nil {\n\t\tlogging.Errorp(\"error closing https listener\", logging.Pair{\"err\", err})\n\t}\n}", "func hookStats(e *evtx.GoEvtxMap) {\n\t// We do not store stats if process termination is not enabled\n\tif flagProcTermEn {\n\t\tif guid, err := e.GetString(&pathSysmonProcessGUID); err == nil {\n\t\t\t//v, ok := processTracker.Get(guid)\n\t\t\t//if ok {\n\t\t\tif pt := processTracker.GetByGuid(guid); pt != nil {\n\t\t\t\t//pt := v.(*processTrack)\n\t\t\t\tswitch e.EventID() {\n\t\t\t\tcase 1:\n\t\t\t\t\tpt.Stats.CountProcessCreated++\n\t\t\t\tcase 3:\n\t\t\t\t\tpt.Stats.CountNetConn++\n\t\t\t\tcase 11:\n\t\t\t\t\tif target, err := e.GetString(&pathSysmonTargetFilename); err == nil {\n\t\t\t\t\t\text := filepath.Ext(target)\n\t\t\t\t\t\tif pt.Stats.CountFilesCreatedByExt[ext] == nil {\n\t\t\t\t\t\t\ti := int64(0)\n\t\t\t\t\t\t\tpt.Stats.CountFilesCreatedByExt[ext] = &i\n\t\t\t\t\t\t}\n\t\t\t\t\t\t*(pt.Stats.CountFilesCreatedByExt[ext])++\n\t\t\t\t\t}\n\t\t\t\t\tpt.Stats.CountFilesCreated++\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}", "func hookStats(e *evtx.GoEvtxMap) {\n\t// We do not store stats if process termination is not enabled\n\tif flagProcTermEn {\n\t\tif guid, err := e.GetString(&sysmonProcessGUID); err == nil {\n\t\t\tpt := processTracker[guid]\n\t\t\tif pt != nil {\n\t\t\t\tswitch e.EventID() {\n\t\t\t\tcase 1:\n\t\t\t\t\tpt.Stats.CountProcessCreated++\n\t\t\t\tcase 3:\n\t\t\t\t\tpt.Stats.CountNetConn++\n\t\t\t\tcase 11:\n\t\t\t\t\tif target, err := e.GetString(&sysmonTargetFilename); err == nil {\n\t\t\t\t\t\text := filepath.Ext(target)\n\t\t\t\t\t\tif pt.Stats.CountFilesCreatedByExt[ext] == nil {\n\t\t\t\t\t\t\ti := int64(0)\n\t\t\t\t\t\t\tpt.Stats.CountFilesCreatedByExt[ext] = &i\n\t\t\t\t\t\t}\n\t\t\t\t\t\t*(pt.Stats.CountFilesCreatedByExt[ext])++\n\t\t\t\t\t}\n\t\t\t\t\tpt.Stats.CountFilesCreated++\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}", "func report(p *rc.Process, wallTime time.Duration) {\n\tsv, err := p.SystemVersion()\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn\n\t}\n\n\tss, err := p.SystemStatus()\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn\n\t}\n\n\tproc, err := p.Stop()\n\tif err != nil {\n\t\treturn\n\t}\n\n\trusage, ok := proc.SysUsage().(*syscall.Rusage)\n\tif !ok {\n\t\treturn\n\t}\n\n\tlog.Println(\"Version:\", sv.Version)\n\tlog.Println(\"Alloc:\", ss.Alloc/1024, \"KiB\")\n\tlog.Println(\"Sys:\", ss.Sys/1024, \"KiB\")\n\tlog.Println(\"Goroutines:\", ss.Goroutines)\n\tlog.Println(\"Wall time:\", wallTime)\n\tlog.Println(\"Utime:\", time.Duration(rusage.Utime.Nano()))\n\tlog.Println(\"Stime:\", time.Duration(rusage.Stime.Nano()))\n\tif runtime.GOOS == \"darwin\" {\n\t\t// Darwin reports in bytes, Linux seems to 
report in KiB even\n\t\t// though the manpage says otherwise.\n\t\trusage.Maxrss /= 1024\n\t}\n\tlog.Println(\"MaxRSS:\", rusage.Maxrss, \"KiB\")\n\n\tfmt.Printf(\"%s,%d,%d,%d,%.02f,%.02f,%.02f,%d\\n\",\n\t\tsv.Version,\n\t\tss.Alloc/1024,\n\t\tss.Sys/1024,\n\t\tss.Goroutines,\n\t\twallTime.Seconds(),\n\t\ttime.Duration(rusage.Utime.Nano()).Seconds(),\n\t\ttime.Duration(rusage.Stime.Nano()).Seconds(),\n\t\trusage.Maxrss)\n}", "func stats() {\n\tfor {\n\t\tcnt := atomic.LoadInt32(&counter)\n\t\tif cnt < 1000000 {\n\t\t\tfmt.Printf(\"#\\n# Elapsed: %s - Counter: %d\\n#\\n\", time.Now().Sub(startTime).String(), cnt)\n\t\t} else {\n\t\t\tfmt.Printf(\"#\\n# Elapsed: %s - Counter: %d\\n# Hit Ctrl-C to terminate the program\\n#\\n\", time.Now().Sub(startTime).String(), cnt)\n\t\t}\n\t\ttime.Sleep(time.Second)\n\t}\n}", "func stats() {\n\tfor {\n\t\tcnt := atomic.LoadInt32(&counter)\n\t\tif cnt < 1000000 {\n\t\t\tfmt.Printf(\"#\\n# Elapsed: %s - Counter: %d\\n#\\n\", time.Now().Sub(startTime).String(), cnt)\n\t\t} else {\n\t\t\tfmt.Printf(\"#\\n# Elapsed: %s - Counter: %d\\n# Hit Ctrl-C to terminate the program\\n#\\n\", time.Now().Sub(startTime).String(), cnt)\n\t\t}\n\t\ttime.Sleep(time.Second)\n\t}\n}", "func (a *Agent) handleSignals() error {\n\tconst stacktraceBufSize = 1 * units.MiB\n\n\t// pre-allocate a buffer\n\tbuf := make([]byte, stacktraceBufSize)\n\n\tfor {\n\t\tselect {\n\t\tcase sig := <-a.signalCh:\n\t\t\ta.logger.Info().Str(\"signal\", sig.String()).Msg(\"received signal\")\n\t\t\tswitch sig {\n\t\t\tcase os.Interrupt, unix.SIGTERM:\n\t\t\t\ta.Stop()\n\t\t\tcase unix.SIGHUP:\n\t\t\t\t// Noop\n\t\t\tcase unix.SIGTRAP:\n\t\t\t\tstacklen := runtime.Stack(buf, true)\n\t\t\t\tfmt.Printf(\"=== received SIGTRAP ===\\n*** goroutine dump...\\n%s\\n*** end\\n\", buf[:stacklen])\n\t\t\tdefault:\n\t\t\t\ta.logger.Warn().Str(\"signal\", sig.String()).Msg(\"unsupported\")\n\t\t\t}\n\t\tcase <-a.groupCtx.Done():\n\t\t\treturn nil\n\t\t}\n\t}\n}", "func handleSignals(c chan os.Signal) {\n\tlog.Print(\"Notice: System signal monitoring is enabled(watch: SIGINT,SIGTERM,SIGQUIT)\\n\")\n\n\tswitch <-c {\n\tcase syscall.SIGINT:\n\t\tfmt.Println(\"\\nShutdown by Ctrl+C\")\n\tcase syscall.SIGTERM: // by kill\n\t\tfmt.Println(\"\\nShutdown quickly\")\n\tcase syscall.SIGQUIT:\n\t\tfmt.Println(\"\\nShutdown gracefully\")\n\t\t// do graceful shutdown\n\t}\n\n\t// sync logs\n\t_ = app.Logger.Sync()\n\t_ = mysql.Close()\n\t_ = myrds.ClosePool()\n\tmongo.Close()\n\n\t// unregister from eureka\n\t// erkServer.Unregister()\n\n\t// 等待一秒\n\ttime.Sleep(1e9 / 2)\n\tcolor.Info.Println(\"\\n GoodBye...\")\n\n\tos.Exit(0)\n}", "func init() {\n\tpromRegistry := prometheus.NewRegistry()\n\tpromRegistry.MustRegister(uptime, reqCount, passCount, blockCount, reqDuration)\n\tgo recordUptime()\n\tpromHandler = promhttp.InstrumentMetricHandler(promRegistry, promhttp.HandlerFor(promRegistry, promhttp.HandlerOpts{}))\n}", "func DumpDebugStats() {}", "func (p *ProcessList) USR1() bool {\n\tLog(fmt.Sprintf(\"Sent USR1 to pid: %d\", p.Pid), \"debug\")\n\tsyscall.Kill(p.Pid, syscall.SIGUSR1)\n\treturn true\n}", "func HandleDebugSignal() {\n\tsignals := make(chan os.Signal, 1)\n\tsignal.Notify(signals, syscall.SIGUSR1)\n\tfor {\n\t\tselect {\n\t\tcase <-signals:\n\t\t\tlogDebugInfo()\n\t\t}\n\t}\n}", "func main() {\n testService := NewService(\"test\", methods)\n\n start := time.Now()\n sigint := make(chan os.Signal, 1)\n signal.Notify(sigint, os.Interrupt)\n\n for {\n select {\n\n case <-testService.Quit:\n fmt.Println(\"Quit\")\n 
return\n\n case <-sigint:\n fmt.Println(\"Quit\")\n since := time.Since(start)\n fmt.Println(since)\n fmt.Println(testService.NResponses)\n fmt.Println(float64(testService.NResponses) / since.Seconds())\n return\n\n }\n }\n}", "func (stats *Stats) RegisterPrometheus() error {\n\terr := prometheus.Register(prometheus.NewCounterFunc(prometheus.CounterOpts{\n\t\tNamespace: \"tracee_ebpf\",\n\t\tName: \"events_total\",\n\t\tHelp: \"events collected by tracee-ebpf\",\n\t}, func() float64 { return float64(stats.EventCount.Read()) }))\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = prometheus.Register(prometheus.NewCounterFunc(prometheus.CounterOpts{\n\t\tNamespace: \"tracee_ebpf\",\n\t\tName: \"netevents_total\",\n\t\tHelp: \"net events collected by tracee-ebpf\",\n\t}, func() float64 { return float64(stats.NetEvCount.Read()) }))\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = prometheus.Register(prometheus.NewCounterFunc(prometheus.CounterOpts{\n\t\tNamespace: \"tracee_ebpf\",\n\t\tName: \"lostevents_total\",\n\t\tHelp: \"events lost in the submission buffer\",\n\t}, func() float64 { return float64(stats.LostEvCount.Read()) }))\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = prometheus.Register(prometheus.NewCounterFunc(prometheus.CounterOpts{\n\t\tNamespace: \"tracee_ebpf\",\n\t\tName: \"write_lostevents_total\",\n\t\tHelp: \"events lost in the write buffer\",\n\t}, func() float64 { return float64(stats.LostWrCount.Read()) }))\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = prometheus.Register(prometheus.NewCounterFunc(prometheus.CounterOpts{\n\t\tNamespace: \"tracee_ebpf\",\n\t\tName: \"network_lostevents_total\",\n\t\tHelp: \"events lost in the network buffer\",\n\t}, func() float64 { return float64(stats.LostNtCount.Read()) }))\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = prometheus.Register(prometheus.NewCounterFunc(prometheus.CounterOpts{\n\t\tNamespace: \"tracee_ebpf\",\n\t\tName: \"errors_total\",\n\t\tHelp: \"errors accumulated by tracee-ebpf\",\n\t}, func() float64 { return float64(stats.ErrorCount.Read()) }))\n\n\treturn err\n}", "func startSignalsHandler(h *HTTP) {\n\tsignal.Notify(h.signals, syscall.SIGTERM, syscall.SIGINT, syscall.SIGKILL)\n\tif sig := <-h.signals; sig != nil {\n\t\th.events <- fmt.Errorf(\"received signal: %s\", sig.String())\n\t}\n}", "func stats(pid int) {\n\tproc, _ := process.NewProcess(int32(pid))\n\tfor {\n\t\tlogSample(getSample(pid, proc))\n\t\ttime.Sleep(time.Second * samplerSleepSeconds)\n\t}\n}", "func RegisterProfilingSignalHandlers(configParams *config.Config) {\n\treturn\n}", "func signalHandler() {\n\tincoming := make(chan os.Signal)\n\n\tsignal.Notify(incoming, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM)\n\tfor {\n\t\tsig := <-incoming\n\n\t\tux, ok := sig.(syscall.Signal)\n\t\tif !ok {\n\t\t\to.Warn(\"Couldn't handle signal %s, Coercion failed\", sig)\n\t\t\tcontinue\n\t\t}\n\n\t\tswitch ux {\n\t\tcase syscall.SIGHUP:\n\t\t\to.Warn(\"Reloading Configuration\")\n\t\t\treloadScores <- 1\n\t\tcase syscall.SIGINT:\n\t\t\tfmt.Fprintln(os.Stderr, \"Interrupt Received - Terminating\")\n\t\t\t//FIXME: Gentle Shutdown\n\t\t\tos.Exit(1)\n\t\tcase syscall.SIGTERM:\n\t\t\tfmt.Fprintln(os.Stderr, \"Terminate Received - Terminating\")\n\t\t\t//FIXME: Gentle Shutdown\n\t\t\tos.Exit(2)\n\t\t}\n\t}\n\n}", "func (k *KACollector) signal(sig syscall.Signal) error {\n\tps, err := process.Processes()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar pid int32\n\tfor _, p := range ps {\n\t\tname, err := p.Name()\n\t\tif err != nil 
{\n\t\t\tcontinue\n\t\t}\n\n\t\tif name == \"keepalived\" {\n\t\t\tpid = p.Pid\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif pid == 0 {\n\t\treturn fmt.Errorf(\"cannot find pid\")\n\t}\n\n\tproc, err := os.FindProcess(int(pid))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"process %v: %v\", pid, err)\n\t}\n\n\terr = proc.Signal(sig)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"signal %v: %v\", sig, err)\n\t}\n\n\ttime.Sleep(100 * time.Millisecond)\n\treturn nil\n}", "func (u *UTrace) Dump() (Report, error) {\n\treport := NewReport(time.Now().Sub(u.startTime))\n\tvar id FuncID\n\tstats := make([]kernelCounter, runtime.NumCPU())\n\titerator := u.kernelCounters.Iterate()\n\n\tfor iterator.Next(&id, &stats) {\n\t\tsymbol, ok := u.matchingFuncCache[id]\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\t\tf := NewFunc(symbol)\n\t\tif symbol.Value > 0xffffffff00000000 {\n\t\t\tf.Type = Kernel\n\t\t} else {\n\t\t\tf.Type = User\n\t\t}\n\t\tfor _, cpuStat := range stats {\n\t\t\tf.Count += cpuStat.Count\n\t\t\tf.AverageLatency += time.Duration(cpuStat.CumulatedTime) * time.Nanosecond\n\t\t}\n\t\tif f.Count > 0 {\n\t\t\tf.AverageLatency = time.Duration(float64(f.AverageLatency.Nanoseconds()) / float64(f.Count))\n\t\t}\n\n\t\tf.stackTraces = u.matchingFuncStackTraces[id]\n\n\t\treport.functions[id] = f\n\t}\n\treport.stackTracerCount.Kernel = atomic.LoadUint64(&u.kernelStackTraceCounter)\n\treport.stackTracerCount.User = atomic.LoadUint64(&u.userStackTraceCounter)\n\tif err := u.lostCount.Lookup([4]byte{0}, &report.stackTracerCount.LostUser); err != nil {\n\t\tlogrus.Warnf(\"failed to retrieve user stack trace lost count: %s\", err)\n\t}\n\tif err := u.lostCount.Lookup([4]byte{1}, &report.stackTracerCount.LostKernel); err != nil {\n\t\tlogrus.Warnf(\"failed to retrieve kernel stack trace lost count: %s\", err)\n\t}\n\treport.stackTracerCount.LostUser += atomic.LoadUint64(&u.userStackTraceLost)\n\treport.stackTracerCount.LostKernel += atomic.LoadUint64(&u.kernelStackTraceLost)\n\treturn report, iterator.Err()\n}", "func sigTermHeartbeater() {\n\tfor {\n\t\tlog.Println(\"Post-SIGTERM heartbeat\")\n\t\ttime.Sleep(time.Second)\n\t}\n}", "func processHealthMonitor(duration time.Duration) {\n\tfor {\n\t\t<-time.After(duration)\n\t\tvar numOfGoroutines = runtime.NumGoroutine()\n\t\t//var memStats runtime.MemStats\n\t\t//runtime.ReadMemStats(&memStats)\n\t\t//core.Info(\"Number of goroutines: %d\",numOfGoroutines)\n\t\t//core.Info(\"Mem stats: %v\",memStats)\n\t\tcore.CloudWatchClient.PutMetric(\"num_of_goroutines\", \"Count\", float64(numOfGoroutines), \"httshark_health_monitor\")\n\t}\n}", "func trapSignalsPosix() {\n\tgo func() {\n\t\tsigchan := make(chan os.Signal, 1)\n\t\tsignal.Notify(sigchan, syscall.SIGTERM, syscall.SIGHUP, syscall.SIGQUIT, syscall.SIGUSR1)\n\n\t\tfor sig := range sigchan {\n\t\t\tswitch sig {\n\t\t\tcase syscall.SIGTERM:\n\t\t\t\tlog.Println(\"[INFO] SIGTERM: Terminating process\")\n\t\t\t\tif PidFile != \"\" {\n\t\t\t\t\tos.Remove(PidFile)\n\t\t\t\t}\n\t\t\t\tos.Exit(0)\n\n\t\t\tcase syscall.SIGQUIT:\n\t\t\t\tlog.Println(\"[INFO] SIGQUIT: Shutting down\")\n\t\t\t\texitCode := executeShutdownCallbacks(\"SIGQUIT\")\n\t\t\t\terr := Stop()\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Printf(\"[ERROR] SIGQUIT stop: %v\", err)\n\t\t\t\t\texitCode = 1\n\t\t\t\t}\n\t\t\t\tif PidFile != \"\" {\n\t\t\t\t\tos.Remove(PidFile)\n\t\t\t\t}\n\t\t\t\tos.Exit(exitCode)\n\n\t\t\tcase syscall.SIGHUP:\n\t\t\t\tlog.Println(\"[INFO] SIGHUP: Hanging up\")\n\t\t\t\terr := Stop()\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Printf(\"[ERROR] 
SIGHUP stop: %v\", err)\n\t\t\t\t}\n\n\t\t\tcase syscall.SIGUSR1:\n\t\t\t\tlog.Println(\"[INFO] SIGUSR1: Reloading\")\n\n\t\t\t\tvar updatedCorefile Input\n\n\t\t\t\tcorefileMu.Lock()\n\t\t\t\tif corefile == nil {\n\t\t\t\t\t// Hmm, did spawing process forget to close stdin? Anyhow, this is unusual.\n\t\t\t\t\tlog.Println(\"[ERROR] SIGUSR1: no Corefile to reload (was stdin left open?)\")\n\t\t\t\t\tcorefileMu.Unlock()\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tif corefile.IsFile() {\n\t\t\t\t\tbody, err := ioutil.ReadFile(corefile.Path())\n\t\t\t\t\tif err == nil {\n\t\t\t\t\t\tupdatedCorefile = CorefileInput{\n\t\t\t\t\t\t\tFilepath: corefile.Path(),\n\t\t\t\t\t\t\tContents: body,\n\t\t\t\t\t\t\tRealFile: true,\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tcorefileMu.Unlock()\n\n\t\t\t\terr := Restart(updatedCorefile)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Printf(\"[ERROR] SIGUSR1: %v\", err)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n}", "func HandleProfilers() (err error) {\n\n\tvar (\n\t\tfs = flag.NewFlagSet(\"Sample Service\", flag.ExitOnError)\n\t\tctx = context.Background()\n\t\tcpuprofile = fs.String(\"cpuprofile\", \"\", \"Write CPU Profile to `file`\")\n\t\tmemprofile = fs.String(\"memprofile\", \"\", \"Write Mem Profile to `file`\")\n\t\tcpufile, memfile *os.File\n\t\tlog = log.New(\"Profilers\", config.LogCfg{Level: log.DebugLevel})\n\t)\n\tfs.Parse(os.Args[1:])\n\n\t// Start cpu profile if requested\n\tif *cpuprofile != \"\" {\n\t\tcpufile, err = os.Create(*cpuprofile)\n\t\tif err != nil {\n\t\t\tlog.Errorw(ctx, \"could not create CPU profile: \", err)\n\t\t}\n\t\tif err := pprof.StartCPUProfile(cpufile); err != nil {\n\t\t\tlog.Errorw(ctx, \"could not start CPU profile: \", err)\n\t\t}\n\t\tlog.Debug(ctx, \"Cpu Profiling Started\")\n\t}\n\t// Start goroutine to capture memory stats, if requested\n\t// Add memprofile around functions need to be traced.. 
memprofile added here will dump stats every second\n\tgo func() {\n\t\tif *memprofile != \"\" {\n\t\t\tfor {\n\t\t\t\ttime.Sleep(time.Second) //TODO change timing for memprofile, to get from config\n\t\t\t\tif *memprofile != \"\" {\n\t\t\t\t\tfmt.Println(\"Doing memprofile\")\n\t\t\t\t\tmemfile, err = os.Create(*memprofile)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tfmt.Println(\"could not create memory profile: \", err)\n\t\t\t\t\t}\n\t\t\t\t\t//runtime.GC() // get up-to-date statistics\n\t\t\t\t\tif err := pprof.WriteHeapProfile(memfile); err != nil {\n\t\t\t\t\t\tfmt.Println(\"could not write memory profile: \", err)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n\t// defer cpufile.Close()\n\t// defer memfile.Close()\n\treturn err\n}", "func (k *KACollector) text() ([]KAStats, error) {\n\tkaStats := make([]KAStats, 0)\n\n\terr := k.signal(syscall.SIGUSR1)\n\tif err != nil {\n\t\treturn kaStats, err\n\t}\n\n\terr = k.signal(syscall.SIGUSR2)\n\tif err != nil {\n\t\treturn kaStats, err\n\t}\n\n\tdata, err := k.parseData()\n\tif err != nil {\n\t\treturn kaStats, err\n\t}\n\n\tstats, err := k.parseStats()\n\tif err != nil {\n\t\treturn kaStats, err\n\t}\n\n\tif len(data) == len(stats) {\n\t\tfor idx := range data {\n\t\t\tst := KAStats{}\n\t\t\tst.Data = data[idx]\n\t\t\tst.Stats = stats[idx]\n\t\t\tkaStats = append(kaStats, st)\n\t\t}\n\t}\n\n\treturn kaStats, nil\n}", "func RegisterStackDumper() {\n\tgo func() {\n\t\tsigs := make(chan os.Signal, 1)\n\t\tsignal.Notify(sigs, syscall.SIGUSR1)\n\t\tfor {\n\t\t\t<-sigs\n\t\t\tLogStack()\n\t\t}\n\t}()\n}", "func RegisterStackDumper() {\n\tgo func() {\n\t\tsigs := make(chan os.Signal, 1)\n\t\tsignal.Notify(sigs, syscall.SIGUSR1)\n\t\tfor {\n\t\t\t<-sigs\n\t\t\tLogStack()\n\t\t}\n\t}()\n}", "func CPUProfile(hd *web.THandler) {\n\t// 创建pprof文件\n\tfilename := \"cpu-\" + strconv.Itoa(pid) + \".pprof\"\n\tf, err := os.Create(filename)\n\tif err != nil {\n\t\tfmt.Fprintf(hd, \"Could not Creat file %s: %s\\n\", filename, err)\n\t\tlog.Fatal(\"record cpu profile failed: \", err)\n\t}\n\tdefer f.Close()\n\n\t//pprof.StartCPUProfile(f)\n\t// Set Content Type assuming StartCPUProfile will work,\n\t// because if it does it starts writing.\n\t//hd.Header().Set(\"Content-Type\", \"application/octet-stream\")\n\n\t// 开始记录系统运行过程保存到<f>文件\n\tif err := pprof.StartCPUProfile(f); err != nil {\n\t\t// StartCPUProfile failed, so no writes yet.\n\t\t// Can change header back to text content\n\t\t// and send error code.\n\t\t//hd.Header().Set(\"Content-Type\", \"text/plain; charset=utf-8\")\n\t\t//hd.WriteHeader(http.StatusInternalServerError)\n\t\tfmt.Fprintf(hd, \"Could not enable CPU profiling: %s\\n\", err)\n\t\treturn\n\t}\n\tlParams := hd.MethodParams()\n\t// 延迟实际记录时间必须达到30秒 否则获取不到具体信息\n\tlSec := lParams.AsInteger(\"seconds\")\n\tif lSec == 0 {\n\t\tlSec = 120\n\t}\n\tlogger.Info(lSec, lParams.AsInteger(\"seconds\"))\n\ttime.Sleep(time.Duration(lSec) * time.Second)\n\n\t// 结束\n\tpprof.StopCPUProfile()\n\n\tfmt.Fprintf(hd, \"create cpu profile %s \\n\", filename)\n\troot, _ := os.Getwd() //path.Split(os.Args[0])\n\tfmt.Fprintf(hd, \"Now you can use this to check it: go tool pprof %s\\n\", path.Join(\"file:///\", root, filename))\n}", "func TestSignalTrace(t *testing.T) {\n\tdone := make(chan struct{})\n\tquit := make(chan struct{})\n\tc := make(chan os.Signal, 1)\n\tNotify(c, syscall.SIGHUP)\n\n\t// Source and sink for signals busy loop unsynchronized with\n\t// trace starts and stops. 
We are ultimately validating that\n\t// signals and runtime.(stop|start)TheWorldGC are compatible.\n\tgo func() {\n\t\tdefer close(done)\n\t\tdefer Stop(c)\n\t\tpid := syscall.Getpid()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-quit:\n\t\t\t\treturn\n\t\t\tdefault:\n\t\t\t\tsyscall.Kill(pid, syscall.SIGHUP)\n\t\t\t}\n\t\t\twaitSig(t, c, syscall.SIGHUP)\n\t\t}\n\t}()\n\n\tfor i := 0; i < 100; i++ {\n\t\tbuf := new(bytes.Buffer)\n\t\tif err := trace.Start(buf); err != nil {\n\t\t\tt.Fatalf(\"[%d] failed to start tracing: %v\", i, err)\n\t\t}\n\t\ttime.After(1 * time.Microsecond)\n\t\ttrace.Stop()\n\t\tsize := buf.Len()\n\t\tif size == 0 {\n\t\t\tt.Fatalf(\"[%d] trace is empty\", i)\n\t\t}\n\t}\n\tclose(quit)\n\t<-done\n}", "func signalProcessor(c <-chan os.Signal) {\n\tfor {\n\t\tsig := <-c\n\t\tswitch sig {\n\t\tcase syscall.SIGHUP:\n\t\t\t// Rotate logs if configured\n\t\t\tif logf != nil {\n\t\t\t\tlog.LogInfo(\"Recieved SIGHUP, cycling logfile\")\n\t\t\t\tcloseLogFile()\n\t\t\t\topenLogFile()\n\t\t\t} else {\n\t\t\t\tlog.LogInfo(\"Ignoring SIGHUP, logfile not configured\")\n\t\t\t}\n\t\tcase syscall.SIGTERM:\n\t\t\t// Initiate shutdown\n\t\t\tlog.LogInfo(\"Received SIGTERM, shutting down\")\n\t\t\tgo timedExit()\n\t\t\tweb.Stop()\n\t\t\tif smtpServer != nil {\n\t\t\t\tsmtpServer.Stop()\n\t\t\t} else {\n\t\t\t\tlog.LogError(\"smtpServer was nil during shutdown\")\n\t\t\t}\n\t\t}\n\t}\n}", "func (p *ProcMetrics) Collect() {\n\tif m, err := CollectProcInfo(p.pid); err == nil {\n\t\tnow := time.Now()\n\n\t\tif !p.lastTime.IsZero() {\n\t\t\tratio := 1.0\n\t\t\tswitch {\n\t\t\tcase m.CPU.Period > 0 && m.CPU.Quota > 0:\n\t\t\t\tratio = float64(m.CPU.Quota) / float64(m.CPU.Period)\n\t\t\tcase m.CPU.Shares > 0:\n\t\t\t\tratio = float64(m.CPU.Shares) / 1024\n\t\t\tdefault:\n\t\t\t\tratio = 1 / float64(runtime.NumCPU())\n\t\t\t}\n\n\t\t\tinterval := ratio * float64(now.Sub(p.lastTime))\n\n\t\t\tp.cpu.user.time = m.CPU.User - p.last.CPU.User\n\t\t\tp.cpu.user.percent = 100 * float64(p.cpu.user.time) / interval\n\n\t\t\tp.cpu.system.time = m.CPU.Sys - p.last.CPU.Sys\n\t\t\tp.cpu.system.percent = 100 * float64(p.cpu.system.time) / interval\n\n\t\t\tp.cpu.total.time = (m.CPU.User + m.CPU.Sys) - (p.last.CPU.User + p.last.CPU.Sys)\n\t\t\tp.cpu.total.percent = 100 * float64(p.cpu.total.time) / interval\n\t\t}\n\n\t\tp.memory.available = m.Memory.Available\n\t\tp.memory.size = m.Memory.Size\n\t\tp.memory.resident.usage = m.Memory.Resident\n\t\tp.memory.resident.percent = 100 * float64(p.memory.resident.usage) / float64(p.memory.available)\n\t\tp.memory.shared.usage = m.Memory.Shared\n\t\tp.memory.text.usage = m.Memory.Text\n\t\tp.memory.data.usage = m.Memory.Data\n\t\tp.memory.pagefault.major.count = m.Memory.MajorPageFaults - p.last.Memory.MajorPageFaults\n\t\tp.memory.pagefault.minor.count = m.Memory.MinorPageFaults - p.last.Memory.MinorPageFaults\n\n\t\tp.files.open = m.Files.Open\n\t\tp.files.max = m.Files.Max\n\n\t\tp.threads.num = m.Threads.Num\n\t\tp.threads.switches.voluntary.count = m.Threads.VoluntaryContextSwitches - p.last.Threads.VoluntaryContextSwitches\n\t\tp.threads.switches.involuntary.count = m.Threads.InvoluntaryContextSwitches - p.last.Threads.InvoluntaryContextSwitches\n\n\t\tp.last = m\n\t\tp.lastTime = now\n\t\tp.engine.Report(p)\n\t}\n}", "func monitorSignals(p *Publisher) {\n\tc := make(chan os.Signal, 1)\n\tsignal.Notify(c, syscall.SIGUSR1)\n\tfor {\n\t\t<-c\n\t\tclients := make([]string, 0)\n\t\tcallback := func(client Subscriber) {\n\t\t\tclients = append(clients, 
client.String())\n\t\t}\n\t\tp.dowithsubscribers(callback)\n\t\tlogger.Info(fmt.Sprintln(\"Purges sent:\", p.Publishes, \". Connected Clients\", clients))\n\t}\n}", "func sighandler(sig uint32, info *siginfo, ctxt unsafe.Pointer, gp *g) {\n\t_g_ := getg()\n\tc := &sigctxt{info, ctxt}\n\n\tif sig == _SIGPROF {\n\t\tsigprof(uintptr(c.pc()), uintptr(c.sp()), uintptr(c.lr()), gp, _g_.m)\n\t\treturn\n\t}\n\n\tflags := int32(_SigThrow)\n\tif sig < uint32(len(sigtable)) {\n\t\tflags = sigtable[sig].flags\n\t}\n\tif c.sigcode() != _SI_USER && flags&_SigPanic != 0 {\n\t\t// Make it look like a call to the signal func.\n\t\t// Have to pass arguments out of band since\n\t\t// augmenting the stack frame would break\n\t\t// the unwinding code.\n\t\tgp.sig = sig\n\t\tgp.sigcode0 = uintptr(c.sigcode())\n\t\tgp.sigcode1 = uintptr(c.fault())\n\t\tgp.sigpc = uintptr(c.pc())\n\n\t\t// We arrange lr, and pc to pretend the panicking\n\t\t// function calls sigpanic directly.\n\t\t// Always save LR to stack so that panics in leaf\n\t\t// functions are correctly handled. This smashes\n\t\t// the stack frame but we're not going back there\n\t\t// anyway.\n\t\tsp := c.sp() - 4\n\t\tc.set_sp(sp)\n\t\t*(*uint32)(unsafe.Pointer(uintptr(sp))) = c.lr()\n\n\t\tpc := uintptr(gp.sigpc)\n\n\t\t// If we don't recognize the PC as code\n\t\t// but we do recognize the link register as code,\n\t\t// then assume this was a call to non-code and treat like\n\t\t// pc == 0, to make unwinding show the context.\n\t\tif pc != 0 && findfunc(pc) == nil && findfunc(uintptr(c.lr())) != nil {\n\t\t\tpc = 0\n\t\t}\n\n\t\t// Don't bother saving PC if it's zero, which is\n\t\t// probably a call to a nil func: the old link register\n\t\t// is more useful in the stack trace.\n\t\tif pc != 0 {\n\t\t\tc.set_lr(uint32(pc))\n\t\t}\n\n\t\t// In case we are panicking from external C code\n\t\tc.set_r10(uint32(uintptr(unsafe.Pointer(gp))))\n\t\tc.set_pc(uint32(funcPC(sigpanic)))\n\t\treturn\n\t}\n\n\tif c.sigcode() == _SI_USER || flags&_SigNotify != 0 {\n\t\tif sigsend(sig) {\n\t\t\treturn\n\t\t}\n\t}\n\n\tif flags&_SigKill != 0 {\n\t\texit(2)\n\t}\n\n\tif flags&_SigThrow == 0 {\n\t\treturn\n\t}\n\n\t_g_.m.throwing = 1\n\t_g_.m.caughtsig.set(gp)\n\n\tif crashing == 0 {\n\t\tstartpanic()\n\t}\n\n\tif sig < uint32(len(sigtable)) {\n\t\tprint(sigtable[sig].name, \"\\n\")\n\t} else {\n\t\tprint(\"Signal \", sig, \"\\n\")\n\t}\n\n\tprint(\"PC=\", hex(c.pc()), \" m=\", _g_.m.id, \"\\n\")\n\tif _g_.m.lockedg != nil && _g_.m.ncgo > 0 && gp == _g_.m.g0 {\n\t\tprint(\"signal arrived during cgo execution\\n\")\n\t\tgp = _g_.m.lockedg\n\t}\n\tprint(\"\\n\")\n\n\tvar docrash bool\n\tif gotraceback(&docrash) > 0 {\n\t\tgoroutineheader(gp)\n\t\ttracebacktrap(uintptr(c.pc()), uintptr(c.sp()), uintptr(c.lr()), gp)\n\t\tif crashing > 0 && gp != _g_.m.curg && _g_.m.curg != nil && readgstatus(_g_.m.curg)&^_Gscan == _Grunning {\n\t\t\t// tracebackothers on original m skipped this one; trace it now.\n\t\t\tgoroutineheader(_g_.m.curg)\n\t\t\ttraceback(^uintptr(0), ^uintptr(0), 0, gp)\n\t\t} else if crashing == 0 {\n\t\t\ttracebackothers(gp)\n\t\t\tprint(\"\\n\")\n\t\t}\n\t\tdumpregs(c)\n\t}\n\n\tif docrash {\n\t\tcrashing++\n\t\tif crashing < sched.mcount {\n\t\t\t// There are other m's that need to dump their stacks.\n\t\t\t// Relay SIGQUIT to the next m by sending it to the current process.\n\t\t\t// All m's that have already received SIGQUIT have signal masks blocking\n\t\t\t// receipt of any signals, so the SIGQUIT will go to an m that hasn't seen it yet.\n\t\t\t// When 
the last m receives the SIGQUIT, it will fall through to the call to\n\t\t\t// crash below. Just in case the relaying gets botched, each m involved in\n\t\t\t// the relay sleeps for 5 seconds and then does the crash/exit itself.\n\t\t\t// In expected operation, the last m has received the SIGQUIT and run\n\t\t\t// crash/exit and the process is gone, all long before any of the\n\t\t\t// 5-second sleeps have finished.\n\t\t\tprint(\"\\n-----\\n\\n\")\n\t\t\traiseproc(_SIGQUIT)\n\t\t\tusleep(5 * 1000 * 1000)\n\t\t}\n\t\tcrash()\n\t}\n\n\texit(2)\n}", "func signal() {\n\tnoEvents = true\n}", "func goroutine1() {\n\tfor {\n\t\tselect {\n\t\tcase <-time.After(time.Second):\n\t\t\tlog.Println(\"heartbeat\")\n\t\tcase <-shutdown.InProgress():\n\t\t\tlog.Println(\"shutdown (1)\")\n\t\t\treturn\n\t\t}\n\t}\n}", "func sigtrampgo(sig uint32, info *siginfo, ctx unsafe.Pointer) {\n\tif sigfwdgo(sig, info, ctx) {\n\t\treturn\n\t}\n\tg := getg()\n\tif g == nil {\n\t\tif sig == _SIGPROF {\n\t\t\t// Ignore profiling signals that arrive on\n\t\t\t// non-Go threads. On some systems they will\n\t\t\t// be handled directly by the signal handler,\n\t\t\t// by calling sigprofNonGo, in which case we won't\n\t\t\t// get here anyhow.\n\t\t\treturn\n\t\t}\n\t\tbadsignal(uintptr(sig), &sigctxt{info, ctx})\n\t\treturn\n\t}\n\n\t// If some non-Go code called sigaltstack, adjust.\n\tsp := uintptr(unsafe.Pointer(&sig))\n\tif sp < g.m.gsignal.stack.lo || sp >= g.m.gsignal.stack.hi {\n\t\tvar st sigaltstackt\n\t\tsigaltstack(nil, &st)\n\t\tif st.ss_flags&_SS_DISABLE != 0 {\n\t\t\tsetg(nil)\n\t\t\tcgocallback(unsafe.Pointer(funcPC(noSignalStack)), noescape(unsafe.Pointer(&sig)), unsafe.Sizeof(sig))\n\t\t}\n\t\tstsp := uintptr(unsafe.Pointer(st.ss_sp))\n\t\tif sp < stsp || sp >= stsp+st.ss_size {\n\t\t\tsetg(nil)\n\t\t\tcgocallback(unsafe.Pointer(funcPC(sigNotOnStack)), noescape(unsafe.Pointer(&sig)), unsafe.Sizeof(sig))\n\t\t}\n\t\tg.m.gsignal.stack.lo = stsp\n\t\tg.m.gsignal.stack.hi = stsp + st.ss_size\n\t\tg.m.gsignal.stackguard0 = stsp + _StackGuard\n\t\tg.m.gsignal.stackguard1 = stsp + _StackGuard\n\t\tg.m.gsignal.stackAlloc = st.ss_size\n\t\tg.m.gsignal.stktopsp = getcallersp(unsafe.Pointer(&sig))\n\t}\n\n\tsetg(g.m.gsignal)\n\tsighandler(sig, info, ctx, g)\n\tsetg(g)\n}", "func MonitorSignals (running *bool, srv *http.Server) {\n\tc := make(chan os.Signal, 1)\n\tsignal.Notify(c, os.Interrupt, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT)\n\n\tgo func() {\t//listen for kill messages\n\t\t<-c\n\t\t*running = false \t//stops the queen/workers background tasks. 
This also stops health checks so this looks unhealthy now\n\t\ttime.Sleep(time.Second * 5)\t//sleep here, this is needed for this to be taken out of the load balancer before stopping to handle requests\n\t\tsrv.Shutdown (context.Background()) //shutsdown the http server\n\t}()\n}", "func listenSignals(l net.Listener, srv *http.Server) {\n\tsigChan := make(chan os.Signal)\n\tsignal.Notify(sigChan,\n\t\tsyscall.SIGHUP, syscall.SIGINT,\n\t\tsyscall.SIGQUIT, syscall.SIGTERM,\n\t\tsyscall.SIGUSR1, syscall.SIGUSR2,\n\t)\n\tfor {\n\t\tswitch sig := <-sigChan; sig {\n\t\tcase syscall.SIGHUP:\n\t\t\tif err := reload(l, srv); err != nil {\n\t\t\t\tlog.Println(err.Error())\n\t\t\t}\n\t\tdefault:\n\t\t\tl.Close()\n\t\t}\n\t}\n}", "func hookDumpRegistry(e *evtx.GoEvtxMap) {\n\t// we dump only if alert is relevant\n\tif getCriticality(e) < dumpTresh {\n\t\treturn\n\t}\n\n\tparallelHooks.Acquire()\n\tgo func() {\n\t\tdefer parallelHooks.Release()\n\t\tif guid, err := e.GetString(&pathSysmonProcessGUID); err == nil {\n\t\t\tif targetObject, err := e.GetString(&pathSysmonTargetObject); err == nil {\n\t\t\t\tif details, err := e.GetString(&pathSysmonDetails); err == nil {\n\t\t\t\t\t// We dump only if Details is \"Binary Data\" since the other kinds can be seen in the raw event\n\t\t\t\t\tif details == \"Binary Data\" {\n\t\t\t\t\t\tdumpPath := filepath.Join(dumpDirectory, guid, idFromEvent(e), \"reg.txt\")\n\t\t\t\t\t\tkey, value := filepath.Split(targetObject)\n\t\t\t\t\t\tdumpEventAndCompress(e, guid)\n\t\t\t\t\t\tcontent, err := utils.RegQuery(key, value)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\tlog.Errorf(\"Failed to run reg query: %s\", err)\n\t\t\t\t\t\t\tcontent = fmt.Sprintf(\"Error Dumping %s: %s\", targetObject, err)\n\t\t\t\t\t\t}\n\t\t\t\t\t\terr = ioutil.WriteFile(dumpPath, []byte(content), 0600)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\tlog.Errorf(\"Failed to write registry content to file: %s\", err)\n\t\t\t\t\t\t\treturn\n\t\t\t\t\t\t}\n\t\t\t\t\t\tcompress(dumpPath)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tlog.Errorf(\"Failed to dump registry from event\")\n\t}()\n}", "func CollectSysStats(registry *Registry) {\n\tvar s sysStatsCollector\n\ts.registry = registry\n\ts.maxOpen = registry.Gauge(\"fh.allocated\", nil)\n\ts.curOpen = registry.Gauge(\"fh.max\", nil)\n\ts.numGoroutines = registry.Gauge(\"go.numGoroutines\", nil)\n\n\tticker := time.NewTicker(30 * time.Second)\n\tgo func() {\n\t\tlog := registry.log\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-ticker.C:\n\t\t\t\tlog.Debugf(\"Collecting system stats\")\n\t\t\t\tfdStats(&s)\n\t\t\t\tgoRuntimeStats(&s)\n\t\t\t}\n\t\t}\n\t}()\n}", "func ProcessSignal(p *os.Process, sig os.Signal,) error", "func setupSignal(d chan int) {\n\tc := make(chan os.Signal, 1)\n\tsignal.Notify(c, os.Interrupt)\n\tgo func() {\n\t\tfor sig := range c {\n\t\t\tfmt.Printf(\"\\nCaptured signal %v\\n\", sig)\n\t\t\tfmt.Printf(\"Output in %v\\n\", \"proc.log\")\n\t\t\tos.Exit(1) // Will exit immediately.\n\t\t\td <- 0\n\t\t\tos.Exit(1)\n\t\t}\n\t}()\n\n}", "func GetStats(sleep int, iterations int64) {\n\tfor j := int64(0); j < iterations; j++ {\n\t\tmatches, err := filepath.Glob(\"/proc/[0-9]*/stat\")\n\t\tif err != nil {\n\t\tif(debug) {\n\t\t\tfmt.Println(\"err in: func GetStats\")\n\t\t}\n\t\t\tcontinue\n//\t\t\tpanic(err)\n\t\t}\n\n\t\tfor i := range matches {\n\t\t\tif 1 == 0 {\n\t\t\t\tfmt.Printf(\"matches[%v]=\\t%v\\n\", i, matches[i])\n\t\t\t}\n\t\t\tProcRead(matches[i])\n\t\t}\n\t\ttime.Sleep(time.Duration(sleep) * 
time.Second)\n\t\t//time.Sleep(1000 * time.Millisecond)\n\t}\n}", "func HandleDumpTarballSignal(client *config.Client) {\n\tsignals := make(chan os.Signal, 1)\n\tsignal.Notify(signals, syscall.SIGUSR2)\n\tfor {\n\t\tselect {\n\t\tcase <-signals:\n\t\t\tlogrus.Info(\"received SIGUSR2; dumping etcd namespace to tarball\")\n\n\t\t\ttarballPath, err := client.DumpTarball()\n\t\t\tif err != nil {\n\t\t\t\tlogrus.Info(\"Failed to dump etcd namespace: \", err)\n\t\t\t} else {\n\t\t\t\tlogrus.Info(\"Dumped etcd namespace to \", tarballPath)\n\t\t\t}\n\t\t}\n\t}\n}", "func panicErrCollector(e ErrCollector) {\n\tlastErr := len(e.MyErrs) - 1\n\tif e.MyErrs[lastErr].err != nil {\n\t\tcurrtime := time.Now().Format(\"2006-01-02\")\n\t\terrString := fmt.Sprintf(\"PANIC - TIME: %s FUNC: %s ERROR:%s\\n\", currtime, e.MyErrs[len(e.MyErrs)-1].Caller, e.MyErrs[len(e.MyErrs)-1].ErrString)\n\t\ttempDir, _ := os.Getwd()\n\t\tpanicFileName := fmt.Sprintf(\"%s/dumps/%s_%s\", tempDir, currtime, \"PANIC_FILE\")\n\n\t\t//Validates that the dump folder is present on the disk, if not it will create the directory and file needed\n\t\t//for the panic dump file.\n\t\t_, err := os.Stat(\"dumps\")\n\t\tif err != nil {\n\t\t\terr = os.Mkdir(\"dumps\", 0777)\n\t\t\tif err != nil {\n\t\t\t\tpanic(errors.New(\"Default Handler Unable to Create Directory\"))\n\t\t\t}\n\n\t\t\te.PanicFile, err = os.OpenFile(panicFileName, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0777)\n\t\t\tif err != nil {\n\t\t\t\tpanic(errors.New(\"Default Handler Unable to Create File\"))\n\t\t\t}\n\n\t\t\tnum, err := e.PanicFile.Write([]byte(errString))\n\t\t\tfmt.Println(num)\n\t\t\tif err != nil {\n\t\t\t\tpanic(errors.New(\"Default Handler Unable to Write File\"))\n\t\t\t}\n\t\t\tfmt.Printf(\"PANIC - TRACE_FILE:\\n\\t%s\\n\", e.PanicFile.Name())\n\t\t\tdefer e.PanicFile.Close()\n\t\t\treturn\n\t\t}\n\n\t\te.PanicFile, err = os.OpenFile(panicFileName, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0777)\n\t\tif err != nil {\n\t\t\tpanic(errors.New(\"Default Handler Unable to Create File\"))\n\t\t}\n\n\t\tnum, err := e.PanicFile.Write([]byte(errString))\n\t\tfmt.Println(num)\n\t\tif err != nil {\n\t\t\tpanic(errors.New(\"Default Handler Unable to Write File\"))\n\t\t}\n\t\tfmt.Printf(\"PANIC - TRACE_FILE:\\n\\t%s\\n\", e.PanicFile.Name())\n\t\tdefer e.PanicFile.Close()\n\t\treturn\n\n\t}\n}", "func runCPUUsageStats(){\n\tnbCPU := float64(runtime.NumCPU())\n\tparams := fmt.Sprintf(\"(Get-process -Id %d).CPU\",os.Getpid())\n\tfor {\n\t\tcmd := exec.Command(\"powershell\", params)\n\t\tdata, _ := cmd.Output()\n\t\tcurrent,_ := strconv.ParseFloat(strings.Replace(string(data),\"\\r\\n\",\"\",-1),32)\n\t\tif previous == 0 {\n\t\t\tprevious = current\n\t\t}\n\t\tcurrentUsage = int(((current - previous)*float64(100))/(waitTime*nbCPU) )\n\t\tprevious = current\n\t\ttime.Sleep(time.Duration(waitTime )*time.Second)\n\t}\n}", "func waitForSignals() {\n // Set up signal handling.\n signalCh := make(chan os.Signal, 1)\n signal.Notify(signalCh, os.Interrupt, syscall.SIGTERM)\n\n // Block until one of the signals above is received\n select {\n case <-signalCh:\n diagnostic.LogInfo(\"main.waitForSignals\",\"signal received, shutting down...\")\n }\n}", "func sv(fp *cortexm.StackFrame, lr uintptr) {\n\ttrap := int(*(*byte)(unsafe.Pointer(fp.PC - 2)))\n\tif trap >= len(syscalls) {\n\t\tpanic(\"unknown syscall number\")\n\t}\n\tsyscalls[trap](fp, lr)\n}", "func main() {\n\t//Trace code ----------------------------\n\t//trace.Start(os.Stderr)\n\t//defer 
trace.Stop()\n\t//---------------------------------------\n\tvar istream string\n\t//Pprof variables -----------------------\n\t//var cpuprofile = flag.String(\"cpuprofile\", \"\", \"write cpu profile to `file`\")\n\t//var memprofile = flag.String(\"memprofile\", \"\", \"write memory profile to `file`\")\n\tflag.StringVar(&istream, \"file\", \"\", \"Specify input file. Default is emptyfile\")\n\tflag.Parse()\n\t/*\n\t//Pprof code -----------------------\n\tif *cpuprofile != \"\" {\n\t\tf, err := os.Create(*cpuprofile)\n\t\tif err != nil {\n\t\t\tlog.Fatal(\"could not create CPU profile: \", err)\n\t\t}\n\t\tdefer f.Close() // error handling omitted for example\n\t\tif err := pprof.StartCPUProfile(f); err != nil {\n\t\t\tlog.Fatal(\"could not start CPU profile: \", err)\n\t\t}\n\t\tdefer pprof.StopCPUProfile()\n\t}*/\n\t//----------------------------------\n\truntime.GOMAXPROCS(0)\n\tmaxProcs := runtime.GOMAXPROCS(0)\n\tnumCPU := runtime.NumCPU()\n\tfmt.Println(\"maxProcs: \", maxProcs, \" numCPU: \", numCPU)\n\tstart := time.Now()\n seqKmst(istream)\n\tt := time.Since(start)\n\tfmt.Println(\"TotalExecutionTime,\", t, \",\", t.Microseconds(), \",\", t.Milliseconds(), \",\", t.Seconds())\n /*\n\t//Pprof code ---------------------\n\tif *memprofile != \"\" {\n\t\tf, err := os.Create(*memprofile)\n\t\tif err != nil {\n\t\t\tlog.Fatal(\"could not create memory profile: \", err)\n\t\t}\n\t\tdefer f.Close() // error handling omitted for example\n\t\truntime.GC() // get up-to-date statistics\n\t\tif err := pprof.WriteHeapProfile(f); err != nil {\n\t\t\tlog.Fatal(\"could not write memory profile: \", err)\n\t\t}\n\t}\n\t*/\n\t//-------------------------------\n\n\n}", "func PrintStats() {\n\tvar glbRead, glbSent int64\n\n\tvar currentTime = time.Now()\n\tvar startTime = currentTime.Truncate(time.Minute).Add(time.Minute)\n\tvar duration = startTime.Sub(currentTime)\n\theartbeat := time.Tick(duration)\n\n\tfor {\n\t\tselect {\n\t\tcase <-heartbeat:\n\t\t\theartbeat = time.Tick(60 * time.Second)\n\n\t\t\t// publish stats at a fixed interval\n\t\t\treadPartial := atomic.LoadInt64(&readLogCnt)\n\t\t\tatomic.StoreInt64(&readLogCnt, 0)\n\n\t\t\tsentPartial := atomic.LoadInt64(&sentLogCnt)\n\t\t\tatomic.StoreInt64(&sentLogCnt, 0)\n\n\t\t\tglbRead = glbRead + readPartial\n\t\t\tglbSent = glbSent + sentPartial\n\t\t\tlog.Printf(\"Readed: Total (from start): %d, Partial: %d/min (avr %d/sec)\\n\", glbRead, readPartial, readPartial/60)\n\t\t\tlog.Printf(\"Sent: Total (from start): %d, Partial: %d/min (avr %d/sec)\\n\", glbSent, sentPartial, sentPartial/60)\n\t\t}\n\t}\n}", "func signal(s os.Signal) {\n\tp, _ := os.FindProcess(os.Getpid())\n\t_ = p.Signal(s)\n\t// Sleep so test won't finish and signal will be received.\n\ttime.Sleep(999)\n}", "func Test_NoUnknownOnStart(t *testing.T) {\n\tconst (\n\t\tresultName = \"copy_of_node_cpu_seconds_global\"\n\t\tsourceMetric = \"node_cpu_seconds_global\"\n\t)\n\n\tvar (\n\t\tresPoints []types.MetricPoint\n\t\tl sync.Mutex\n\t)\n\n\tstore := store.New(time.Hour, time.Hour)\n\n\treg, err := registry.New(registry.Option{\n\t\tPushPoint: pushFunction(func(ctx context.Context, points []types.MetricPoint) {\n\t\t\tl.Lock()\n\t\t\tdefer l.Unlock()\n\n\t\t\tresPoints = append(resPoints, points...)\n\t\t}),\n\t})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tctx := context.Background()\n\tt0 := time.Now().Truncate(time.Second)\n\n\t// we always boot the manager with 10 seconds resolution\n\truleManager := newManager(ctx, store, defaultLinuxRecordingRules, t0)\n\n\tpromqlRules 
:= []PromQLRule{\n\t\t{\n\t\t\tAlertingRuleID: \"509701d5-3cb0-449b-a858-0290f4dc3cff\",\n\t\t\tName: resultName,\n\t\t\tWarningQuery: fmt.Sprintf(\"%s > 0\", sourceMetric),\n\t\t\tWarningDelay: 5 * time.Minute,\n\t\t\tCriticalQuery: fmt.Sprintf(\"%s > 100\", sourceMetric),\n\t\t\tCriticalDelay: 5 * time.Minute,\n\t\t\tResolution: 10 * time.Second,\n\t\t\tInstanceID: agentID,\n\t\t},\n\t}\n\n\terr = ruleManager.RebuildPromQLRules(promqlRules)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\tid, err := reg.RegisterAppenderCallback(\n\t\tregistry.RegistrationOption{\n\t\t\tNoLabelsAlteration: true,\n\t\t\tDisablePeriodicGather: true,\n\t\t},\n\t\tregistry.AppenderRegistrationOption{},\n\t\truleManager,\n\t)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tfor currentTime := t0; currentTime.Before(t0.Add(9 * time.Minute)); currentTime = currentTime.Add(10 * time.Second) {\n\t\tif currentTime.After(t0.Add(1 * time.Minute)) {\n\t\t\t// Took one full minute before first points.\n\t\t\tstore.PushPoints(context.Background(), []types.MetricPoint{\n\t\t\t\t{\n\t\t\t\t\tPoint: types.Point{\n\t\t\t\t\t\tTime: currentTime,\n\t\t\t\t\t\tValue: 30,\n\t\t\t\t\t},\n\t\t\t\t\tLabels: map[string]string{\n\t\t\t\t\t\ttypes.LabelName: sourceMetric,\n\t\t\t\t\t\ttypes.LabelInstanceUUID: agentID,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t})\n\t\t}\n\n\t\truleManager.now = func() time.Time { return currentTime }\n\n\t\treg.InternalRunScrape(ctx, currentTime, id)\n\t}\n\n\tvar hadResult bool\n\n\tfor _, p := range resPoints {\n\t\tif p.Labels[types.LabelName] != resultName {\n\t\t\tt.Errorf(\"unexpected point with labels: %v\", p.Labels)\n\n\t\t\tcontinue\n\t\t}\n\n\t\tif p.Annotations.Status.CurrentStatus == types.StatusWarning {\n\t\t\thadResult = true\n\n\t\t\tcontinue\n\t\t}\n\n\t\tif p.Annotations.Status.CurrentStatus == types.StatusUnknown {\n\t\t\tt.Errorf(\"point status = %v want %v\", p.Annotations.Status.CurrentStatus, types.StatusWarning)\n\t\t}\n\t}\n\n\tif !hadResult {\n\t\tt.Errorf(\"rule never returned any points\")\n\t}\n}", "func EnableGoroutineDump() {\n\tc := make(chan os.Signal, 1)\n\tsignal.Notify(c, syscall.SIGUSR1)\n\tgo func() {\n\t\tfor range c {\n\t\t\tts := time.Now()\n\t\t\tfmt.Println(\"=== BEGIN pprof dump ===\")\n\t\t\tfmt.Println(\"Timestamp: \", ts.Format(time.RFC3339Nano))\n\n\t\t\truntime.GC() // get up-to-date statistics\n\t\t\tps := pprof.Profiles()\n\n\t\t\tgo dumpToFile(ts, ps)\n\t\t\tgo dumpHeapProfile(ts)\n\n\t\t\tfor _, p := range ps {\n\t\t\t\tfmt.Printf(\"--- BEGIN %s dump ---\\n\", p.Name())\n\t\t\t\tp.WriteTo(os.Stdout, 2)\n\t\t\t\tfmt.Printf(\"--- END %s dump ---\\n\", p.Name())\n\t\t\t}\n\t\t\tfmt.Println(\"=== END pprof dump ===\")\n\t\t}\n\t}()\n}", "func main() {\n\tsig := make(chan os.Signal, 1)\n\tsignal.Notify(sig, os.Interrupt, os.Kill)\n\n\te := events.NewStream(1000, 10)\n\tSource.Load(e)\n\n\tSource.Start()\n\n\tdefer Source.Stop()\n\n\t<-sig\n}", "func (sch *scheduler) logRedundantStats(s *session) {\n\n\tfilename := \"Server_scheduler_stats.json\"\n\tos.Remove(filename)\n\tlogStatsFile, _ := os.OpenFile(filename, os.O_CREATE|os.O_WRONLY, 0644)\n\n\tdupQuota := 0.0\n\tif sch.allSntBytes != 0 {\n\t\tdupQuota = float64(sch.duplicatedStreamBytes) / float64(sch.allSntBytes) * 100.0\n\t}\n\tutils.Debugf(\"Duplicated Stream Bytes %d (%f %%)\", sch.duplicatedStreamBytes, dupQuota)\n\n\tdropQuota := 0.0\n\tif sch.duplicatedPackets != 0 {\n\t\tdropQuota = float64(sch.droppedDuplicatedPackets) / float64(sch.duplicatedPackets) * 100.0\n\t}\n\tutils.Debugf(\"Total redundant 
droppings %d/%d (%f %%)\", sch.droppedDuplicatedPackets, sch.duplicatedPackets, dropQuota)\n\n\tsch.pathLogMapSync.RLock()\n\tpathStats := \"[\"\n\tfor pathID, pth := range s.paths {\n\t\tpackets, retransmissions, losses, sentStreamFrameBytes := pth.sentPacketHandler.GetStatistics()\n\t\tpathStats += \" { \\\"pathID\\\": \" + strconv.FormatUint(uint64(pathID), 10) +\n\t\t\t\", \\\"pathIP\\\" : \\\"\" + pth.conn.LocalAddr().String() + \"\\\"\" +\n\t\t\t\", \\\"sendPackets\\\" : \" + strconv.FormatUint(packets, 10) +\n\t\t\t\", \\\"retransmissions\\\" : \" + strconv.FormatUint(retransmissions, 10) +\n\t\t\t\", \\\"losses\\\" : \" + strconv.FormatUint(losses, 10) +\n\t\t\t\", \\\"sentStreamFrameBytes\\\" : \" + strconv.FormatUint(sentStreamFrameBytes, 10) +\n\t\t\t\", \\\"selectedAsBestPath\\\" : \" + strconv.FormatUint(sch.bestPathSelection[pathID], 10) +\n\t\t\t\"},\"\n\t}\n\tpathStats = pathStats[0 : len(pathStats)-1]\n\tpathStats += \"]\"\n\tsch.pathLogMapSync.RUnlock()\n\n\tlogStatsFile.WriteString(\n\t\t\"{ \\\"totalSentPackets\\\" : \" + strconv.FormatUint(s.allSntPackets, 10) +\n\t\t\t\", \\\"duplicatedPackets\\\" : \" + strconv.FormatUint(sch.duplicatedPackets, 10) +\n\t\t\t\", \\\"duplicatedDroppedPackets\\\" : \" + strconv.FormatUint(sch.droppedDuplicatedPackets, 10) +\n\t\t\t\", \\\"duplicatedPacketDropRate\\\" : \" + strconv.FormatFloat(dropQuota, 'g', -1, 64) +\n\t\t\t\", \\\"totalStreamBytes\\\" : \" + strconv.FormatUint(sch.allSntBytes, 10) +\n\t\t\t\", \\\"duplicatedStreamBytes\\\" : \" + strconv.FormatUint(sch.duplicatedStreamBytes, 10) +\n\t\t\t\", \\\"duplicateStreamRate\\\" : \" + strconv.FormatFloat(dupQuota, 'g', -1, 64) +\n\t\t\t\", \\\"blockedCWhighestTPPath\\\" : \" + strconv.FormatUint(sch.cwBlocks, 10) +\n\t\t\t\", \\\"lowerRTTSchedules\\\" : \" + strconv.FormatUint(sch.lowerRTTSchedules, 10) +\n\t\t\t\", \\\"pathSwitches\\\" : \" + strconv.FormatUint(sch.pathSwitches, 10) +\n\t\t\t\", \\\"pathStats\\\" : \" + pathStats +\n\t\t\t\"}\")\n\n\tlogStatsFile.Close()\n}", "func (t *Indie) handleSignal() {\n signal.Notify(t.signalChan, syscall.SIGTERM)\n\n for {\n\tswitch <-t.signalChan {\n\tcase syscall.SIGTERM:\n\t log.Println(\"[NOTICE] RECEIVE signal: SIGTERM. 
\")\n\n\t // Notify Stop\n\t t.stopModules()\n\n\t // Wait Modules Stop\n\t t.waitModules()\n\n\t log.Println(\"[NOTICE] ALL MODULES ARE STOPPED, GOING TO EXIT.\")\n\n\t os.Exit(0)\n\t}\n }\n}", "func collectStat(ev *Event, m *map[string]int) {\n\tif m == nil {\n\t\t*m = make(map[string]int)\n\t}\n\t// stats to map\n\tif _, ok := (*m)[ev.Pid]; !ok {\n\t\t(*m)[uniquePid]++\n\t}\n\t(*m)[ev.Pid]++\n\n\t(*m)[ev.UtmM]++\n\t(*m)[ev.UtmS]++\n\n\tif _, ok := (*m)[genKey(ev.Pid, ev.UtmM)]; !ok {\n\t\t(*m)[genKey(uniquePidsUtmm, ev.UtmM)]++\n\t}\n\t(*m)[genKey(ev.Pid, ev.UtmM)]++\n\n\tif _, ok := (*m)[genKey(ev.Pid, ev.UtmS)]; !ok {\n\t\t(*m)[genKey(uniquePidsUtms, ev.UtmS)]++\n\t}\n\t(*m)[genKey(ev.Pid, ev.UtmS)]++\n\n\t(*m)[genKey(ev.UtmM, ev.UtmS)]++\n\n\tif _, ok := (*m)[genKey(ev.Pid, ev.UtmM, ev.UtmS)]; !ok {\n\t\t(*m)[genKey(uniquePidsUtms, ev.UtmM, ev.UtmS)]++\n\t}\n\t(*m)[genKey(ev.Pid, ev.UtmM, ev.UtmS)]++\n\n\t(*m)[evNumberKey]++\n\n}", "func printStats(start time.Time, numFiles int64, totalFileSize int64, numErrors int) {\n\tfileSizeMB := totalFileSize / 1024 / 1024\n\texecTime := time.Since(start).Seconds()\n\tfmt.Println()\n\tfmt.Println(\"-------------- RUNTIME STATS -----------------\")\n\tfmt.Printf(\"hornet version : %s\\n\", version)\n\tfmt.Printf(\"date : %s\\n\", time.Now().Format(time.RFC1123))\n\tfmt.Printf(\"elapsed time : %f s\\n\", execTime)\n\tfmt.Printf(\"# file errors : %d\\n\", numErrors)\n\tfmt.Printf(\"files processed : %d\\n\", numFiles)\n\tfmt.Printf(\"data processed : %d MB\\n\", fileSizeMB)\n\tfmt.Printf(\"throughput : %f MB/s\\n\", float64(fileSizeMB)/execTime)\n}", "func (m *Monitor) SendStats() error {\n\t// delay between to send in order to reduce the statsd pool presure\n\tconst delay = time.Second\n\ttime.Sleep(delay)\n\n\tif resolvers := m.probe.GetResolvers(); resolvers != nil {\n\t\tif err := resolvers.ProcessResolver.SendStats(); err != nil {\n\t\t\treturn fmt.Errorf(\"failed to send process_resolver stats: %w\", err)\n\t\t}\n\t\ttime.Sleep(delay)\n\n\t\tif err := resolvers.DentryResolver.SendStats(); err != nil {\n\t\t\treturn fmt.Errorf(\"failed to send process_resolver stats: %w\", err)\n\t\t}\n\t\tif err := resolvers.NamespaceResolver.SendStats(); err != nil {\n\t\t\treturn fmt.Errorf(\"failed to send namespace_resolver stats: %w\", err)\n\t\t}\n\t}\n\n\tif err := m.perfBufferMonitor.SendStats(); err != nil {\n\t\treturn fmt.Errorf(\"failed to send events stats: %w\", err)\n\t}\n\ttime.Sleep(delay)\n\n\tif err := m.loadController.SendStats(); err != nil {\n\t\treturn fmt.Errorf(\"failed to send load controller stats: %w\", err)\n\t}\n\n\tif m.activityDumpManager != nil {\n\t\tif err := m.activityDumpManager.SendStats(); err != nil {\n\t\t\treturn fmt.Errorf(\"failed to send activity dump maanger stats: %w\", err)\n\t\t}\n\t}\n\n\tif m.probe.config.RuntimeMonitor {\n\t\tif err := m.runtimeMonitor.SendStats(); err != nil {\n\t\t\treturn fmt.Errorf(\"failed to send runtime monitor stats: %w\", err)\n\t\t}\n\t}\n\n\tif err := m.discarderMonitor.SendStats(); err != nil {\n\t\treturn fmt.Errorf(\"failed to send discarder stats: %w\", err)\n\t}\n\n\treturn nil\n}", "func (s *Stats) StatsMonitor() {\n\tlog.Printf(\"Initialized stats goroutine\")\n\ts.startTime = time.Now()\n\tfor {\n\t\telapsed := time.Since(s.startTime)\n\t\tlog.Printf(\"[%10.4f] cases %10d | fcps %8.4f | cov %2.1f%% (hit: %3d, tot: %3d) | corpus: %d | crashes: %d\", elapsed.Seconds(), s.IterationCount, float64(s.IterationCount)/elapsed.Seconds(), 
float64(s.FoundBreakpoints)/float64(s.TotalBreakpoints)*100.0, s.FoundBreakpoints, s.TotalBreakpoints, s.CorpusLength, s.Crashes)\n\t\ttime.Sleep(time.Second)\n\t}\n}", "func CatchSigterm(StopProcess func()) {\n\t// Catch a SIGTERM and stop\n\tsigs := make(chan os.Signal, 1)\n\tsignal.Notify(sigs, os.Interrupt, syscall.SIGTERM)\n\n\tgo func() {\n\t\tfor sig := range sigs {\n\t\t\tlog.Info(\"Shutting down from Signal\", \"signal\", sig)\n\t\t\tStopProcess()\n\t\t\t/*\n\t\t\t\tif service != nil {\n\t\t\t\t\tservice.Stop()\n\t\t\t\t}\n\t\t\t*/\n\t\t\tos.Exit(-1)\n\t\t}\n\t}()\n\n}", "func (d *DBClient) StatsWSHandler(w http.ResponseWriter, r *http.Request) {\n\tconn, err := upgrader.Upgrade(w, r, nil)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn\n\t}\n\n\tfor {\n\t\tmessageType, p, err := conn.ReadMessage()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"message\": string(p),\n\t\t}).Info(\"Got message...\")\n\n\t\tfor _ = range time.Tick(1 * time.Second) {\n\n\t\t\tcount, err := d.Cache.RecordsCount()\n\n\t\t\tif err != nil {\n\t\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\t\"message\": p,\n\t\t\t\t\t\"error\": err.Error(),\n\t\t\t\t}).Error(\"got error while trying to get records count\")\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tstats := d.Counter.Flush()\n\n\t\t\tvar sr statsResponse\n\t\t\tsr.Stats = stats\n\t\t\tsr.RecordsCount = count\n\n\t\t\tb, err := json.Marshal(sr)\n\n\t\t\tif err = conn.WriteMessage(messageType, b); err != nil {\n\t\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\t\"message\": p,\n\t\t\t\t\t\"error\": err.Error(),\n\t\t\t\t}).Debug(\"Got error when writing message...\")\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t}\n\n}", "func hookDumpProcess(e *evtx.GoEvtxMap) {\n\t// we dump only if alert is relevant\n\tif getCriticality(e) < dumpTresh {\n\t\treturn\n\t}\n\n\tparallelHooks.Acquire()\n\tgo func() {\n\t\tdefer parallelHooks.Release()\n\t\tvar pidPath *evtx.GoEvtxPath\n\t\tvar procGUIDPath *evtx.GoEvtxPath\n\n\t\t// the interesting pid to dump depends on the event\n\t\tswitch e.EventID() {\n\t\tcase 8, 10:\n\t\t\tpidPath = &pathSysmonSourceProcessId\n\t\t\tprocGUIDPath = &pathSysmonSourceProcessGUID\n\t\tdefault:\n\t\t\tpidPath = &pathSysmonProcessId\n\t\t\tprocGUIDPath = &pathSysmonProcessGUID\n\t\t}\n\n\t\tif guid, err := e.GetString(procGUIDPath); err == nil {\n\t\t\tif pid, err := e.GetInt(pidPath); err == nil {\n\t\t\t\tdumpEventAndCompress(e, guid)\n\t\t\t\tdumpPidAndCompress(int(pid), guid, idFromEvent(e))\n\t\t\t}\n\t\t}\n\n\t}()\n}", "func (s *Shell) Profile(c *cli.Context) error {\n\tseconds := c.Uint(\"seconds\")\n\tbaseDir := c.String(\"output_dir\")\n\n\tgenDir := filepath.Join(baseDir, fmt.Sprintf(\"debuginfo-%s\", time.Now().Format(time.RFC3339)))\n\n\terr := os.Mkdir(genDir, 0o755)\n\tif err != nil {\n\t\treturn s.errorOut(err)\n\t}\n\tvar wgPprof sync.WaitGroup\n\tvitals := []string{\n\t\t\"allocs\", // A sampling of all past memory allocations\n\t\t\"block\", // Stack traces that led to blocking on synchronization primitives\n\t\t\"cmdline\", // The command line invocation of the current program\n\t\t\"goroutine\", // Stack traces of all current goroutines\n\t\t\"heap\", // A sampling of memory allocations of live objects.\n\t\t\"mutex\", // Stack traces of holders of contended mutexes\n\t\t\"profile\", // CPU profile.\n\t\t\"threadcreate\", // Stack traces that led to the creation of new OS threads\n\t\t\"trace\", // A trace of execution of the current program.\n\t}\n\twgPprof.Add(len(vitals))\n\ts.Logger.Infof(\"Collecting 
profiles: %v\", vitals)\n\ts.Logger.Infof(\"writing debug info to %s\", genDir)\n\n\terrs := make(chan error, len(vitals))\n\tfor _, vt := range vitals {\n\t\tgo func(vt string) {\n\t\t\tdefer wgPprof.Done()\n\t\t\turi := fmt.Sprintf(\"/v2/debug/pprof/%s?seconds=%d\", vt, seconds)\n\t\t\tresp, err := s.HTTP.Get(uri)\n\t\t\tif err != nil {\n\t\t\t\terrs <- fmt.Errorf(\"error collecting %s: %w\", vt, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tdefer func() {\n\t\t\t\tif resp.Body != nil {\n\t\t\t\t\tresp.Body.Close()\n\t\t\t\t}\n\t\t\t}()\n\t\t\tif resp.StatusCode == http.StatusUnauthorized {\n\t\t\t\terrs <- fmt.Errorf(\"error collecting %s: %w\", vt, errUnauthorized)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif resp.StatusCode == http.StatusBadRequest {\n\t\t\t\t// best effort to interpret the underlying problem\n\t\t\t\tpprofVersion := resp.Header.Get(\"X-Go-Pprof\")\n\t\t\t\tif pprofVersion == \"1\" {\n\t\t\t\t\tb, err := io.ReadAll(resp.Body)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\terrs <- fmt.Errorf(\"error collecting %s: %w\", vt, errBadRequest)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\trespContent := string(b)\n\t\t\t\t\t// taken from pprof.Profile https://github.com/golang/go/blob/release-branch.go1.20/src/net/http/pprof/pprof.go#L133\n\t\t\t\t\tif strings.Contains(respContent, \"profile duration exceeds server's WriteTimeout\") {\n\t\t\t\t\t\terrs <- fmt.Errorf(\"%w: %s\", ErrProfileTooLong, respContent)\n\t\t\t\t\t} else {\n\t\t\t\t\t\terrs <- fmt.Errorf(\"error collecting %s: %w: %s\", vt, errBadRequest, respContent)\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\terrs <- fmt.Errorf(\"error collecting %s: %w\", vt, errBadRequest)\n\t\t\t\t}\n\t\t\t\treturn\n\t\t\t}\n\t\t\t// write to file\n\t\t\tf, err := os.Create(filepath.Join(genDir, vt))\n\t\t\tif err != nil {\n\t\t\t\terrs <- fmt.Errorf(\"error creating file for %s: %w\", vt, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\twc := utils.NewDeferableWriteCloser(f)\n\t\t\tdefer wc.Close()\n\n\t\t\t_, err = io.Copy(wc, resp.Body)\n\t\t\tif err != nil {\n\t\t\t\terrs <- fmt.Errorf(\"error writing to file for %s: %w\", vt, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\terr = wc.Close()\n\t\t\tif err != nil {\n\t\t\t\terrs <- fmt.Errorf(\"error closing file for %s: %w\", vt, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t}(vt)\n\t}\n\twgPprof.Wait()\n\tclose(errs)\n\t// Atmost one err is emitted per vital.\n\ts.Logger.Infof(\"collected %d/%d profiles\", len(vitals)-len(errs), len(vitals))\n\tif len(errs) > 0 {\n\t\tvar merr error\n\t\tfor err := range errs {\n\t\t\tmerr = errors.Join(merr, err)\n\t\t}\n\t\treturn s.errorOut(fmt.Errorf(\"profile collection failed:\\n%v\", merr))\n\t}\n\treturn nil\n}", "func recordUptime() {\n\tfor range time.Tick(time.Second) {\n\t\tuptime.WithLabelValues().Inc()\n\t}\n}", "func TrinityMainLoop(svr *network.TLSServer, logger *util.Logger) {\n\t// Notify SIGINT, SIGTERM\n\tc := make(chan os.Signal, 1)\n\tsignal.Notify(c, os.Interrupt)\n\tsignal.Notify(c, syscall.SIGTERM)\n\tsignal.Notify(c, syscall.SIGINFO) // syscall.SIGINFO doesn't exist in linux go.\n\n\tlogger.Info(\"Main\", \"MacOSX - Use (Ctrl-T) for status\")\n\n\tiostatus := map[bool]string{true: \"Incoming\", false: \"Outgoing\"}\n\t// Wait for SIGINT\n\tfor {\n\t\tselect {\n\t\tcase sig := <-c:\n\t\t\tswitch sig {\n\t\t\tcase syscall.SIGINFO:\n\t\t\t\tlogger.Info(\"Main\", \"Status: Node ID %02X\", svr.ServerNode.ID)\n\t\t\t\tlogger.Info(\"Main\", \"Status: Listener Address %s\", svr.Listener.Addr())\n\t\t\t\tlogger.Info(\"Main\", \"Status: Advertised Address %s\", 
svr.ServerNode.HostAddr)\n\t\t\t\tconnections := svr.Connections()\n\t\t\t\tlogger.Info(\"Main\", \"Status: %d Active Connection(s)\", len(connections))\n\t\t\t\tfor _, peer := range connections {\n\t\t\t\t\tlogger.Info(\"Main\", \"Status: Peer %02X (%s %s) %s\", peer.ServerNetworkNode.ID, iostatus[peer.Incoming], peer.Connection.RemoteAddr(), network.PeerStateString[peer.State])\n\t\t\t\t}\n\t\t\tcase os.Interrupt:\n\t\t\t\tfallthrough\n\t\t\tcase syscall.SIGTERM:\n\t\t\t\tlogger.Info(\"Main\", \"Signal %d received, shutting down\", sig)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n}", "func (a *Agent) startDBStats() {\n\tfor {\n\t\tselect {\n\t\tcase <-a.shutdownCtx.Done():\n\t\t\treturn\n\t\tcase <-time.After(config.StatsInterval):\n\t\t\tlog.Debug().\n\t\t\t\tStr(metrics.DBSenderState, metrics.DBStats.SenderState.Value()).\n\t\t\t\tStr(metrics.DBState, metrics.DBStats.DBState.Value()).\n\t\t\t\tStr(metrics.DBPeerSyncState, metrics.DBStats.PeerSyncState.Value()).\n\t\t\t\tInt64(metrics.DBLagDurabilityBytes, metrics.DBStats.DurabilityLagBytes.Value()).\n\t\t\t\tInt64(metrics.DBLagFlushBytes, metrics.DBStats.FlushLagBytes.Value()).\n\t\t\t\tInt64(metrics.DBLagVisibilityBytes, metrics.DBStats.VisibilityLagBytes.Value()).\n\t\t\t\tDur(metrics.DBLagVisibilityMs, time.Duration(metrics.DBStats.VisibilityLagMs.Value())).\n\t\t\t\tMsg(\"db-stats\")\n\t\t}\n\t}\n}", "func handleSignal(server *http.Server) {\n\tc := make(chan os.Signal)\n\tsignal.Notify(c, syscall.SIGINT, syscall.SIGQUIT, syscall.SIGTERM, syscall.SIGHUP)\n\n\tgo func() {\n\t\ts := <-c\n\t\tlog.Info(fmt.Sprintf(\"get a signal %s\", s.String()))\n\t\tswitch s {\n\t\tcase syscall.SIGQUIT, syscall.SIGTERM, syscall.SIGINT:\n\t\t\tctx, cancel := context.WithTimeout(context.Background(), 35*time.Second)\n\t\t\tdefer cancel()\n\t\t\tif err := server.Shutdown(ctx); nil != err {\n\t\t\t\tlog.Info(fmt.Sprintf(\"server shutdown failed: %v\", err))\n\t\t\t}\n\t\t\tif err := service.Close(); nil != err {\n\t\t\t\tlog.Info(fmt.Sprintf(\"service close failed: %v\", err))\n\t\t\t}\n\t\t\tlog.Info(\"nblog exited\")\n\t\t\ttime.Sleep(time.Second)\n\t\t\treturn\n\t\tcase syscall.SIGHUP:\n\t\tdefault:\n\t\t\treturn\n\t\t}\n\t}()\n}", "func (srv *Server) handleSignals() {\n\tvar sig os.Signal\n\n\tsignal.Notify(\n\t\tsrv.sigChan,\n\t\thookableSignals...,\n\t)\n\n\tpid := syscall.Getpid()\n\tfor {\n\t\tsig = <-srv.sigChan\n\t\tsrv.signalHooks(PreSignal, sig)\n\t\tswitch sig {\n\t\tcase syscall.SIGHUP:\n\t\t\tlog.Println(pid, \"Received SIGHUP. 
forking.\")\n\t\t\terr := srv.fork()\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(\"Fork err:\", err)\n\t\t\t}\n\t\tcase syscall.SIGINT:\n\t\t\tlog.Println(pid, \"Received SIGINT.\")\n\t\t\t// ctrl+c无等待时间\n\t\t\tsrv.shutdown(0)\n\t\tcase syscall.SIGTERM:\n\t\t\tlog.Println(pid, \"Received SIGTERM.\")\n\t\t\tsrv.shutdown(TermTimeout)\n\t\tdefault:\n\t\t\tlog.Printf(\"Received %v: nothing i care about...\\n\", sig)\n\t\t}\n\t\tsrv.signalHooks(PostSignal, sig)\n\t}\n}", "func handleSignal() {\n\tgo func() {\n\t\tc := make(chan os.Signal)\n\n\t\tsignal.Notify(c, syscall.SIGINT, syscall.SIGQUIT, syscall.SIGTERM)\n\t\ts := <-c\n\t\tlogger.Tracef(\"Got signal [%s]\", s)\n\n\t\tsession.SaveOnlineUsers()\n\t\tlogger.Tracef(\"Saved all online user, exit\")\n\n\t\tos.Exit(0)\n\t}()\n}", "func tracefsUprobe(args probeArgs) (*perfEvent, error) {\n\treturn tracefsProbe(uprobeType, args)\n}", "func CatchInterruptPanic() {\r\n\tsigchan := make(chan os.Signal, 1)\r\n\tsignal.Notify(sigchan, os.Interrupt)\r\n\t<-sigchan\r\n\tsignal.Stop(sigchan)\r\n\tPrintProgramStatus()\r\n\tpanic(\"SIGINT\")\r\n}", "func (x *RpcExector) signal(rpcc *rpc.XmlRPCClient, sig_name string, processes []string) {\n\tfor _, process := range processes {\n\t\tif process == \"all\" {\n\t\t\treply, err := rpcc.SignalAll(process)\n\t\t\tif err == nil {\n\t\t\t\tx.showProcessInfo(&reply, make(map[string]bool))\n\t\t\t} else {\n\t\t\t\tfmt.Printf(\"Fail to send signal %s to all process\", sig_name)\n\t\t\t\tos.Exit(1)\n\t\t\t}\n\t\t} else {\n\t\t\treply, err := rpcc.SignalProcess(sig_name, process)\n\t\t\tif err == nil && reply.Success {\n\t\t\t\tfmt.Printf(\"Succeed to send signal %s to process %s\\n\", sig_name, process)\n\t\t\t} else {\n\t\t\t\tfmt.Printf(\"Fail to send signal %s to process %s\\n\", sig_name, process)\n\t\t\t\tos.Exit(1)\n\t\t\t}\n\t\t}\n\t}\n}", "func show_stats()bool{\nreturn flags['s']/* should statistics be printed at end of run? */\n}", "func analyzeGoroutines(events []*trace.Event) {\n\tgsInit.Do(func() {\n\t\tgs = trace.GoroutineStats(events)\n\t})\n}", "func handleSignal(onSignal func()) {\n\tsigChan := make(chan os.Signal, 10)\n\tsignal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM, syscall.SIGPIPE)\n\tfor signo := range sigChan {\n\t\tswitch signo {\n\t\tcase syscall.SIGINT, syscall.SIGTERM:\n\t\t\tlog.Infof(\"received signal %d (%v)\", signo, signo)\n\t\t\tonSignal()\n\t\t\treturn\n\t\tcase syscall.SIGPIPE:\n\t\t\t// By default systemd redirects the stdout to journald. When journald is stopped or crashes we receive a SIGPIPE signal.\n\t\t\t// Go ignores SIGPIPE signals unless it is when stdout or stdout is closed, in this case the agent is stopped.\n\t\t\t// We never want the agent to stop upon receiving SIGPIPE, so we intercept the SIGPIPE signals and just discard them.\n\t\tdefault:\n\t\t\tlog.Warnf(\"unhandled signal %d (%v)\", signo, signo)\n\t\t}\n\t}\n}", "func (dpm *DevicePluginManager) handleSignals() {\n\tfor {\n\t\tselect {\n\t\tcase s := <-dpm.signalCh:\n\t\t\tswitch s {\n\t\t\tcase syscall.SIGTERM, syscall.SIGQUIT, syscall.SIGINT:\n\t\t\t\tglog.V(3).Infof(\"Received signal \\\"%v\\\", shutting down\", s)\n\t\t\t\tclose(dpm.stopCh)\n\t\t\t}\n\t\tcase event := <-dpm.fsWatcher.Events:\n\t\t\tif event.Name == pluginapi.KubeletSocket {\n\t\t\t\tif event.Op&fsnotify.Create == fsnotify.Create {\n\t\t\t\t\tdpm.startPlugins()\n\t\t\t\t}\n\t\t\t\t// TODO: Kubelet doesn't really clean-up it's socket, so this is currently manual-testing thing. 
Could we solve Kubelet deaths better?\n\t\t\t\tif event.Op&fsnotify.Remove == fsnotify.Remove {\n\t\t\t\t\tdpm.stopPlugins()\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}", "func (s *Stats) GetAllCPUInfo() {\n s.GetCPUInfo()\n s.GetCPUTimes()\n}", "func main() {\n\t// Trace for monitoring go routines\n\tf, err := os.Create(\"trace.out\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer f.Close()\n\n\terr = trace.Start(f)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer trace.Stop()\n\n\t// Start random seed\n\trand.Seed(time.Now().UnixNano())\n\n\t// Get required inputs from the user\n\tproductsRate, _ := strconv.ParseInt(userInput(\"Please enter the range of products per trolley. (1-200):\", 1, 200, true), 10, 64)\n\tcustomerRate, _ := strconv.Atoi(userInput(\"Please enter the rate customers arrive at checkouts. (1-60):\", 1, 60, true))\n\tprocessSpeed, _ := strconv.ParseFloat(userInput(\"Please enter the range for product processing speed. (0.5-6):\", 0.5, 6, false), 64)\n\t// Print the inputs back to the user\n\tfmt.Println(\"Products Rate:\", productsRate)\n\tfmt.Println(\"Customer Rate:\", customerRate)\n\tfmt.Printf(\"%s %f\", \"Process Speed:\", processSpeed)\n\n\t// Add a WaitGroup for Supermarket closing when the Enter key is clicked\n\tvar wg sync.WaitGroup\n\twg.Add(1)\n\n\t// Create manager agent and start Open a Supermarket\n\tm := newManager(1, &wg, productsRate, float64(customerRate), processSpeed)\n\tm.openSupermarket()\n\n\t// Locks program running, must be at the end of main\n\tfmt.Println(\"\\nPress Enter at any time to terminate simulation...\")\n\tinput := bufio.NewScanner(os.Stdin)\n\t// Waits for Enter to be clicked\n\tinput.Scan()\n\n\tfmt.Println(\"\\nSupermarket CLosing...\")\n\n\t// Start graceful shutdown of the Supermarket\n\tm.closeSupermarket()\n\n\t// Wait for the Supermarket to close and the channels and go routines to shut down\n\twg.Wait()\n\n\t// Get the supermarket metrics for Statistics print\n\tsupermarket := m.getSupermarket()\n\tcheckouts := supermarket.getAllCheckouts()\n\ttotalProcessedProducts := getTotalProcessedProducts(checkouts)\n\tfmt.Println()\n\n\t// Sort the Checkouts array for print\n\tsort.SliceStable(checkouts, func(i, j int) bool {\n\t\treturn checkouts[i].Number < checkouts[j].Number\n\t})\n\n\t// Print the Checkout stats in order of checkout number\n\tprintCheckoutStats(checkouts, totalProcessedProducts)\n}", "func trapSignals() {\n\tc := make(chan os.Signal, 1)\n\tsignal.Notify(c, os.Interrupt, syscall.SIGTERM)\n\tgo func() {\n\t\ts := <-c\n\t\tcli.Exit(128 + int(s.(syscall.Signal)))\n\t}()\n}", "func Test_NoStatusChangeOnStart(t *testing.T) {\n\tconst (\n\t\tresultName = \"copy_of_node_cpu_seconds_global\"\n\t\tsourceMetric = \"node_cpu_seconds_global\"\n\t)\n\n\tfor _, resolutionSecond := range []int{10, 30, 60} {\n\t\tt.Run(fmt.Sprintf(\"resolution=%d\", resolutionSecond), func(t *testing.T) {\n\t\t\tvar (\n\t\t\t\tresPoints []types.MetricPoint\n\t\t\t\tl sync.Mutex\n\t\t\t)\n\n\t\t\tstore := store.New(time.Hour, time.Hour)\n\t\t\treg, err := registry.New(registry.Option{\n\t\t\t\tPushPoint: pushFunction(func(ctx context.Context, points []types.MetricPoint) {\n\t\t\t\t\tl.Lock()\n\t\t\t\t\tdefer l.Unlock()\n\n\t\t\t\t\tresPoints = append(resPoints, points...)\n\t\t\t\t}),\n\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\n\t\t\tctx := context.Background()\n\t\t\tt0 := time.Now().Truncate(time.Second)\n\n\t\t\truleManager := newManager(ctx, store, defaultLinuxRecordingRules, t0)\n\n\t\t\tpromqlRules := 
[]PromQLRule{\n\t\t\t\t{\n\t\t\t\t\tAlertingRuleID: \"509701d5-3cb0-449b-a858-0290f4dc3cff\",\n\t\t\t\t\tName: resultName,\n\t\t\t\t\tWarningQuery: fmt.Sprintf(\"%s > 0\", sourceMetric),\n\t\t\t\t\tWarningDelay: 5 * time.Minute,\n\t\t\t\t\tCriticalQuery: fmt.Sprintf(\"%s > 100\", sourceMetric),\n\t\t\t\t\tCriticalDelay: 5 * time.Minute,\n\t\t\t\t\tResolution: time.Duration(resolutionSecond) * time.Second,\n\t\t\t\t\tInstanceID: agentID,\n\t\t\t\t},\n\t\t\t}\n\n\t\t\terr = ruleManager.RebuildPromQLRules(promqlRules)\n\t\t\tif err != nil {\n\t\t\t\tt.Error(err)\n\t\t\t}\n\n\t\t\tid, err := reg.RegisterAppenderCallback(\n\t\t\t\tregistry.RegistrationOption{\n\t\t\t\t\tNoLabelsAlteration: true,\n\t\t\t\t\tDisablePeriodicGather: true,\n\t\t\t\t},\n\t\t\t\tregistry.AppenderRegistrationOption{},\n\t\t\t\truleManager,\n\t\t\t)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\n\t\t\tfor currentTime := t0; currentTime.Before(t0.Add(7 * time.Minute)); currentTime = currentTime.Add(time.Second * time.Duration(resolutionSecond)) {\n\t\t\t\tif !currentTime.Equal(t0) {\n\t\t\t\t\t// cpu_used need two gather to be calculated, skip first point.\n\t\t\t\t\tstore.PushPoints(context.Background(), []types.MetricPoint{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tPoint: types.Point{\n\t\t\t\t\t\t\t\tTime: currentTime,\n\t\t\t\t\t\t\t\tValue: 30,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tLabels: map[string]string{\n\t\t\t\t\t\t\t\ttypes.LabelName: sourceMetric,\n\t\t\t\t\t\t\t\ttypes.LabelInstanceUUID: agentID,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t})\n\t\t\t\t}\n\n\t\t\t\tif currentTime.Sub(t0) > 6*time.Minute {\n\t\t\t\t\tlogger.V(0).Printf(\"Number of points: %d\", len(resPoints))\n\t\t\t\t}\n\n\t\t\t\truleManager.now = func() time.Time { return currentTime }\n\t\t\t\treg.InternalRunScrape(ctx, currentTime, id)\n\t\t\t}\n\n\t\t\tvar hadResult bool\n\n\t\t\t// Manager should not create ok points since the metric is always in critical.\n\t\t\t// This test might be changed in the future if we implement a persistent store,\n\t\t\t// as it would allow to known the exact hold state of the Prometheus rule.\n\t\t\tfor _, p := range resPoints {\n\t\t\t\tif p.Labels[types.LabelName] != resultName {\n\t\t\t\t\tt.Errorf(\"unexpected point with labels: %v\", p.Labels)\n\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tif p.Annotations.Status.CurrentStatus == types.StatusWarning {\n\t\t\t\t\thadResult = true\n\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tt.Errorf(\"point status = %v want %v\", p.Annotations.Status.CurrentStatus, types.StatusWarning)\n\t\t\t}\n\n\t\t\tif !hadResult {\n\t\t\t\tt.Errorf(\"rule never returned any points\")\n\t\t\t}\n\t\t})\n\t}\n}", "func (d *Diagnosis) cpuProfileHandler(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"X-Content-Type-Options\", \"nosniff\")\n\tsec, err := strconv.ParseInt(r.FormValue(\"seconds\"), 10, 64)\n\tif sec <= 0 || err != nil {\n\t\tsec = 30\n\t}\n\tif durationExceedsWriteTimeout(r, float64(sec)) {\n\t\tserveError(w, http.StatusBadRequest, fmt.Sprintf(\"cpu profile duration exceeds diagnosis server's WriteTimeout: %v\", d.serverWriteTimeout))\n\t\treturn\n\t}\n\tw.Header().Set(\"Content-Type\", \"application/octet-stream\")\n\tw.Header().Set(\"Content-Disposition\", fmt.Sprintf(`attachment; filename=\"%s-cpuprofile.pprof\"`, d.appName))\n\thz, _ := strconv.Atoi(r.FormValue(\"hz\"))\n\tif err := d.cpuProfile(w, time.Duration(sec)*time.Second, hz); err != nil {\n\t\tserveError(w, http.StatusInternalServerError, err.Error())\n\t\treturn\n\t}\n}", "func sysvicall1Err(fn 
*libcFunc, a1 uintptr) (r1, err uintptr) {\n\t// Leave caller's PC/SP around for traceback.\n\tgp := getg()\n\tvar mp *m\n\tif gp != nil {\n\t\tmp = gp.m\n\t}\n\tif mp != nil && mp.libcallsp == 0 {\n\t\tmp.libcallg.set(gp)\n\t\tmp.libcallpc = getcallerpc()\n\t\t// sp must be the last, because once async cpu profiler finds\n\t\t// all three values to be non-zero, it will use them\n\t\tmp.libcallsp = getcallersp()\n\t} else {\n\t\tmp = nil\n\t}\n\n\tvar libcall libcall\n\tlibcall.fn = uintptr(unsafe.Pointer(fn))\n\tlibcall.n = 1\n\t// TODO(rsc): Why is noescape necessary here and below?\n\tlibcall.args = uintptr(noescape(unsafe.Pointer(&a1)))\n\tasmcgocall(unsafe.Pointer(&asmsysvicall6x), unsafe.Pointer(&libcall))\n\tif mp != nil {\n\t\tmp.libcallsp = 0\n\t}\n\treturn libcall.r1, libcall.err\n}", "func main() {\n\tlogger := gologger.New(\n\t\t\"./log\",\n\t\t\"benchmark_apache\",\n\t)\n\tdefer logger.Kill()\n\n\thandler := http.NewServeMux()\n\thandler.HandleFunc(\"/index\", func(w http.ResponseWriter, r *http.Request) {\n\t\tsm.Lock()\n\t\tdefer sm.Unlock()\n\n\t\ti++\n\n\t\tlogger.Info(\"print \", i)\n\t\tlogger.Infof(\"print:%d\", i)\n\n\t\tlogger.Error(\"print \", i)\n\t\tlogger.Errorf(\"print:%d\", i)\n\n\t\tlogger.Debug(\"print \", i)\n\t\tlogger.Debugf(\"print:%d\", i)\n\n\t\tlogger.Panic(\"print \", i)\n\t\tlogger.Panicf(\"print:%d\", i)\n\n\t\tw.Write([]byte(fmt.Sprintf(\"print %d\\n\", i)))\n\t})\n\n\tserver := &http.Server{\n\t\tAddr: \":3000\",\n\t\tHandler: handler,\n\t}\n\n\tgo func() {\n\t\tkill := make(chan os.Signal, 1)\n\t\tsignal.Notify(kill, os.Interrupt, os.Kill)\n\n\t\t<-kill\n\n\t\tlogger.Info(\"caught signal interrupt\")\n\n\t\tclose(kill)\n\t\tsignal.Stop(kill)\n\n\t\tserver.SetKeepAlivesEnabled(false)\n\t\terr := server.Shutdown(context.Background())\n\t\tif err != nil {\n\t\t\tlogger.Errorf(\"unable to stop server, reason: %v\", err)\n\t\t}\n\t}()\n\n\tlogger.Infof(\"server started on %s\", server.Addr)\n\terr := server.ListenAndServe()\n\tif err != http.ErrServerClosed {\n\t\tlogger.Errorf(\"unable to start server, reason: %v\", err)\n\t} else {\n\t\tlogger.Info(\"server stopped!\")\n\t}\n}", "func measureSpammerMetrics() {\n\tif spammerStartTime.IsZero() {\n\t\t// Spammer not started yet\n\t\treturn\n\t}\n\n\tsentSpamMsgsCnt := deps.ServerMetrics.SentSpamMessages.Load()\n\tnew := utils.GetUint32Diff(sentSpamMsgsCnt, lastSentSpamMsgsCnt)\n\tlastSentSpamMsgsCnt = sentSpamMsgsCnt\n\n\tspammerAvgHeap.Add(uint64(new))\n\n\ttimeDiff := time.Since(spammerStartTime)\n\tif timeDiff > 60*time.Second {\n\t\t// Only filter over one minute maximum\n\t\ttimeDiff = 60 * time.Second\n\t}\n\n\t// trigger events for outside listeners\n\tEvents.AvgSpamMetricsUpdated.Trigger(&spammer.AvgSpamMetrics{\n\t\tNewMessages: new,\n\t\tAverageMessagesPerSecond: spammerAvgHeap.GetAveragePerSecond(timeDiff),\n\t})\n}", "func init() {\n\t// catch SIGQUIT and print stack traces\n\tsigChan := make(chan os.Signal, 1)\n\tgo func() {\n\t\tfor range sigChan {\n\t\t\tlog.Printf(\"[INFO] SIGQUIT detected, dump:\\n%s\", getDump())\n\t\t}\n\t}()\n\tsignal.Notify(sigChan, syscall.SIGQUIT)\n}", "func signal_ignore(s uint32) {\n}", "func StartStats() {\n\tif *statsInterval <= 0 {\n\t\treturn\n\t}\n\tgo func() {\n\t\tch := time.Tick(*statsInterval)\n\t\tfor {\n\t\t\t<-ch\n\t\t\tfs.Stats.Log()\n\t\t}\n\t}()\n}", "func init() {\n\tprometheus.MustRegister(uptime, reqCount, reqCountPerEndpoint, userCPU, systemCPU, memUsage, diskUsage)\n\tinitStat, err := stat.GetServerStat()\n\tif err != nil 
{\n\t\tlog.Error(err)\n\t}\n\tgo recordServerMetrics(initStat)\n}", "func prometheusEvent(event *v1.Event, er *EventRouter) {\n\tvar counter prometheus.Counter\n\tvar err error\n\n\tif event.Type == \"Normal\" {\n\t\tcounter, err = kubernetesNormalEventCounterVec.GetMetricWithLabelValues(\n\t\t\tevent.InvolvedObject.Kind,\n\t\t\tevent.InvolvedObject.Name,\n\t\t\tevent.InvolvedObject.Namespace,\n\t\t\tevent.Reason,\n\t\t\tevent.Source.Host,\n\t\t\ter.Clustername,\n\t\t)\n\t} else if event.Type == \"Warning\" {\n\t\tcounter, err = kubernetesWarningEventCounterVec.GetMetricWithLabelValues(\n\t\t\tevent.InvolvedObject.Kind,\n\t\t\tevent.InvolvedObject.Name,\n\t\t\tevent.InvolvedObject.Namespace,\n\t\t\tevent.Reason,\n\t\t\tevent.Source.Host,\n\t\t\ter.Clustername,\n\t\t)\n\t}\n\n\tif err != nil {\n\t\tlogrus.Warnf(\"prometheus event error: \" + err.Error())\n\t} else {\n\t\tcounter.Add(1)\n\t}\n}", "func (s *Service) hdlXcodeStats() {\n\tvar (\n\t\tc = context.TODO()\n\t\tstates = []int8{archive.VideoXcodeSDFinish, archive.VideoXcodeHDFinish, archive.VideoDispatchFinish} //xcode states need stats\n\t\tlevels = []int8{50, 60, 80, 90}\n\t\txcodeStats = make(map[int8]map[string]int)\n\t\tbs []byte\n\t\terr error\n\t\tctime = time.Now()\n\t\tmtime = ctime\n\t)\n\n\tfor _, st := range states {\n\t\tif _, ok := s.xcodeTimeCache.Data[st]; !ok {\n\t\t\tcontinue\n\t\t}\n\t\tsort.Ints(s.xcodeTimeCache.Data[st])\n\t\tseconds := s.xcodeTimeCache.Data[st]\n\t\tif len(seconds) < 1 {\n\t\t\tcontinue\n\t\t}\n\t\tfor _, l := range levels {\n\t\t\tm := \"m\" + strconv.Itoa(int(l))\n\t\t\to := int(math.Floor(float64(len(seconds))*(float64(l)/100)+0.5)) - 1 //seconds offset\n\t\t\tif o < 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif o < 0 || o >= len(seconds) {\n\t\t\t\tlog.Error(\"s.hdlVideoXcodeStats() index out of range. 
seconds(%d)\", o)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif _, ok := xcodeStats[st]; !ok {\n\t\t\t\txcodeStats[st] = make(map[string]int)\n\t\t\t}\n\t\t\txcodeStats[st][m] = seconds[o]\n\t\t}\n\t}\n\tif bs, err = json.Marshal(xcodeStats); err != nil {\n\t\tlog.Error(\"s.hdlVideoXcodeStats() json.Marshal error(%v)\", err)\n\t\treturn\n\t}\n\tlog.Info(\"s.hdlVideoXcodeStats() end xcode stats xcodeStats:%s\", bs)\n\tif len(xcodeStats) < 1 {\n\t\tlog.Info(\"s.hdlVideoXcodeStats() end xcode stats ignore empty data\")\n\t\treturn\n\t}\n\tif _, err = s.arc.ReportAdd(c, archive.ReportTypeXcode, string(bs), ctime, mtime); err != nil {\n\t\tlog.Error(\"s.hdlVideoXcodeStats() s.arc.ReportAdd error(%v)\", err)\n\t\treturn\n\t}\n\ts.xcodeTimeCache.Lock()\n\tdefer s.xcodeTimeCache.Unlock()\n\ts.xcodeTimeCache.Data = make(map[int8][]int)\n}", "func initSigHandle(c *os.Process) {\n\tLog(robot.Info, \"Starting pid 1 signal handler\")\n\tsigs := make(chan os.Signal, 1)\n\n\tsignal.Notify(sigs, unix.SIGINT, unix.SIGTERM)\n\n\tfor {\n\t\tselect {\n\t\tcase sig := <-sigs:\n\t\t\tsignal.Stop(sigs)\n\t\t\tLog(robot.Info, \"Caught signal '%s', propagating to child pid %d\", sig, c.Pid)\n\t\t\tc.Signal(sig)\n\t\t}\n\t}\n}", "func ProcStat(c *gin.Context) {\n\tres := CmdExec(\"cat /proc/stat | head -n 1 | awk '{$1=\\\"\\\";print}'\")\n\tresArray := strings.Split(res[0], \" \")\n\tvar cpu []int64\n\tvar totalcpu, idlecpu int64\n\tfor _, v := range resArray {\n\t\ttemp, err := strconv.ParseInt(v, 10, 64)\n\t\tif err == nil {\n\t\t\tcpu = append(cpu, temp)\n\t\t\ttotalcpu = totalcpu + temp\n\t\t}\n\t}\n\tidlecpu = cpu[3]\n\tc.JSON(http.StatusOK, gin.H{\n\t\t\"totalcpu\": totalcpu,\n\t\t\"idlecpu\": idlecpu,\n\t})\n}", "func (g *Goer) installSignal() {\n\tch := make(chan os.Signal, 1)\n\tsignal.Notify(ch, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT, syscall.SIGUSR1, syscall.SIGUSR2)\n\tfor signalType := range ch {\n\t\tswitch signalType {\n\t\t// stop process in debug mode with Ctrl+c.\n\t\tcase syscall.SIGINT:\n\t\t\tg.stopAll(ch, signalType)\n\t\t// kill signal in bash shell.\n\t\tcase syscall.SIGKILL | syscall.SIGTERM:\n\t\t\tg.stopAll(ch, signalType)\n\t\t// graceful reload\n\t\tcase syscall.SIGQUIT:\n\t\t\tsignal.Stop(ch)\n\t\t\tg.reload()\n\t\t\tos.Exit(0)\n\t\t}\n\t}\n}", "func processSimpleCounters(counters *interfaces.VnetInterfaceSimpleCounters) {\n\tfmt.Printf(\"%+v\\n\", counters)\n\n\tcounterNames := []string{\"Drop\", \"Punt\", \"IPv4\", \"IPv6\", \"RxNoBuf\", \"RxMiss\", \"RxError\", \"TxError\", \"MPLS\"}\n\n\tfor i := uint32(0); i < counters.Count; i++ {\n\t\tfmt.Printf(\"Interface '%d': %s = %d\\n\",\n\t\t\tcounters.FirstSwIfIndex+i, counterNames[counters.VnetCounterType], counters.Data[i])\n\t}\n}" ]
[ "0.6210038", "0.60277957", "0.59776646", "0.5906248", "0.56304824", "0.5578134", "0.55220157", "0.54440075", "0.5421208", "0.5403018", "0.5403018", "0.53950644", "0.5334088", "0.5245548", "0.5184892", "0.5180288", "0.515967", "0.51418954", "0.5081876", "0.5081836", "0.5056463", "0.5029638", "0.5023958", "0.5013777", "0.50128746", "0.50076735", "0.5004492", "0.498369", "0.49725476", "0.49579245", "0.49566558", "0.49566558", "0.4956447", "0.49427518", "0.4938672", "0.49361086", "0.49324653", "0.4915772", "0.48957372", "0.4876094", "0.48593268", "0.48448673", "0.48229596", "0.4820306", "0.48180285", "0.48172495", "0.48111922", "0.4799881", "0.47990572", "0.479605", "0.4793894", "0.47849602", "0.4775535", "0.47705558", "0.47695613", "0.47487533", "0.47126356", "0.4712244", "0.47019464", "0.46980485", "0.46900517", "0.46836603", "0.4680185", "0.46641383", "0.4657958", "0.4648465", "0.4645476", "0.46364516", "0.46209875", "0.45944586", "0.45933396", "0.45915282", "0.45883447", "0.45833075", "0.4582045", "0.4578513", "0.45623845", "0.45552507", "0.45523712", "0.45418623", "0.45331103", "0.45328346", "0.45180553", "0.4518032", "0.45150045", "0.4512703", "0.45116457", "0.45104498", "0.4509223", "0.45068803", "0.4506831", "0.45006657", "0.44958293", "0.4494439", "0.44910234", "0.44896936", "0.44880903", "0.44874597", "0.4486886", "0.4480498" ]
0.59971803
2
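Several negatives in the record above repeat one pattern worth isolating: register a signal handler, then dump runtime state when the signal fires (`EnableGoroutineDump`, the SIGQUIT `init` hook, and the various `handleSignal` loops). A minimal, self-contained sketch of that pattern follows; the choice of SIGUSR1 and writing to stderr are illustrative assumptions, not taken from any single snippet above.

```go
package main

import (
	"os"
	"os/signal"
	"runtime/pprof"
	"syscall"
)

// dumpGoroutinesOnSignal writes a full goroutine dump to stderr every time
// the process receives SIGUSR1 (Unix-only), without stopping execution.
func dumpGoroutinesOnSignal() {
	c := make(chan os.Signal, 1)
	signal.Notify(c, syscall.SIGUSR1)
	go func() {
		for range c {
			// Debug level 2 prints the stack trace of every goroutine.
			_ = pprof.Lookup("goroutine").WriteTo(os.Stderr, 2)
		}
	}()
}

func main() {
	dumpGoroutinesOnSignal()
	select {} // block so the process can be signalled: kill -USR1 <pid>
}
```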
QuickExec quick exec a simple command line
func QuickExec(cmdLine string, workDir ...string) (string, error) { return ExecLine(cmdLine, workDir...) }
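The one-line document above delegates everything to `ExecLine`, which also appears verbatim among the negatives below (it parses the command line with `cmdline.NewParser` and returns the command's stdout). Under that assumption, a minimal usage sketch; the command string and working directory are made up for illustration:

```go
package main

import (
	"fmt"
	"log"
)

func main() {
	// Assumes QuickExec and its ExecLine dependency (shown in the
	// negatives of this record) are in scope in this package.
	out, err := QuickExec("echo hello", ".")
	if err != nil {
		log.Fatalf("quick exec failed: %v", err)
	}
	fmt.Print(out) // the command's stdout, e.g. "hello\n"
}
```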
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func main() {\n\tlines := util.ReadLines()\n\n\tansP1, ansP2 := Exec(lines)\n\tfmt.Printf(\"Part1: %v\\n\", ansP1)\n\tfmt.Printf(\"Part2: %v\\n\", ansP2)\n}", "func main() {\n\tInitVars()\n\ttfmBins := InitTfmBins(&OsPath)\n\tverConstraints, err := ParseTfmConfigs(\"./\")\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"[ERROR] %s\", err.Error())\n\t\tos.Exit(1)\n\t}\n\ttfmBinFile := SelectTfmBin(verConstraints, tfmBins)\n\tmyDebug(\"Calling: %s\", tfmBinFile)\n\tif tfmBinFile == \"\" {\n\t\tfmt.Println(\"[ERROR] there is no file to execute\")\n\t\tos.Exit(1)\n\t}\n\tout, err := exec.Command(tfmBinFile, os.Args[1:]...).CombinedOutput()\n\tfmt.Println(string(out[:]))\n\tif err != nil {\n\t\tos.Exit(1)\n\t}\n}", "func main() {\n\tcmd.Execute()\n}", "func main() {\n\tcmd.Execute()\n}", "func main() {\n\tcmd.Execute()\n}", "func main() {\n\tcmd.Execute()\n}", "func main() {\n\tcmd.Execute()\n}", "func main() {\n\tcmd.Execute()\n}", "func main() {\n\tcmd.Execute()\n}", "func main() {\n\terr := cmd.Execute(version)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n}", "func main() {\n\tcmd.Root().Execute()\n}", "func Exec(cmd string) {\n\n\tfmt.Printf(\"Você digitou: %s \", cmd)\n\n}", "func Executable() (string, error)", "func Setup(c *exec.Cmd) {}", "func ExecBuiltin(args []string) {\n\tif len(args) <= 0 {\n\t\tPanic(\"No parameters\")\n\t}\n\n\t//TODO: Loadings\n\tswitch args[0] {\n\tcase \"Error\":\n\t\tError(strings.Join(args[1:], \" \"))\n\tcase \"Warn\":\n\t\tWarn(strings.Join(args[1:], \" \"))\n\tcase \"Info\":\n\t\tInfo(strings.Join(args[1:], \" \"))\n\tcase \"Made\":\n\t\tMade(strings.Join(args[1:], \" \"))\n\tcase \"Ask\":\n\t\tif noColor {\n\t\t\tfmt.Print(\"[?] \")\n\t\t} else {\n\t\t\tfmt.Print(\"\\033[38;5;99;01m[?]\\033[00m \")\n\t\t}\n\t\tfmt.Println(strings.Join(args[1:], \" \"))\n\tcase \"AskYN\":\n\t\tif AskYN(strings.Join(args[1:], \" \")) {\n\t\t\tos.Exit(0)\n\t\t}\n\t\tos.Exit(1)\n\tcase \"Read\":\n\t\treader := bufio.NewReader(os.Stdin)\n\t\ttext, _ := reader.ReadString('\\n')\n\t\tfmt.Print(text)\n\tcase \"ReadSecure\":\n\t\tfmt.Print(ReadSecure())\n\tcase \"AskList\":\n\t\tvalues := \"\"\n\t\tdflt := -1\n\n\t\tif len(args) >= 3 {\n\t\t\tvalues = args[2]\n\t\t\tif len(args) >= 4 {\n\t\t\t\tif i, err := strconv.Atoi(args[3]); err == nil {\n\t\t\t\t\tdflt = i\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tos.Exit(AskList(strings.Split(values, \",\"), dflt, args[1]))\n\tcase \"Bell\":\n\t\tBell()\n\t}\n\tos.Exit(0)\n}", "func main() {\n cmd.Execute ()\n}", "func main() {\n\n\tif err := qml.Run(run); err != nil {\n\t\tfmt.Fprint(os.Stderr, \"error: %v\\n\", err)\n\t\tos.Exit(1)\n\t}\n}", "func (s *GitTestHelper) Exec(first string, arg ...string) bool {\n\tif s.err != nil {\n\t\treturn false\n\t}\n\tcmd := s.runner.Command(first, arg...).WithDir(s.Getwd())\n\tbytearr, err := cmd.CombinedOutput()\n\tif s.debugExec {\n\t\twords := append([]string{\">>>\", first}, arg...)\n\t\tfmt.Println(strings.Join(words, \" \"))\n\t\tfmt.Println(string(bytearr))\n\t}\n\tif err != nil {\n\t\ts.err = fmt.Errorf(\"%v %v\", err, string(bytearr))\n\t\ts.errCause = first + \" \" + strings.Join(arg, \" \")\n\t\treturn false\n\t}\n\treturn true\n}", "func main() {\n\tapp := getRootCmd()\n\n\t// ignore error so we don't exit non-zero and break gfmrun README example tests\n\t_ = app.Execute()\n}", "func main() {\n\tif err := cmd.Cmd.Execute(); err != nil {\n\t\tos.Exit(1)\n\t}\n}", "func SimpleExec(name string, args ...string) (string, error) {\n\tTrace(name, args...)\n\treturn Output(ExecCommand(name, 
args...))\n}", "func SimpleExec(name string, args ...string) (string, error) {\n\treturn Output(ExecCommand(name, args...))\n}", "func main() {\n\n // Go requires an absolute path to the binary we want to execute, so we’ll use exec.LookPath to find it (probably /bin/ls).\n // Exec requires arguments in slice form (as apposed to one big string).\n binary, lookErr := exec.LookPath(\"ls\")\n if lookErr != nil {\n panic(lookErr)\n }\n\n args := []string{\"ls\", \"-a\", \"-l\", \"-h\"} //Exec requires arguments in slice form (as apposed to one big string). first argument should be the program name\n\n //Exec also needs a set of environment variables to use. Here we just provide our current environment.\n env := os.Environ()\n\n execErr := syscall.Exec(binary, args, env) //Here’s the actual syscall.Exec call.\n //If this call is successful, the execution of our process will end here and be replaced by the /bin/ls -a -l -h process.\n if execErr != nil {// If there is an error we’ll get a return value.\n panic(execErr)\n }\n}", "func exec(c *lxc.Container, conf *Config) {\n\tvar output []byte\n\tvar err error\n\t// stdout and stderr are unfornutately concatenated\n\tif output, err = c.Execute(conf.Args.Command...); err != nil {\n\t\tif len(output) != 0 {\n\t\t\tfmt.Printf(\"%s\\n\", output)\n\t\t}\n\t\terrorExit(2, err)\n\t} else {\n\t\tfmt.Printf(\"%s\", output)\n\t}\n}", "func (c *cmdVersion) Exec(args []string) error {\n\n\tif len(args) != 0 {\n\t\tfmt.Fprintf(os.Stderr, \"usage: mashling version \\n\\nToo many arguments given.\\n\")\n\t\tos.Exit(2)\n\t} else {\n\t\tc.versionNumber = version\n\t\tfmt.Printf(\"mashling version %s\\n\", c.versionNumber)\n\t}\n\n\treturn nil\n}", "func cmdLine() string {\n\treturn \"go run mksyscall_aix_ppc64.go \" + strings.Join(os.Args[1:], \" \")\n}", "func main() {\n\texecute.Execute()\n}", "func startQuickQuit(i *app.Indicator) {\n\ti.AddQuick(\"QUIT\", qQuit, func(args ...interface{}) {\n\t\ti := args[0].(*app.Indicator)\n\t\ti.Quit()\n\t}, i)\n}", "func (t *Test) exec(tc testCommand) error {\n\tswitch cmd := tc.(type) {\n\tcase *clearCmd:\n\t\treturn t.clear()\n\n\tcase *loadCmd:\n\t\treturn cmd.append()\n\n\tcase *evalCmd:\n\t\texpr, err := parser.ParseExpr(cmd.expr)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tt := time.Unix(0, startingTime+(cmd.start.Unix()*1000000000))\n\t\tbodyBytes, err := cmd.m3query.query(expr.String(), t)\n\t\tif err != nil {\n\t\t\tif cmd.fail {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\treturn errors.Wrapf(err, \"error in %s %s, line %d\", cmd, cmd.expr, cmd.line)\n\t\t}\n\t\tif cmd.fail {\n\t\t\treturn fmt.Errorf(\"expected to fail at %s %s, line %d\", cmd, cmd.expr, cmd.line)\n\t\t}\n\n\t\terr = cmd.compareResult(bodyBytes)\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"error in %s %s, line %d. 
m3query response: %s\", cmd, cmd.expr, cmd.line, string(bodyBytes))\n\t\t}\n\n\tdefault:\n\t\tpanic(\"promql.Test.exec: unknown test command type\")\n\t}\n\treturn nil\n}", "func main() {\n\terr := cmd.RootCmd.Execute()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n}", "func Executor(s string) {\n\ts = strings.TrimSpace(s)\n\tcmdStrings := strings.Split(s, \" \")\n\tif s == \"\" {\n\t\treturn\n\t} else if s == \"quit\" || s == \"exit\" {\n\t\tfmt.Println(\"Bye!\")\n\t\tos.Exit(0)\n\t\treturn\n\t}\n\tswitch cmdStrings[0] {\n\tcase \"install-px\":\n\t\tinstallPX()\n\tcase \"deploy\":\n\t\tif len(cmdStrings) < 2 {\n\t\t\tfmt.Println(\"deploy requires an application name\")\n\t\t\treturn\n\t\t}\n\t\tdeploy(\"default\", cmdStrings[1])\n\tcase \"benchmark\":\n\t\tswitch cmdStrings[1] {\n\t\tcase \"postgres\":\n\t\t\tpodExec(\"default\", \"app=postgres\", \"/usr/bin/psql -c 'create database pxdemo;'\")\n\t\t\tpodExec(\"default\", \"app=postgres\", \"/usr/bin/pgbench -n -i -s 50 pxdemo;\")\n\t\t\tpodExec(\"default\", \"app=postgres\", \"/usr/bin/psql pxdemo -c 'select count(*) from pgbench_accounts;'\")\n\t\tdefault:\n\t\t\tfmt.Printf(\"%s benchmark not supported\\n\", cmdStrings[1])\n\t\t}\n\tcase \"px\":\n\t\tif len(cmdStrings) < 2 {\n\t\t\tfmt.Println(\"deploy requires an application name\")\n\t\t\treturn\n\t\t}\n\t\tswitch cmdStrings[1] {\n\t\tcase \"connect\":\n\t\t\tpxInit()\n\t\tcase \"snap\":\n\t\t\tif len(cmdStrings) < 3 {\n\t\t\t\tfmt.Println(\"px snap requires an application name\")\n\t\t\t\treturn\n\t\t\t}\n\t\t\tpxSnap(cmdStrings[2])\n\t\tcase \"backup\":\n\t\t\tif len(cmdStrings) < 3 {\n\t\t\t\tfmt.Println(\"px backup requires an PVC name\")\n\t\t\t\treturn\n\t\t\t}\n\t\t\tpxBackup(cmdStrings[2])\n\t\tcase \"backup-status\":\n\t\t\tif len(cmdStrings) < 3 {\n\t\t\t\tfmt.Println(\"px backup-status requires a PVC name\")\n\t\t\t\treturn\n\t\t\t}\n\t\t\tpxBackupStatus(cmdStrings[2])\n\t\tdefault:\n\t\t\tfmt.Printf(\"px %s is not a valid command\\n\", cmdStrings[1])\n\t\t}\n\tcase \"pre-flight-check\":\n\t\tpreflight()\n\tdefault:\n\t\tfmt.Printf(\"%s is not a supported option\", s)\n\t}\n\treturn\n}", "func main() {\n\tcmd.Execute(version, gitCommit, buildDate)\n}", "func SimpleExec(cmdName string, arguments ...string) {\n\tcmd := exec.Command(cmdName, arguments...) 
// nolint: gosec\n\tcmd.Stdin = os.Stdin\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\terr := cmd.Run()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n}", "func main() {\n cli := CLI{}\n cli.Run()\n}", "func Exec(name string, args ...string) string {\n\tout, err := exec.Command(name, args...).Output()\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn \"\"\n\t}\n\treturn string(out)\n}", "func startQuickLiqoWebsite(i *app.Indicator) {\n\ti.AddQuick(\"ⓘ About Liqo\", qWeb, func(args ...interface{}) {\n\t\tcmd := exec.Command(\"xdg-open\", \"http://liqo.io\")\n\t\t_ = cmd.Run()\n\t})\n}", "func Exec(name string, args ...string) error {\n\treturn syscall.Exec(name, args, os.Environ())\n}", "func Execute(osArgs []string) {\n\tif len(osArgs) == 2 && osArgs[1] == \"-i\" {\n\t\tinteractive = true\n\t}\n\tif interactive {\n\t\tshell = ishell.NewWithConfig(\n\t\t\t&readline.Config{\n\t\t\t\tPrompt: fmt.Sprintf(\"%c[1;0;32m%s%c[0m\", 0x1B, \">> \", 0x1B),\n\t\t\t\tHistoryFile: \"/tmp/readline.tmp\",\n\t\t\t\t//AutoComplete: completer,\n\t\t\t\tInterruptPrompt: \"^C\",\n\t\t\t\tEOFPrompt: \"exit\",\n\t\t\t\tHistorySearchFold: true,\n\t\t\t\t//FuncFilterInputRune: filterInput,\n\t\t\t})\n\t\tshell.Println(\"QLC Chain Server\")\n\t\taddCommand()\n\t\tshell.Run()\n\t} else {\n\t\trootCmd = &cobra.Command{\n\t\t\tUse: \"gqlc\",\n\t\t\tShort: \"CLI for QLCChain Server\",\n\t\t\tLong: `QLC Chain is the next generation public block chain designed for the NaaS.`,\n\t\t\tRun: func(cmd *cobra.Command, args []string) {\n\t\t\t\terr := start()\n\t\t\t\tif err != nil {\n\t\t\t\t\tcmd.Println(err)\n\t\t\t\t}\n\t\t\t},\n\t\t}\n\t\trootCmd.PersistentFlags().StringVar(&cfgPathP, \"config\", \"\", \"config file\")\n\t\trootCmd.PersistentFlags().StringVar(&accountP, \"account\", \"\", \"wallet address, if is nil,just run a node\")\n\t\trootCmd.PersistentFlags().StringVar(&passwordP, \"password\", \"\", \"password for wallet\")\n\t\trootCmd.PersistentFlags().StringVar(&seedP, \"seed\", \"\", \"seed for accounts\")\n\t\trootCmd.PersistentFlags().StringVar(&privateKeyP, \"privateKey\", \"\", \"seed for accounts\")\n\t\trootCmd.PersistentFlags().BoolVar(&isProfileP, \"profile\", false, \"enable profile\")\n\t\trootCmd.PersistentFlags().BoolVar(&noBootstrapP, \"nobootnode\", false, \"disable bootstrap node\")\n\t\trootCmd.PersistentFlags().StringVar(&configParamsP, \"configParams\", \"\", \"parameter set that needs to be changed\")\n\t\trootCmd.PersistentFlags().StringVar(&testModeP, \"testMode\", \"\", \"testing mode\")\n\t\taddCommand()\n\t\tif err := rootCmd.Execute(); err != nil {\n\t\t\tlog.Root.Info(err)\n\t\t\tos.Exit(1)\n\t\t}\n\t}\n}", "func (p *Qlang) Exec(codeText []byte, fname string) (err error) {\n\n\tcode := p.cl.Code()\n\tstart := code.Len()\n\tend, err := p.Cl(codeText, fname)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tif qcl.DumpCode != 0 {\n\t\tcode.Dump(start)\n\t}\n\n\tp.ExecBlock(start, end, p.cl.GlobalSymbols())\n\treturn\n}", "func (h *Howdoi) Execute() {\n\tflag.Parse()\n\n\tif h.ShowHelp {\n\t\tfmt.Println(help)\n\t\tos.Exit(0)\n\t}\n\n\tif h.ShowVersion {\n\t\tfmt.Println(version)\n\t\tos.Exit(0)\n\t}\n\n\t// position must be > 0\n\tif h.Position == 0 {\n\t\th.Position = 1\n\t}\n\n\terr := h.sanitizeQuestion(flag.Args())\n\tif err != nil {\n\t\tfmt.Println(help)\n\t\tos.Exit(1)\n\t}\n\n\tlinks, err := h.getLinks()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tanswer, err := h.getAnswer(links)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tfmt.Println(answer)\n}", "func main() 
{\n\truntime.GOMAXPROCS(runtime.NumCPU())\n\n\tapp := cliapp.NewApp()\n\tapp.Version = \"1.0.3\"\n\tapp.Description = \"this is my cli application\"\n\n\tapp.SetVerbose(cliapp.VerbDebug)\n\t// app.DefaultCmd(\"exampl\")\n\n\tapp.Add(cmd.GitCommand())\n\t// app.Add(cmd.ColorCommand())\n\tapp.Add(builtin.GenShAutoComplete())\n\t// fmt.Printf(\"%+v\\n\", cliapp.CommandNames())\n\tapp.Run()\n}", "func main() {\n\terr := app.Execute()\n\tif err != nil {\n\t\tlog.WithError(err).Error(\"Error while Execute lookatch\")\n\t}\n}", "func main() {\n\t// test mode\n\tif len(os.Args) >= 2 && os.Args[1] == \"test\" {\n\t\ttestCmdSet.Parse(os.Args[2:])\n\t\tif path := testCmdSet.Arg(0); path != \"\" {\n\t\t\texitCode := runTest(path)\n\t\t\tos.Exit(exitCode)\n\t\t}\n\t}\n\n\t// normal mode\n\tflag.Parse()\n\n\t// show version\n\tif *version {\n\t\tshowVersion()\n\t\tos.Exit(0)\n\t}\n\n\tsrc := \"\"\n\tif *jargon {\n\t\tsrc = readJargon()\n\t}\n\n\t// run one-liner\n\tif *oneLiner != \"\" {\n\t\tsrc += wrapSource(*oneLiner, *readsLines, *readsAndWritesLines)\n\t\texitCode := run(src, object.StrFileName)\n\t\tos.Exit(exitCode)\n\t}\n\n\tif srcFileName := flag.Arg(0); srcFileName != \"\" {\n\t\tfileSrc, exitCode := runscript.ReadFile(srcFileName)\n\t\tif exitCode != 0 {\n\t\t\tos.Exit(exitCode)\n\t\t}\n\t\tsrc += fileSrc\n\n\t\texitCode = run(src, srcFileName)\n\t\tos.Exit(exitCode)\n\t}\n\n\trunRepl(src)\n}", "func Exec(name string, arg ...string) error {\n\tcmd := exec.Command(name, arg...)\n\tif len(cmdExePath) != 0 {\n\t\tcmd.Dir = cmdExePath\n\t\tcmdExePath = \"\"\n\t}\n\toutput, err := cmd.CombinedOutput()\n\tif err != nil {\n\t\tlog.Fatal(\"error: \", string(output), err)\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func main() {\n\ta := createHelp()\n\terr := a.Run(os.Args)\n\tif err != nil {\n\t\tfmt.Fprintln(os.Stderr, err)\n\t\tos.Exit(1)\n\t}\n}", "func main() {\n\tfmt.Println(\"=======================================\")\n\tfmt.Println(\"This is a TeamSpeak3 plugin, do not run this as a CLI application!\")\n\tfmt.Println(\"Args were: \", os.Args)\n\tfmt.Println(\"=======================================\")\n}", "func main() {\n\tprocess_command_line()\n}", "func Console(args ...string) {\n cfg.StartCmd = \"/bin/bash -c\"\n cfg.QuotedOpts = \"'\" + cfg.Console + \"'\"\n runInteractive(\"run\", settingsToParams(0, false)...)\n}", "func main() {\n\tvar options core.Options\n\n\tespressoCmd := &cobra.Command{\n\t\tUse: \"espresso\",\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\treturn cmd.Help()\n\t\t},\n\t}\n\n\tbuildCmd := &cobra.Command{\n\t\tUse: \"build <PATH>\",\n\t\tArgs: cobra.ExactArgs(1),\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\tbuildPath := args[0]\n\t\t\tvar s config.Site\n\n\t\t\tif err := config.FromFile(buildPath, settingsFile, &s); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\treturn core.RunBuild(buildPath, &s, &options)\n\t\t},\n\t}\n\n\tbuildCmd.Flags().StringVarP(&options.OutputDir, \"output-dir\", \"o\", \"\", `Path to the target directory`)\n\tbuildCmd.Flags().BoolVar(&options.RenderRSS, \"render-rss\", false, `Render an Atom RSS feed`)\n\n\tversionCmd := &cobra.Command{\n\t\tUse: \"version\",\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\tfmt.Printf(\"Espresso %s\\n\", version)\n\t\t\treturn nil\n\t\t},\n\t}\n\n\tespressoCmd.AddCommand(buildCmd)\n\tespressoCmd.AddCommand(versionCmd)\n\n\tif err := espressoCmd.Execute(); err != nil {\n\t\tlog.Fatal(err)\n\t}\n}", "func what(s selection, args []string) 
{\n\tfmt.Println(runWithStdin(s.archive(), \"guru\", \"-modified\", \"what\", s.pos()))\n}", "func Exec(client *Client, args []string, timeoutSecs int) (*pb.ExecResult, error) {\n\tctx, cancel := context.WithTimeout(context.Background(), time.Duration(timeoutSecs)*time.Second)\n\tdefer cancel()\n\n\trequest := &pb.ExecRequest{\n\t\tExecutable: args[0],\n\t\tArgs: args[1:],\n\t}\n\n\treturn client.Exec(ctx, request)\n}", "func Exec(command string, args ...string) (string, error) {\n\tLogger.DebugC(color.Yellow, \"$ %v %v\", command, strings.Join(args, \" \"))\n\tb, err := exec.Command(command, args...).CombinedOutput()\n\tLogger.Debug(\"%s\\n\", b)\n\treturn string(b), err\n}", "func main() {\n\tflag.Parse()\n\tvar ex execer.Execer\n\tswitch *execerType {\n\tcase \"sim\":\n\t\tex = execers.NewSimExecer()\n\tcase \"os\":\n\t\tex = os_exec.NewExecer()\n\tdefault:\n\t\tlog.Fatalf(\"Unknown execer type %v\", *execerType)\n\t}\n\n\ttempDir, err := temp.TempDirDefault()\n\tif err != nil {\n\t\tlog.Fatal(\"error creating temp dir: \", err)\n\t}\n\t//defer os.RemoveAll(tempDir.Dir) //TODO: this may become necessary if we start testing with larger snapshots.\n\n\ttmp, err := temp.NewTempDir(\"\", \"daemon\")\n\tif err != nil {\n\t\tlog.Fatal(\"Cannot create tmp dir: \", err)\n\t}\n\n\toutputCreator, err := runners.NewHttpOutputCreator(tempDir, \"\")\n\tif err != nil {\n\t\tlog.Fatal(\"Cannot create OutputCreator: \", err)\n\t}\n\tfiler := snapshots.MakeTempFiler(tempDir)\n\tr := runners.NewQueueRunner(ex, filer, outputCreator, tmp, *qLen)\n\th := server.NewHandler(r, filer, 50*time.Millisecond)\n\ts, err := server.NewServer(h)\n\tif err != nil {\n\t\tlog.Fatal(\"Cannot create Scoot server: \", err)\n\t}\n\terr = s.ListenAndServe()\n\tif err != nil {\n\t\tlog.Fatal(\"Error serving Scoot Daemon: \", err)\n\t}\n}", "func Exec(name string, args ...string) (output []byte, err error) {\n\treturn exec.Command(name, args...).Output()\n}", "func Exec(t testing.TB, cmd *cobra.Command, stdIn io.Reader, args ...string) (string, string, error) {\n\tctx, cancel := context.WithCancel(context.Background())\n\tt.Cleanup(cancel)\n\n\treturn ExecCtx(ctx, cmd, stdIn, args...)\n}", "func main() {\n\tcli.CommandLineInterface()\n}", "func executeShell(ctx context.Context, context ActionExecutionContext) error {\n\t//log.Printf(\"Exec: %s\", context.Action.Shell)\n\t//cmdAndArgs := strings.Split(s.Shell, \" \")\n\t//cmd := cmdAndArgs[0]\n\t//args := cmdAndArgs[1:]\n\tshuttlePath, _ := filepath.Abs(filepath.Dir(os.Args[0]))\n\n\tcmdOptions := go_cmd.Options{\n\t\tBuffered: false,\n\t\tStreaming: true,\n\t}\n\n\texecCmd := go_cmd.NewCmdOptions(cmdOptions, \"sh\", \"-c\", \"cd '\"+context.ScriptContext.Project.ProjectPath+\"'; \"+context.Action.Shell)\n\n\t//execCmd := exec.Command(\"sh\", \"-c\", context.Action.Shell)\n\texecCmd.Env = os.Environ()\n\tfor name, value := range context.ScriptContext.Args {\n\t\texecCmd.Env = append(execCmd.Env, fmt.Sprintf(\"%s=%s\", name, value))\n\t}\n\texecCmd.Env = append(execCmd.Env, fmt.Sprintf(\"plan=%s\", context.ScriptContext.Project.LocalPlanPath))\n\texecCmd.Env = append(execCmd.Env, fmt.Sprintf(\"tmp=%s\", context.ScriptContext.Project.TempDirectoryPath))\n\texecCmd.Env = append(execCmd.Env, fmt.Sprintf(\"project=%s\", context.ScriptContext.Project.ProjectPath))\n\t// TODO: Add project path as a shuttle specific ENV\n\texecCmd.Env = append(execCmd.Env, fmt.Sprintf(\"PATH=%s\", shuttlePath+string(os.PathListSeparator)+os.Getenv(\"PATH\")))\n\texecCmd.Env = append(execCmd.Env, 
fmt.Sprintf(\"SHUTTLE_PLANS_ALREADY_VALIDATED=%s\", context.ScriptContext.Project.LocalPlanPath))\n\n\tdoneChan := make(chan struct{})\n\tgo func() {\n\t\tdefer close(doneChan)\n\t\tfor execCmd.Stdout != nil || execCmd.Stderr != nil {\n\t\t\tselect {\n\t\t\tcase line, open := <-execCmd.Stdout:\n\t\t\t\tif !open {\n\t\t\t\t\texecCmd.Stdout = nil\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tcontext.ScriptContext.Project.UI.Infoln(\"%s\", line)\n\t\t\tcase line, open := <-execCmd.Stderr:\n\t\t\t\tif !open {\n\t\t\t\t\texecCmd.Stderr = nil\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tcontext.ScriptContext.Project.UI.Errorln(\"%s\", line)\n\t\t\t}\n\t\t}\n\t}()\n\n\t// Run and wait for Cmd to return, discard Status\n\tcontext.ScriptContext.Project.UI.Titleln(\"shell: %s\", context.Action.Shell)\n\n\t// stop cmd if context is cancelled\n\tgo func() {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\terr := execCmd.Stop()\n\t\t\tif err != nil {\n\t\t\t\tcontext.ScriptContext.Project.UI.Errorln(\"Failed to stop script '%s': %v\", context.Action.Shell, err)\n\t\t\t}\n\t\tcase <-doneChan:\n\t\t}\n\t}()\n\n\tselect {\n\tcase status := <-execCmd.Start():\n\t\t<-doneChan\n\t\tif status.Exit > 0 {\n\t\t\treturn errors.NewExitCode(4, \"Failed executing script `%s`: shell script `%s`\\nExit code: %v\", context.ScriptContext.ScriptName, context.Action.Shell, status.Exit)\n\t\t}\n\t\treturn nil\n\tcase <-ctx.Done():\n\t\treturn ctx.Err()\n\t}\n}", "func Shell(t *testing.T, name string, arg ...string) {\n\tt.Helper()\n\n\tbin, err := exec.LookPath(name)\n\tif err != nil {\n\t\tt.Skipf(\"skipping, binary %q not found: %v\", name, err)\n\t}\n\n\tt.Logf(\"$ %s %v\", bin, arg)\n\n\tcmd := exec.Command(bin, arg...)\n\tif err := cmd.Start(); err != nil {\n\t\tt.Fatalf(\"failed to start command %q: %v\", name, err)\n\t}\n\n\tif err := cmd.Wait(); err != nil {\n\t\t// Shell operations in these tests require elevated privileges.\n\t\tif cmd.ProcessState.ExitCode() == 1 /* unix.EPERM */ {\n\t\t\tt.Skipf(\"skipping, permission denied: %v\", err)\n\t\t}\n\n\t\tt.Fatalf(\"failed to wait for command %q: %v\", name, err)\n\t}\n}", "func main() {\n\t// os.Args provides access to raw command-line arguments. 
Note that the first value in this\n\t// slice is the path to the program, and os.Args[1:] holds the arguments to the program.\n\targsWithProg := os.Args\n\targsWithoutProg := os.Args[1:]\n\n\t// You can get individual args with normal indexing.\n\targ := os.Args[3]\n\n\tfmt.Println(argsWithProg)\n\tfmt.Println(argsWithoutProg)\n\tfmt.Println(arg)\n}", "func main() {\n\tapp := &cli.App{\n\t\tName: \"RULEX, a lightweight iot data rule gateway\",\n\t\tUsage: \"http://rulex.ezlinker.cn\",\n\t\tCommands: []*cli.Command{\n\t\t\t{\n\t\t\t\tName: \"run\",\n\t\t\t\tUsage: \"rulex run [path of 'rulex.db']\",\n\t\t\t\tAction: func(c *cli.Context) error {\n\t\t\t\t\tutils.ShowBanner()\n\t\t\t\t\tif c.Args().Len() > 0 {\n\t\t\t\t\t\tlog.Info(\"Use config db:\", c.Args().Get(0))\n\t\t\t\t\t\tengine.RunRulex(c.Args().Get(0))\n\t\t\t\t\t} else {\n\t\t\t\t\t\tengine.RunRulex(\"rulex.db\")\n\t\t\t\t\t}\n\t\t\t\t\tlog.Debug(\"Run rulex successfully.\")\n\t\t\t\t\treturn nil\n\t\t\t\t},\n\t\t\t},\n\t\t\t// version\n\t\t\t{\n\t\t\t\tName: \"version\",\n\t\t\t\tUsage: \"rulex version\",\n\t\t\t\tAction: func(c *cli.Context) error {\n\t\t\t\t\tfmt.Println(\"Current Version is: \" + typex.DefaultVersion.Version)\n\t\t\t\t\treturn nil\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\terr := app.Run(os.Args)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n}", "func (self *Build) exec(moduleLabel core.Label, fileType core.FileType) error {\n\tthread := createThread(self, moduleLabel, fileType)\n\n\tsourceData, err := self.sourceFileReader(moduleLabel)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to execute %v: read failed: %v\", moduleLabel, err)\n\t}\n\n\t_, err = starlark.ExecFile(thread, moduleLabel.String(), sourceData,\n\t\tbuiltins.InitialGlobals(fileType))\n\treturn err\n}", "func Main() {\n\n\tcheckSupportArch()\n\n\tif len(os.Args) > 1 {\n\t\tcmd := os.Args[1]\n\t\tfmt.Println(cmd)\n\t}\n\n\tstartEtcdOrProxyV2()\n}", "func (flower *Flower) Exec(commandName string, capture bool, args []string) (string, error) {\n\tflowerCommandData, err := flower.GetFlowerCommandData(commandName)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tvar command []string\n\tif flowerCommandData.Workdir != \"\" {\n\t\tcommand = append([]string{\"cd\", flowerCommandData.Workdir, \"&&\"})\n\t}\n\n\tcommand = append([]string{flowerCommandData.Bin})\n\tfor _, arg := range args {\n\t\tcommand = append([]string{arg})\n\t}\n\n\tvar dockerExecOptions *DockerExecOptions\n\tswitch flowerCommandData.DockerExecOptions {\n\tcase nil:\n\t\tdockerExecOptions = flowerCommandData.DockerExecOptions\n\tdefault:\n\t\tdockerExecOptions = &DockerExecOptions{}\n\t}\n\n\treturn flower.Container.Exec(command, dockerExecOptions, capture)\n}", "func Execute(ver string) {\n\tVERSION = version{\n\t\tVersion: ver,\n\t\tRedmineAPIVersion: \"3.3+\",\n\t}\n\n\tRClient = &client.Client{\n\t\tHTTPClient: &http.Client{},\n\t\tUserAgent: fmt.Sprintf(\"arcli/v%s\", VERSION.Version),\n\t}\n\n\tif err := rootCmd.Execute(); err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n}", "func (c *Tool) Exec() ([]byte, error) {\n\treturn c.Run()\n}", "func ExecLine(cmdLine string, workDir ...string) (string, error) {\n\tp := cmdline.NewParser(cmdLine)\n\n\t// create a new Cmd instance\n\tcmd := p.NewExecCmd()\n\tif len(workDir) > 0 {\n\t\tcmd.Dir = workDir[0]\n\t}\n\n\tbs, err := cmd.Output()\n\treturn string(bs), err\n}", "func sysExec(args ...OBJ) OBJ {\n\tif len(args) < 1 {\n\t\treturn NewError(\"`sys.exec` wanted string, got invalid argument\")\n\t}\n\n\tvar command 
string\n\tswitch c := args[0].(type) {\n\tcase *object.String:\n\t\tcommand = c.Value\n\tdefault:\n\t\treturn NewError(\"`sys.exec` wanted string, got invalid argument\")\n\t}\n\n\tif len(command) < 1 {\n\t\treturn NewError(\"`sys.exec` expected string, got invalid argument\")\n\t}\n\t// split the command\n\ttoExec := splitCommand(command)\n\tcmd := exec.Command(toExec[0], toExec[1:]...)\n\n\t// get the result\n\tvar outb, errb bytes.Buffer\n\tcmd.Stdout = &outb\n\tcmd.Stderr = &errb\n\terr := cmd.Run()\n\n\t// If the command exits with a non-zero exit-code it\n\t// is regarded as a failure. Here we test for ExitError\n\t// to regard that as a non-failure.\n\tif err != nil && err != err.(*exec.ExitError) {\n\t\tfmt.Printf(\"Failed to run '%s' -> %s\\n\", command, err.Error())\n\t\treturn &object.Error{Message: \"Failed to run command!\"}\n\t}\n\n\t// The result-objects to store in our hash.\n\tstdout := &object.String{Value: outb.String()}\n\tstderr := &object.String{Value: errb.String()}\n\n\treturn NewHash(StringObjectMap{\n\t\t\"stdout\": stdout,\n\t\t\"stderr\": stderr,\n\t})\n}", "func (o *CreateQuickstartOptions) Run() error {\n\tinstallOpts := InstallOptions{\n\t\tCommonOptions: CommonOptions{\n\t\t\tFactory: o.Factory,\n\t\t\tOut: o.Out,\n\t\t},\n\t}\n\tuserAuth, err := installOpts.getGitUser(\"git username to create the quickstart\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tauthConfigSvc, err := o.Factory.CreateGitAuthConfigService()\n\tif err != nil {\n\t\treturn err\n\t}\n\tvar server *auth.AuthServer\n\tconfig := authConfigSvc.Config()\n\tif o.GitHub {\n\t\tserver = config.GetOrCreateServer(gits.GitHubHost)\n\t} else {\n\t\tif o.GitHost != \"\" {\n\t\t\tserver = config.GetOrCreateServer(o.GitHost)\n\t\t} else {\n\t\t\tserver, err = config.PickServer(\"Pick the git server to search for repositories\")\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\tif server == nil {\n\t\treturn fmt.Errorf(\"no git server provided\")\n\t}\n\n\to.GitProvider, err = gits.CreateProvider(server, userAuth)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tmodel, err := o.LoadQuickstarts()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to load quickstarts: %s\", err)\n\t}\n\tq, err := model.CreateSurvey(&o.Filter)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif q == nil {\n\t\treturn fmt.Errorf(\"no quickstart chosen\")\n\t}\n\n\tdir := o.OutDir\n\tif dir == \"\" {\n\t\tdir, err = os.Getwd()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tgenDir, err := o.createQuickstart(q, dir)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\to.Printf(\"Created project at %s\\n\\n\", util.ColorInfo(genDir))\n\n\to.CreateProjectOptions.ImportOptions.GitProvider = o.GitProvider\n\to.Organisation = userAuth.Username\n\treturn o.ImportCreatedProject(genDir)\n}", "func execute_plugin(filename string, input []byte) []byte {\n cmd := filename\n arg := string(input)\n out, err := exec.Command(cmd, arg).Output()\n if err != nil {\n println(err.Error())\n return nil\n }\n return out\n}", "func Action(c *cli.Context) {\n\texec := c.String(\"exec\")\n\tfmt.Printf(\"Action: %v\\n\", exec)\n}", "func (exec *Executhor) Exec(execArg []string) {\n\tif exec.execBuiltin(execArg) == nil {\n\t\treturn\n\t}\n\tpath, err := exec.getBinaryPath(execArg[0])\n\tif err == nil {\n\t\tpid := C.fork()\n\t\tif pid != 0 {\n\t\t\tvar status C.int\n\t\t\tC.wait(&status)\n\t\t} else {\n\t\t\tsyscall.Exec(path, execArg, exec.env.GetEnv())\n\t\t}\n\t} else {\n\t\tfmt.Println(err)\n\t}\n}", "func SystemExec(command string, args 
[]string) *BuildahTestSession {\n\tfmt.Printf(\"Running: %s %s\\n\", command, strings.Join(args, \" \"))\n\tc := exec.Command(command, args...)\n\tsession, err := gexec.Start(c, GinkgoWriter, GinkgoWriter)\n\tif err != nil {\n\t\tFail(fmt.Sprintf(\"unable to run command: %s %s\", command, strings.Join(args, \" \")))\n\t}\n\treturn &BuildahTestSession{session}\n}", "func (e *Execute) Execute(args []string) error {\n\tfmt.Println(\"args: \", args)\n\tif len(args) <= 0 {\n\t\treturn fmt.Errorf(\"no args passed to echo\")\n\t}\n\n\tcli := client.NewClient(e.ClientOpts)\n\terr := cli.Init()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer cli.Close()\n\n\tresp, err := cli.Execute(request.Request{Query: string(args[0])})\n\tfmt.Println(\"ERROR: \", err, \" RESP: \", resp)\n\n\treturn nil\n}", "func Exec() error {\n\treturn scoreboardCmd.Execute()\n}", "func main() {\n\tcmd := parseCmd()\n\tif cmd.versionFlag {\n\t\tfmt.Println(\"hyf-jvm-version 0.0.1\")\n\t} else if cmd.helpFlag || cmd.class == \"\" {\n\t\tprintUsage()\n\t} else {\n\t\tstartJVM(cmd)\n\t}\n\t//entry := &Mylist{entry:make([]string,10)}\n\t//findAllFile(\"./\",entry)\n\t//fmt.Println(entry.entry)\n\t//fmt.Println(len(entry.entry))\n\n}", "func (c *actionTests) actionExec(t *testing.T) {\n\te2e.EnsureImage(t, c.env)\n\n\tuser := e2e.CurrentUser(t)\n\n\t// Create a temp testfile\n\ttmpfile, err := ioutil.TempFile(\"\", \"testSingularityExec.tmp\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer os.Remove(tmpfile.Name()) // clean up\n\n\ttestfile, err := tmpfile.Stat()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tpwd, err := os.Getwd()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\ttests := []struct {\n\t\tname string\n\t\targv []string\n\t\texit int\n\t}{\n\t\t{\n\t\t\tname: \"NoCommand\",\n\t\t\targv: []string{c.env.ImagePath},\n\t\t\texit: 1,\n\t\t},\n\t\t{\n\t\t\tname: \"True\",\n\t\t\targv: []string{c.env.ImagePath, \"true\"},\n\t\t\texit: 0,\n\t\t},\n\t\t{\n\t\t\tname: \"TrueAbsPAth\",\n\t\t\targv: []string{c.env.ImagePath, \"/bin/true\"},\n\t\t\texit: 0,\n\t\t},\n\t\t{\n\t\t\tname: \"False\",\n\t\t\targv: []string{c.env.ImagePath, \"false\"},\n\t\t\texit: 1,\n\t\t},\n\t\t{\n\t\t\tname: \"FalseAbsPath\",\n\t\t\targv: []string{c.env.ImagePath, \"/bin/false\"},\n\t\t\texit: 1,\n\t\t},\n\t\t// Scif apps tests\n\t\t{\n\t\t\tname: \"ScifTestAppGood\",\n\t\t\targv: []string{\"--app\", \"testapp\", c.env.ImagePath, \"testapp.sh\"},\n\t\t\texit: 0,\n\t\t},\n\t\t{\n\t\t\tname: \"ScifTestAppBad\",\n\t\t\targv: []string{\"--app\", \"fakeapp\", c.env.ImagePath, \"testapp.sh\"},\n\t\t\texit: 1,\n\t\t},\n\t\t{\n\t\t\tname: \"ScifTestfolderOrg\",\n\t\t\targv: []string{c.env.ImagePath, \"test\", \"-d\", \"/scif\"},\n\t\t\texit: 0,\n\t\t},\n\t\t{\n\t\t\tname: \"ScifTestfolderOrg\",\n\t\t\targv: []string{c.env.ImagePath, \"test\", \"-d\", \"/scif/apps\"},\n\t\t\texit: 0,\n\t\t},\n\t\t{\n\t\t\tname: \"ScifTestfolderOrg\",\n\t\t\targv: []string{c.env.ImagePath, \"test\", \"-d\", \"/scif/data\"},\n\t\t\texit: 0,\n\t\t},\n\t\t{\n\t\t\tname: \"ScifTestfolderOrg\",\n\t\t\targv: []string{c.env.ImagePath, \"test\", \"-d\", \"/scif/apps/foo\"},\n\t\t\texit: 0,\n\t\t},\n\t\t{\n\t\t\tname: \"ScifTestfolderOrg\",\n\t\t\targv: []string{c.env.ImagePath, \"test\", \"-d\", \"/scif/apps/bar\"},\n\t\t\texit: 0,\n\t\t},\n\t\t// blocked by issue [scif-apps] Files created at install step fall into an unexpected path #2404\n\t\t{\n\t\t\tname: \"ScifTestfolderOrg\",\n\t\t\targv: []string{c.env.ImagePath, \"test\", \"-f\", 
\"/scif/apps/foo/filefoo.exec\"},\n\t\t\texit: 0,\n\t\t},\n\t\t{\n\t\t\tname: \"ScifTestfolderOrg\",\n\t\t\targv: []string{c.env.ImagePath, \"test\", \"-f\", \"/scif/apps/bar/filebar.exec\"},\n\t\t\texit: 0,\n\t\t},\n\t\t{\n\t\t\tname: \"ScifTestfolderOrg\",\n\t\t\targv: []string{c.env.ImagePath, \"test\", \"-d\", \"/scif/data/foo/output\"},\n\t\t\texit: 0,\n\t\t},\n\t\t{\n\t\t\tname: \"ScifTestfolderOrg\",\n\t\t\targv: []string{c.env.ImagePath, \"test\", \"-d\", \"/scif/data/foo/input\"},\n\t\t\texit: 0,\n\t\t},\n\t\t{\n\t\t\tname: \"WorkdirContain\",\n\t\t\targv: []string{\"--contain\", c.env.ImagePath, \"test\", \"-f\", tmpfile.Name()},\n\t\t\texit: 1,\n\t\t},\n\t\t{\n\t\t\tname: \"Workdir\",\n\t\t\targv: []string{\"--workdir\", \"testdata\", c.env.ImagePath, \"test\", \"-f\", tmpfile.Name()},\n\t\t\texit: 0,\n\t\t},\n\t\t{\n\t\t\tname: \"PwdGood\",\n\t\t\targv: []string{\"--pwd\", \"/etc\", c.env.ImagePath, \"true\"},\n\t\t\texit: 0,\n\t\t},\n\t\t{\n\t\t\tname: \"Home\",\n\t\t\targv: []string{\"--home\", pwd + \"testdata\", c.env.ImagePath, \"test\", \"-f\", tmpfile.Name()},\n\t\t\texit: 0,\n\t\t},\n\t\t{\n\t\t\tname: \"HomePath\",\n\t\t\targv: []string{\"--home\", \"/tmp:/home\", c.env.ImagePath, \"test\", \"-f\", \"/home/\" + testfile.Name()},\n\t\t\texit: 0,\n\t\t},\n\t\t{\n\t\t\tname: \"HomeTmp\",\n\t\t\targv: []string{\"--home\", \"/tmp\", c.env.ImagePath, \"true\"},\n\t\t\texit: 0,\n\t\t},\n\t\t{\n\t\t\tname: \"HomeTmpExplicit\",\n\t\t\targv: []string{\"--home\", \"/tmp:/home\", c.env.ImagePath, \"true\"},\n\t\t\texit: 0,\n\t\t},\n\t\t{\n\t\t\tname: \"UserBind\",\n\t\t\targv: []string{\"--bind\", \"/tmp:/var/tmp\", c.env.ImagePath, \"test\", \"-f\", \"/var/tmp/\" + testfile.Name()},\n\t\t\texit: 0,\n\t\t},\n\t\t{\n\t\t\tname: \"NoHome\",\n\t\t\targv: []string{\"--no-home\", c.env.ImagePath, \"ls\", \"-ld\", user.Dir},\n\t\t\texit: 2,\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tc.env.RunSingularity(\n\t\t\tt,\n\t\t\te2e.AsSubtest(tt.name),\n\t\t\te2e.WithProfile(e2e.UserProfile),\n\t\t\te2e.WithCommand(\"exec\"),\n\t\t\te2e.WithDir(\"/tmp\"),\n\t\t\te2e.WithArgs(tt.argv...),\n\t\t\te2e.ExpectExit(tt.exit),\n\t\t)\n\t}\n}", "func Exec(config *ssh.ClientConfig, addr string, workDir string, cmd string, nixConf string) (bytes.Buffer, error) {\n\tvar b bytes.Buffer // import \"bytes\"\n\n\t// Connect\n\tclient, err := ssh.Dial(\"tcp\", net.JoinHostPort(addr, \"22\"), config)\n\tif err != nil {\n\t\treturn b, err\n\t}\n\t// Create a session. It is one session per command.\n\tsession, err := client.NewSession()\n\tif err != nil {\n\t\treturn b, err\n\t}\n\tdefer session.Close()\n\n\tsession.Stderr = os.Stderr // get output\n\tsession.Stdout = &b // get output\n\t// you can also pass what gets input to the stdin, allowing you to pipe\n\t// content from client to server\n\t// session.Stdin = bytes.NewBufferString(\"My input\")\n\n\t// Finally, run the command\n\tfullCmd := \". 
~/.nix-profile/etc/profile.d/nix.sh && cd \" + workDir + \" && nix-shell \" + nixConf + \" --command '\" + cmd + \"'\"\n\tfmt.Println(fullCmd)\n\terr = session.Run(fullCmd)\n\treturn b, err\n}", "func executeLaunch() {\n\tfmt.Println(\"Launching ...\")\n}", "func (h Client) Exec(arg ...string) (string, string, error) {\n\tcmd := exec.Command(h.HelmExecutable, arg...)\n\n\tklog.V(8).Infof(\"running helm command: %v\", cmd)\n\n\tvar stdoutBuf, stderrBuf bytes.Buffer\n\n\tcmd.Stdout = &stdoutBuf\n\tcmd.Stderr = &stderrBuf\n\n\terr := cmd.Run()\n\toutStr, errStr := stdoutBuf.String(), stderrBuf.String()\n\tif err != nil {\n\t\tklog.V(8).Infof(\"stdout: %s\", outStr)\n\t\tklog.V(7).Infof(\"stderr: %s\", errStr)\n\t\treturn \"\", errStr, fmt.Errorf(\"exit code %d running command %s\", cmd.ProcessState.ExitCode(), cmd.String())\n\t}\n\n\treturn outStr, errStr, nil\n}", "func main() {\n\tcmd.Run()\n}", "func main() {\n\tif err := rootCmd.Execute(); err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n}", "func main() {\n\texecuteReadFile()\n\tfmt.Println(\"Nunca me ejecutare\")\n}", "func (t *Test) Exec() (err error) {\n\ts, e, err := Exec(t.Command)\n\tif err != nil {\n\t\tt.Result.Error(err)\n\t\treturn err\n\t}\n\tt.stdOut = s\n\tt.stdErr = e\n\tt.Result.Success()\n\treturn nil\n}", "func printHint() {\n\tprint(\"orbi - Embeddable Interactive ORuby Shell\\n\\n\")\n}", "func Exec(name string, arg ...string) error {\n\tcmd := exec.Command(name, arg...)\n\tvar outBuffer = new(bytes.Buffer)\n\tvar errBuffer = new(bytes.Buffer)\n\tif viper.GetBool(\"verbose\") {\n\t\t_, err := fmt.Fprintf(os.Stdout, \"Executing command: %s\\n\", strings.Join(append([]string{name}, arg...), \" \"))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tcmd.Stdout = os.Stdout\n\t\tcmd.Stderr = os.Stdout\n\t} else {\n\t\tcmd.Stdout = outBuffer\n\t\tcmd.Stderr = errBuffer\n\t}\n\tif err := cmd.Run(); err != nil {\n\t\tlines := strings.Split(outBuffer.String(), \"\\n\")\n\t\tfiltered := []string{errBuffer.String()}\n\t\tfor _, x := range lines {\n\t\t\tif strings.HasPrefix(x, \"fatal:\") {\n\t\t\t\tfiltered = append(filtered, x)\n\t\t\t}\n\t\t}\n\n\t\treturn fmt.Errorf(strings.Join(filtered, \"\\n\"))\n\t}\n\treturn nil\n}", "func execSynopsis(_ int, p *gop.Context) {\n\targs := p.GetArgs(1)\n\tret := doc.Synopsis(args[0].(string))\n\tp.Ret(1, ret)\n}", "func (n *mockAgent) exec(sandbox *Sandbox, c Container, cmd types.Cmd) (*Process, error) {\n\treturn nil, nil\n}", "func main() {\n\tflag.Parse()\n\tif err := echoargs(!*n, *s, flag.Args()); err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"echo %v\\n\", err)\n\t\tos.Exit(1)\n\t}\n}", "func newSimpleExec(code int, err error) simpleExec {\n\treturn simpleExec{code: code, err: err}\n}", "func Execute() {\n\n\tif len(flag.Args()) == 0 {\n\t\tshowHelp(nil)\n\t\tos.Exit(1)\n\t}\n\n\t// Check if the first argument is a native command\n\tfor _, nc := range nativeCmds {\n\t\tif nc.ID == flag.Arg(0) {\n\t\t\tnc.Cmd(flag.Args()[1:]...)\n\t\t\tos.Exit(0)\n\t\t}\n\t}\n\tfor _, a := range flag.Args() {\n\t\tartifact.Call(a)\n\t}\n}", "func Exec(exe string, args ...string) (outStr string, err error) {\n\tvar (\n\t\tcmd *exec.Cmd\n\t\tout []byte\n\t)\n\n\tif exe == \"docker-compose\" {\n\t\targs = append(dockerComposeDefaultArgs(), args...)\n\t}\n\n\tcmd = exec.Command(exe, args...)\n\tcmd.Env = os.Environ()\n\tcmd.Stdin = os.Stdin\n\n\tout, err = cmd.CombinedOutput()\n\toutStr = strings.TrimSpace(string(out))\n\treturn\n}", "func (x *InstallCommand) Exec(args []string) error {\n\terr := 
fmt.Errorf(\"Sample warning: Instance not found\")\n\tclis.WarnOn(\"Install, Exec\", err)\n\t// or,\n\t// clis.AbortOn(\"Doing Install\", err)\n\treturn nil\n}", "func main() {\n\n\tflag.Parse()\n\tif *arguments.help || *arguments.url == \"\" || *arguments.querySelector == \"\" {\n\t\tflag.Usage()\n\t\tos.Exit(0)\n\t}\n\tvar err error\n\n\t// create context\n\t// > go - How to use Chrome headless with chromedp? - Stack Overflow\n\t// > https://stackoverflow.com/questions/44067030/how-to-use-chrome-headless-with-chromedp\n\t// > How to run chromedp on foreground? · Issue #495 · chromedp/chromedp\n\t// > https://github.com/chromedp/chromedp/issues/495\n\tctxt, cancel := chromedp.NewExecAllocator(context.Background(), append(\n\t\tchromedp.DefaultExecAllocatorOptions[:],\n\t\tchromedp.Flag(\"headless\", !*arguments.noHeadless),\n\t\tchromedp.Flag(\"disable-gpu\", true),\n\t\tchromedp.Flag(\"no-first-run\", true),\n\t\tchromedp.Flag(\"no-default-browser-check\", true),\n\t)...,\n\t)\n\tdefer cancel()\n\tloggingContextOption := chromedp.WithLogf(log.Printf)\n\tif *arguments.debug {\n\t\t// debug log mode\n\t\tloggingContextOption = chromedp.WithDebugf(log.Printf)\n\t}\n\tctxt, cancel = chromedp.NewContext(ctxt, loggingContextOption)\n\tdefer cancel()\n\t// handle kill signal\n\tsignals := make(chan os.Signal, 1)\n\tsignal.Notify(signals, os.Kill, os.Interrupt)\n\tgo func() {\n\t\t<-signals\n\t\tcancel()\n\t\tos.Exit(0)\n\t}()\n\n\t// run task list\n\tvar res string\n\terr = chromedp.Run(ctxt, createTasks(*arguments.url, *arguments.querySelector, &res))\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tlog.Printf(\"\\n\\nresult: \\n%s\\n\\n\\n\", res)\n}", "func combCli(cmd string, args ...string) ([]byte, error) {\n\targs = append([]string{cmd}, args...)\n\tfmt.Printf(\"args=%s\\n\", args)\n\treturn exec.Command(\"./comb\", args...).Output()\n}", "func (c *Cmd) Exec(args []string) error {\n\tc.flag.Uint64Var(&version, \"version\", 0, \"\")\n\tc.flag.Parse(args)\n\n\t// Load config\n\tif driver != nil {\n\t\tconfig = &core.Config{\n\t\t\tData: make(map[string]core.Internal),\n\t\t}\n\t\tconfig.Data[\"\"] = core.Internal{\n\t\t\tDriver: *driver,\n\t\t\tDsn: *dsn,\n\t\t\tDirectory: *directory,\n\t\t}\n\t} else {\n\t\tconfig = core.MustNewConfig(*dirPath).WithEnv(*env)\n\t}\n\n\treturn c.Run(c, c.flag.Args()...)\n}", "func main() {\n\tbasedir := flag.String(\"basedir\", \"/tmp\", \"basedir of tmp C binary\")\n\tinput := flag.String(\"input\", \"<input>\", \"test case input\")\n\texpected := flag.String(\"expected\", \"<expected>\", \"test case expected\")\n\ttimeout := flag.String(\"timeout\", \"2000\", \"timeout in milliseconds\")\n\tmemory := flag.String(\"memory\", \"256\", \"memory limitation in MB\")\n\tflag.Parse()\n\n\tresult, u := new(model.Result), uuid.NewV4()\n\tif err := sandbox.InitCGroup(strconv.Itoa(os.Getpid()), u.String(), *memory); err != nil {\n\t\tresult, _ := json.Marshal(result.GetRuntimeErrorTaskResult())\n\t\t_, _ = os.Stdout.Write(result)\n\t\tos.Exit(0)\n\t}\n\n\tcmd := reexec.Command(\"justiceInit\", *basedir, *input, *expected, *timeout, *memory)\n\tcmd.Stdin = os.Stdin\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\tcmd.SysProcAttr = &syscall.SysProcAttr{\n\t\tCloneflags: syscall.CLONE_NEWNS |\n\t\t\tsyscall.CLONE_NEWUTS |\n\t\t\tsyscall.CLONE_NEWIPC |\n\t\t\tsyscall.CLONE_NEWPID |\n\t\t\tsyscall.CLONE_NEWNET |\n\t\t\tsyscall.CLONE_NEWUSER,\n\t\tUidMappings: []syscall.SysProcIDMap{\n\t\t\t{\n\t\t\t\tContainerID: 0,\n\t\t\t\tHostID: os.Getuid(),\n\t\t\t\tSize: 
1,\n\t\t\t},\n\t\t},\n\t\tGidMappings: []syscall.SysProcIDMap{\n\t\t\t{\n\t\t\t\tContainerID: 0,\n\t\t\t\tHostID: os.Getgid(),\n\t\t\t\tSize: 1,\n\t\t\t},\n\t\t},\n\t}\n\n\tif err := cmd.Run(); err != nil {\n\t\tresult, _ := json.Marshal(result.GetRuntimeErrorTaskResult())\n\t\t_, _ = os.Stderr.WriteString(fmt.Sprintf(\"%s\\n\", err.Error()))\n\t\t_, _ = os.Stdout.Write(result)\n\t}\n\n\tos.Exit(0)\n}", "func main() {\n\tlog.Printf(\"Build var 'version' is: %s\", version)\n\tlog.Printf(\"Build var 'time' is: %s\", buildDate)\n\tcmd.Execute()\n}", "func (v *DevbindMock) Exec(args ...string) ([]byte, error) {\n\tif debugMocks {\n\t\tfmt.Printf(\"MOCK [Devind received: ./dpdk-devbind.py %s]\\n\", args)\n\t}\n\tv.receivedArgs = append(v.receivedArgs, args)\n\n\tif len(v.devbindResults) == 0 {\n\t\treturn nil, errors.New(\"DevbindMock - results not set\")\n\t}\n\n\tout, err := v.devbindResults[0].resultOutcome, v.devbindResults[0].resultError\n\tv.devbindResults = v.devbindResults[1:]\n\tif debugMocks {\n\t\tfmt.Printf(\"MOCK [Devind response: %s]\\n\", out)\n\t}\n\treturn []byte(out), err\n}", "func Exec() {\n\tcmd := &cobra.Command{\n\t\tUse: \"func\",\n\t\tRun: func(cmd *cobra.Command, args []string) {\n\t\t\tfmt.Fprintln(os.Stderr, cmd.UsageString())\n\t\t},\n\t}\n\n\tcmd.AddCommand(versionCommand())\n\tcmd.AddCommand(generateCommand())\n\tcmd.AddCommand(deployCommand())\n\n\t_ = cmd.Execute()\n}", "func main() {\n\tlog.SetFlags(log.Ldate | log.Ltime | log.Lmicroseconds | log.Llongfile)\n\tif err := cmd.RootCmd.Execute(); err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n}" ]
[ "0.6264366", "0.6004395", "0.5772538", "0.5772538", "0.5772538", "0.5772538", "0.5772538", "0.5772538", "0.5772538", "0.57653713", "0.5761175", "0.57583976", "0.5720655", "0.566654", "0.5643779", "0.562086", "0.55483186", "0.5535528", "0.55332303", "0.553222", "0.55076367", "0.5468675", "0.54634637", "0.544216", "0.54124343", "0.534704", "0.53363276", "0.53195477", "0.5306151", "0.5298281", "0.5293399", "0.5273138", "0.523621", "0.5234418", "0.5233896", "0.52326", "0.523069", "0.52231115", "0.5217501", "0.52116936", "0.5206525", "0.51972646", "0.517387", "0.5171835", "0.51363057", "0.5132878", "0.51243484", "0.51170594", "0.5099081", "0.5097317", "0.50832134", "0.5062289", "0.505513", "0.505393", "0.5053473", "0.50520456", "0.50493294", "0.5042732", "0.5040086", "0.5039902", "0.50372034", "0.5036177", "0.5027414", "0.5026401", "0.50256217", "0.50169826", "0.50147915", "0.50063646", "0.5004885", "0.5003954", "0.5001463", "0.49979585", "0.4991917", "0.49889475", "0.49888232", "0.4979754", "0.49786747", "0.49782434", "0.49758345", "0.4965617", "0.49637404", "0.4954164", "0.4952816", "0.49470073", "0.4933595", "0.4928142", "0.49203265", "0.4918156", "0.4911783", "0.49057597", "0.49042448", "0.4896785", "0.48964483", "0.48952985", "0.48869058", "0.48863107", "0.48862132", "0.48818502", "0.48715326", "0.48653919" ]
0.7113928
0
ExecLine quickly executes a command line string
func ExecLine(cmdLine string, workDir ...string) (string, error) { p := cmdline.NewParser(cmdLine) // create a new Cmd instance cmd := p.NewExecCmd() if len(workDir) > 0 { cmd.Dir = workDir[0] } bs, err := cmd.Output() return string(bs), err }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func QuickExec(cmdLine string, workDir ...string) (string, error) {\n\treturn ExecLine(cmdLine, workDir...)\n}", "func (ui *UI) exec(ctx context.Context, line string, reqCh chan execReq) int {\n\treq := execReq{\n\t\tctx: ctx,\n\t\tline: line,\n\t\tui: ui,\n\t\trespCh: make(chan int),\n\t}\n\tselect {\n\tcase <-ctx.Done():\n\t\treturn 0\n\tcase reqCh <- req:\n\t}\n\treturn <-req.respCh\n}", "func (cl *Client) ExecString(cmd string, args ...interface{}) (string, error) {\n\tvar s string\n\terr := cl.Conn(func(c *Conn) error {\n\t\tvar err error\n\t\ts, err = c.ExecString(cmd, args...)\n\t\treturn err\n\t})\n\treturn s, err\n}", "func Line(cmd *exec.Cmd) string {\n\treturn strings.Join(cmd.Args, \" \")\n}", "func WrapExec(cmd string, args []String, nArg uint32) (status syscall.Status){\n\n\n\tpath := \"/programs/\"+cmd\n\n\tif nArg == 0 {\n\n\t\tstatus = altEthos.Exec(path)\n\n\t} else if nArg == 1 {\n\n\t\tstatus = altEthos.Exec(path, &args[0])\n\n\t} else if nArg == 2 {\n\n\t\tstatus = altEthos.Exec(path, &args[0], &args[1])\n\n\t} else if nArg == 3 {\n\n\t\tstatus = altEthos.Exec(path, &args[0], &args[1], &args[2])\n\n\t} else if nArg == 4 {\n\n\t\tstatus = altEthos.Exec(path, &args[0], &args[1], &args[2], &args[3])\n\n\t}\n\n\treturn\n\n}", "func ExecCommand(commandLine string) (*exec.Cmd, error) {\n\n\tvar args, err = shellquote.Split(commandLine)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn exec.Command(args[0], args[1:]...), nil // #nosec\n\n}", "func commandLine( line []byte, data string, quote bool ) []byte {\n\t// Empty data ?\n\tif len( data ) == ( 0 ) {\n\t\tif !( quote ) { // Nothing to do\n\t\t\treturn line\n\t\t}\n\t}\n\t// Argument ?\n\tif ( !quote ) {\n\t\tdata = strings.TrimSpace( data )\n\t} else if len( data ) > ( 0 ) {\n\t\tquote = ( -1 < strings.IndexByte( data, ' ' ))\n\t}\n\n\tif ( quote ) { // Begin argument !\n\t\tline = append( line, '\\'' )\n\t}\n\n\tvar r rune\n\tfor off := 0; len( data ) > 0; data = data[off:] {\n\t\tr, off = rune( data[0] ), 1\n\t\tif r >= utf8.RuneSelf {\n\t\t\tr, off = utf8.DecodeRuneInString(data)\n\t\t}\n\t\tif ( off == 1 ) && ( r == utf8.RuneError ) {\n\t\t\tline = append( line, `\\x`...)\n\t\t\tline = append( line, lowerhex[ data[0]>>4 ])\n\t\t\tline = append( line, lowerhex[ data[0]&0xF ])\n\t\t\tcontinue\n\t\t}\n\t\tline = commandRune( line, r, quote )\n\t}\n\n\tif ( quote ) { // End argument !\n\t\tline = append( line, '\\'' )\n\t}\n\t\n\treturn line\n}", "func processLine(cmdLine string) {\n\tcmdLine = strings.TrimSpace(cmdLine)\n\n\tcommandList := make([]exec.Cmd, 0)\n\n\tif len(cmdLine) == 0 {\n\t\treturn\n\t}\n\n\tpipeStages := strings.Split(cmdLine, pipeChar)\n\n\terr := createPipeStages(&commandList, pipeStages)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"%v: %v.\\n\", shellName, err)\n\t\treturn\n\t}\n\n\terr = connectPipeline(commandList)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"%v: Error with pipes: %v.\\n\", shellName, err)\n\t\treturn\n\t}\n\n\terr = executePipeline(commandList)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"%v: Error during execution: %v\\n\", shellName, err)\n\t\treturn\n\t}\n}", "func ExecuteCommandline(time time.Duration, command string, extraArgs []string) (string, error) {\n\t// Create a new context and add a timeout to it\n\tctx, cancel := context.WithTimeout(context.Background(), time)\n\tdefer cancel() // The cancel should be deferred so resources are cleaned up\n\n\t// Create the command with our context\n\targs := strings.Split(command, \" \")\n\tvar cmd 
*exec.Cmd\n\n\tif len(args) == 1 {\n\t\tcmd = exec.CommandContext(ctx, args[0], extraArgs...)\n\t} else {\n\t\tcmd = exec.CommandContext(ctx, args[0], append(args[1:], extraArgs...)...)\n\t}\n\n\tcmd.Wait()\n\t// This time we can simply use Output() to get the result.\n\tout, err := cmd.CombinedOutput()\n\n\t// We want to check the context error to see if the timeout was executed.\n\t// The error returned by cmd.Output() will be OS specific based on what\n\t// happens when a process is killed.\n\tif ctx.Err() == context.DeadlineExceeded {\n\t\treturn \"\", ctx.Err()\n\t}\n\n\t// If there's no context error, we know the command completed (or errored).\n\tif err != nil {\n\t\treturn string(out), err\n\t}\n\treturn string(out), nil\n}", "func ShellExec(cmdLine string, shells ...string) (string, error) {\n\t// shell := \"/bin/sh\"\n\tshell := \"sh\"\n\tif len(shells) > 0 {\n\t\tshell = shells[0]\n\t}\n\n\tvar out bytes.Buffer\n\tcmd := exec.Command(shell, \"-c\", cmdLine)\n\tcmd.Stdout = &out\n\n\tif err := cmd.Run(); err != nil {\n\t\treturn \"\", err\n\t}\n\treturn out.String(), nil\n}", "func Exec(client *Client, args []string, timeoutSecs int) (*pb.ExecResult, error) {\n\tctx, cancel := context.WithTimeout(context.Background(), time.Duration(timeoutSecs)*time.Second)\n\tdefer cancel()\n\n\trequest := &pb.ExecRequest{\n\t\tExecutable: args[0],\n\t\tArgs: args[1:],\n\t}\n\n\treturn client.Exec(ctx, request)\n}", "func exec(c *lxc.Container, conf *Config) {\n\tvar output []byte\n\tvar err error\n\t// stdout and stderr are unfornutately concatenated\n\tif output, err = c.Execute(conf.Args.Command...); err != nil {\n\t\tif len(output) != 0 {\n\t\t\tfmt.Printf(\"%s\\n\", output)\n\t\t}\n\t\terrorExit(2, err)\n\t} else {\n\t\tfmt.Printf(\"%s\", output)\n\t}\n}", "func Exec(cmd string) {\n\n\tfmt.Printf(\"Você digitou: %s \", cmd)\n\n}", "func (ar *ActiveRecord) ExecString() string {\n\tstr := strings.Join(ar.Tokens, \" \")\n\tif len(ar.Args) > 0 {\n\t\tfor i, _ := range ar.Args {\n\t\t\tstr = strings.Replace(str, holder, fmt.Sprintf(\"$%d\", i+1), 1)\n\t\t}\n\t}\n\treturn str\n}", "func (r RealExecute) ExecCommand(com string, args ...string) ([]byte, error) {\n\t/* #nosec */\n\tcommand := exec.Command(com, args...)\n\treturn command.CombinedOutput()\n}", "func (cmd InspectCmd) Exec(ctx context.Context, commandStr string, args []string, dEnv *env.DoltEnv, cliCtx cli.CliContext) int {\n\tap := cmd.ArgParser()\n\thelp, usage := cli.HelpAndUsagePrinters(cli.CommandDocsForCommandString(commandStr, cli.CommandDocumentationContent{}, ap))\n\tapr := cli.ParseArgsOrDie(ap, args, help)\n\n\tvar verr errhand.VerboseError\n\tif apr.Contains(tableFileIndexFlag) {\n\t\tverr = cmd.measureChunkIndexDistribution(ctx, dEnv)\n\t}\n\n\treturn HandleVErrAndExitCode(verr, usage)\n}", "func (s pathRuntime) Exec(args []string) error {\n\truntimeArgs := []string{s.path}\n\tif len(args) > 1 {\n\t\truntimeArgs = append(runtimeArgs, args[1:]...)\n\t}\n\n\treturn s.execRuntime.Exec(runtimeArgs)\n}", "func Exec(t testing.TB, cmd *cobra.Command, stdIn io.Reader, args ...string) (string, string, error) {\n\tctx, cancel := context.WithCancel(context.Background())\n\tt.Cleanup(cancel)\n\n\treturn ExecCtx(ctx, cmd, stdIn, args...)\n}", "func Exec(command string, args ...string) (string, error) {\n\tLogger.DebugC(color.Yellow, \"$ %v %v\", command, strings.Join(args, \" \"))\n\tb, err := exec.Command(command, args...).CombinedOutput()\n\tLogger.Debug(\"%s\\n\", b)\n\treturn string(b), err\n}", "func execExprString(_ int, p 
*gop.Context) {\n\targs := p.GetArgs(1)\n\tret := types.ExprString(args[0].(ast.Expr))\n\tp.Ret(1, ret)\n}", "func main() {\n\tlines := util.ReadLines()\n\n\tansP1, ansP2 := Exec(lines)\n\tfmt.Printf(\"Part1: %v\\n\", ansP1)\n\tfmt.Printf(\"Part2: %v\\n\", ansP2)\n}", "func cmdLine() string {\n\treturn \"go run mksyscall_aix_ppc64.go \" + strings.Join(os.Args[1:], \" \")\n}", "func (s *GitTestHelper) Exec(first string, arg ...string) bool {\n\tif s.err != nil {\n\t\treturn false\n\t}\n\tcmd := s.runner.Command(first, arg...).WithDir(s.Getwd())\n\tbytearr, err := cmd.CombinedOutput()\n\tif s.debugExec {\n\t\twords := append([]string{\">>>\", first}, arg...)\n\t\tfmt.Println(strings.Join(words, \" \"))\n\t\tfmt.Println(string(bytearr))\n\t}\n\tif err != nil {\n\t\ts.err = fmt.Errorf(\"%v %v\", err, string(bytearr))\n\t\ts.errCause = first + \" \" + strings.Join(arg, \" \")\n\t\treturn false\n\t}\n\treturn true\n}", "func ParseExecutableLine(name string, fullLine string) (Executable, error) {\n\tline := strings.Replace(fullLine, asdfPluginPrefix, \"\", -1)\n\ttokens := strings.Split(line, \" \")\n\tif len(tokens) != 2 {\n\t\treturn Executable{}, fmt.Errorf(\"bad line %s\", fullLine)\n\t}\n\treturn Executable{\n\t\tName: name,\n\t\tPluginName: strings.TrimSpace(tokens[0]),\n\t\tPluginVersion: strings.TrimSpace(tokens[1]),\n\t}, nil\n}", "func (c *cmdVersion) Exec(args []string) error {\n\n\tif len(args) != 0 {\n\t\tfmt.Fprintf(os.Stderr, \"usage: mashling version \\n\\nToo many arguments given.\\n\")\n\t\tos.Exit(2)\n\t} else {\n\t\tc.versionNumber = version\n\t\tfmt.Printf(\"mashling version %s\\n\", c.versionNumber)\n\t}\n\n\treturn nil\n}", "func Exec(container string, cmdLine ...string) (string, error) {\n\tparts := []string{\"exec\", \"-t\", container}\n\tparts = append(parts, cmdLine...)\n\tcmd := exec.Command(\"docker\", parts...)\n\toutput, err := cmd.CombinedOutput()\n\treturn string(output), err\n}", "func execute(w io.Writer, commandline string, req io.Reader) error {\n\targv, err := cmd.SplitQuoted(commandline)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// We treat a pipe command specially.\n\t// It will be splitted by the pipe binary.\n\tif strings.HasPrefix(commandline, \"pipe \") {\n\t\targv = []string{\"pipe\", commandline[5:]}\n\t}\n\n\tif len(argv) < 1 {\n\t\treturn fmt.Errorf(\"request contains no command\")\n\t}\n\n\t// Get installation directory of editor binary.\n\t// All subcommands must be in the same directory.\n\tvar installDir string\n\tprogname := os.Args[0]\n\tif p, err := filepath.Abs(progname); err != nil {\n\t\treturn fmt.Errorf(\"cannot get editor directory\")\n\t} else {\n\t\tinstallDir = filepath.Dir(p)\n\t}\n\n\tvar buf bytes.Buffer\n\tvar errbuf bytes.Buffer\n\targv[0] = filepath.Join(installDir, argv[0])\n\tctx, cancel := context.WithCancel(context.Background())\n\tc := exec.CommandContext(ctx, argv[0], argv[1:]...)\n\tc.Stdin = req\n\tc.Stdout = &buf\n\tc.Stderr = &errbuf\n\tif err := c.Start(); err != nil {\n\t\treturn err\n\t}\n\tpid := c.Process.Pid\n\tProcessList.Add(pid, argv, cancel)\n\n\terr = c.Wait()\n\tProcessList.Remove(pid)\n\tio.Copy(w, &buf)\n\n\t// Write stderr of commands to the console.\n\tif errbuf.Len() > 0 {\n\t\tif err != nil {\n\t\t\terrmsg, _ := ioutil.ReadAll(&errbuf)\n\t\t\terr = fmt.Errorf(\"%s\\n%s\\n\", err.Error(), string(errmsg))\n\t\t} else {\n\t\t\tio.Copy(os.Stdout, &errbuf)\n\t\t}\n\t}\n\treturn err\n}", "func Exec(name string, arg ...string) error {\n\tcmd := exec.Command(name, arg...)\n\tif len(cmdExePath) != 0 
{\n\t\tcmd.Dir = cmdExePath\n\t\tcmdExePath = \"\"\n\t}\n\toutput, err := cmd.CombinedOutput()\n\tif err != nil {\n\t\tlog.Fatal(\"error: \", string(output), err)\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func Executable() (string, error)", "func (flower *Flower) Exec(commandName string, capture bool, args []string) (string, error) {\n\tflowerCommandData, err := flower.GetFlowerCommandData(commandName)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tvar command []string\n\tif flowerCommandData.Workdir != \"\" {\n\t\tcommand = append([]string{\"cd\", flowerCommandData.Workdir, \"&&\"})\n\t}\n\n\tcommand = append([]string{flowerCommandData.Bin})\n\tfor _, arg := range args {\n\t\tcommand = append([]string{arg})\n\t}\n\n\tvar dockerExecOptions *DockerExecOptions\n\tswitch flowerCommandData.DockerExecOptions {\n\tcase nil:\n\t\tdockerExecOptions = flowerCommandData.DockerExecOptions\n\tdefault:\n\t\tdockerExecOptions = &DockerExecOptions{}\n\t}\n\n\treturn flower.Container.Exec(command, dockerExecOptions, capture)\n}", "func (m *MockExec) Exec(path *string, name string, extra ...string) ([]byte, error) {\n\targs := m.Called(name, extra)\n\n\tif args.Error(1) != nil {\n\t\treturn nil, args.Error(1)\n\t}\n\treturn []byte(args.String(0)), nil\n}", "func (exec *Executhor) Exec(execArg []string) {\n\tif exec.execBuiltin(execArg) == nil {\n\t\treturn\n\t}\n\tpath, err := exec.getBinaryPath(execArg[0])\n\tif err == nil {\n\t\tpid := C.fork()\n\t\tif pid != 0 {\n\t\t\tvar status C.int\n\t\t\tC.wait(&status)\n\t\t} else {\n\t\t\tsyscall.Exec(path, execArg, exec.env.GetEnv())\n\t\t}\n\t} else {\n\t\tfmt.Println(err)\n\t}\n}", "func comandoExec(comando string) {\n\tfmt.Println(\"\\nEJECUTANDO: \" + comando)\n\ts := strings.Split(comando, \" -\")\n\tif len(s) == 2 {\n\t\ts2 := strings.Split(s[1], \"->\")\n\t\tif strings.Compare(s2[0], \"path\") == 0 {\n\t\t\t_, err := os.Stat(strings.ReplaceAll(s2[1], \"\\\"\", \"\"))\n\t\t\tif err == nil {\n\t\t\t\ts3 := strings.Split(s2[1], \".\")\n\t\t\t\tif strings.Compare(s3[1], \"mia\") == 0 {\n\t\t\t\t\tfmt.Println(\"RESULTADO: Lectura de archivo\")\n\t\t\t\t\tfmt.Println(\"\")\n\t\t\t\t\tarchivo := leerArchivo(s2[1])\n\t\t\t\t\t//mandar a analizar ese archivo\n\t\t\t\t\tanalizarArchivo(archivo)\n\t\t\t\t} else {\n\t\t\t\t\tfmt.Println(\"RESULTADO: La extension del archivo debe ser .MIA\")\n\t\t\t\t}\n\t\t\t}\n\t\t\tif os.IsNotExist(err) {\n\t\t\t\tfmt.Println(\"RESULTADO: No existe el archivo especificado\")\n\t\t\t}\n\t\t} else {\n\t\t\tfmt.Println(\"RESULTADO: El parametro PATH es obligatorio\")\n\t\t}\n\t} else if len(s) > 2 {\n\t\tfmt.Println(\"RESULTADO: Demasiados parametros para el comando EXEC\")\n\t} else {\n\t\tfmt.Println(\"RESULTADO: Faltan parametros para el comando EXEC\")\n\t}\n}", "func sysExec(args ...OBJ) OBJ {\n\tif len(args) < 1 {\n\t\treturn NewError(\"`sys.exec` wanted string, got invalid argument\")\n\t}\n\n\tvar command string\n\tswitch c := args[0].(type) {\n\tcase *object.String:\n\t\tcommand = c.Value\n\tdefault:\n\t\treturn NewError(\"`sys.exec` wanted string, got invalid argument\")\n\t}\n\n\tif len(command) < 1 {\n\t\treturn NewError(\"`sys.exec` expected string, got invalid argument\")\n\t}\n\t// split the command\n\ttoExec := splitCommand(command)\n\tcmd := exec.Command(toExec[0], toExec[1:]...)\n\n\t// get the result\n\tvar outb, errb bytes.Buffer\n\tcmd.Stdout = &outb\n\tcmd.Stderr = &errb\n\terr := cmd.Run()\n\n\t// If the command exits with a non-zero exit-code it\n\t// is regarded as a failure. 
Here we test for ExitError\n\t// to regard that as a non-failure.\n\tif err != nil && err != err.(*exec.ExitError) {\n\t\tfmt.Printf(\"Failed to run '%s' -> %s\\n\", command, err.Error())\n\t\treturn &object.Error{Message: \"Failed to run command!\"}\n\t}\n\n\t// The result-objects to store in our hash.\n\tstdout := &object.String{Value: outb.String()}\n\tstderr := &object.String{Value: errb.String()}\n\n\treturn NewHash(StringObjectMap{\n\t\t\"stdout\": stdout,\n\t\t\"stderr\": stderr,\n\t})\n}", "func (c *Cmd) Exec(args []string) error {\n\tc.flag.Uint64Var(&version, \"version\", 0, \"\")\n\tc.flag.Parse(args)\n\n\t// Load config\n\tif driver != nil {\n\t\tconfig = &core.Config{\n\t\t\tData: make(map[string]core.Internal),\n\t\t}\n\t\tconfig.Data[\"\"] = core.Internal{\n\t\t\tDriver: *driver,\n\t\t\tDsn: *dsn,\n\t\t\tDirectory: *directory,\n\t\t}\n\t} else {\n\t\tconfig = core.MustNewConfig(*dirPath).WithEnv(*env)\n\t}\n\n\treturn c.Run(c, c.flag.Args()...)\n}", "func WinExec(lpCmdLine /*const*/ LPCSTR, uCmdShow UINT) UINT {\n\tret1 := syscall3(winExec, 2,\n\t\tuintptr(unsafe.Pointer(lpCmdLine)),\n\t\tuintptr(uCmdShow),\n\t\t0)\n\treturn UINT(ret1)\n}", "func Exec(name string, arg ...string) error {\n\tcmd := exec.Command(name, arg...)\n\tvar outBuffer = new(bytes.Buffer)\n\tvar errBuffer = new(bytes.Buffer)\n\tif viper.GetBool(\"verbose\") {\n\t\t_, err := fmt.Fprintf(os.Stdout, \"Executing command: %s\\n\", strings.Join(append([]string{name}, arg...), \" \"))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tcmd.Stdout = os.Stdout\n\t\tcmd.Stderr = os.Stdout\n\t} else {\n\t\tcmd.Stdout = outBuffer\n\t\tcmd.Stderr = errBuffer\n\t}\n\tif err := cmd.Run(); err != nil {\n\t\tlines := strings.Split(outBuffer.String(), \"\\n\")\n\t\tfiltered := []string{errBuffer.String()}\n\t\tfor _, x := range lines {\n\t\t\tif strings.HasPrefix(x, \"fatal:\") {\n\t\t\t\tfiltered = append(filtered, x)\n\t\t\t}\n\t\t}\n\n\t\treturn fmt.Errorf(strings.Join(filtered, \"\\n\"))\n\t}\n\treturn nil\n}", "func CommandLine( line []byte, cmd ...string ) [ ]byte {\n\tfor i := 0; i < len( cmd ); i++ {\n\t\t// Separate argument(s) !\n\t\tif len( line ) > 0 && line[ len( line )-1 ] != ( ' ' ) {\n\t\t\tline = append( line, ' ' )\n\t\t}\n\t\tline = commandLine( line, cmd[ i ], len( line ) > 0 )\n\t}\n\treturn line\n}", "func Exec(name string, args ...string) string {\n\tout, err := exec.Command(name, args...).Output()\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn \"\"\n\t}\n\treturn string(out)\n}", "func SimpleExec(name string, args ...string) (string, error) {\n\tTrace(name, args...)\n\treturn Output(ExecCommand(name, args...))\n}", "func runExec(serviceName string, operation string) (string, error) {\n\tbytes, err := exec.Command(Configuration.ExecutorPath, serviceName, operation).CombinedOutput()\n\treturn string(bytes), err\n}", "func Exec(name string, args ...string) error {\n\treturn syscall.Exec(name, args, os.Environ())\n}", "func (h Client) Exec(arg ...string) (string, string, error) {\n\tcmd := exec.Command(h.HelmExecutable, arg...)\n\n\tklog.V(8).Infof(\"running helm command: %v\", cmd)\n\n\tvar stdoutBuf, stderrBuf bytes.Buffer\n\n\tcmd.Stdout = &stdoutBuf\n\tcmd.Stderr = &stderrBuf\n\n\terr := cmd.Run()\n\toutStr, errStr := stdoutBuf.String(), stderrBuf.String()\n\tif err != nil {\n\t\tklog.V(8).Infof(\"stdout: %s\", outStr)\n\t\tklog.V(7).Infof(\"stderr: %s\", errStr)\n\t\treturn \"\", errStr, fmt.Errorf(\"exit code %d running command %s\", cmd.ProcessState.ExitCode(), 
cmd.String())\n\t}\n\n\treturn outStr, errStr, nil\n}", "func main() {\n\n // Go requires an absolute path to the binary we want to execute, so we’ll use exec.LookPath to find it (probably /bin/ls).\n // Exec requires arguments in slice form (as apposed to one big string).\n binary, lookErr := exec.LookPath(\"ls\")\n if lookErr != nil {\n panic(lookErr)\n }\n\n args := []string{\"ls\", \"-a\", \"-l\", \"-h\"} //Exec requires arguments in slice form (as apposed to one big string). first argument should be the program name\n\n //Exec also needs a set of environment variables to use. Here we just provide our current environment.\n env := os.Environ()\n\n execErr := syscall.Exec(binary, args, env) //Here’s the actual syscall.Exec call.\n //If this call is successful, the execution of our process will end here and be replaced by the /bin/ls -a -l -h process.\n if execErr != nil {// If there is an error we’ll get a return value.\n panic(execErr)\n }\n}", "func ExecContainsString(command, contains string) error {\n\tstdOut, _, err := Exec(command)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn ContainsString(stdOut, contains)\n}", "func SimpleExec(name string, args ...string) (string, error) {\n\treturn Output(ExecCommand(name, args...))\n}", "func (cx *Context) Exec(source string) (err error) {\n\treturn cx.exec(source, \"exec\")\n}", "func (t *Test) exec(tc testCommand) error {\n\tswitch cmd := tc.(type) {\n\tcase *clearCmd:\n\t\treturn t.clear()\n\n\tcase *loadCmd:\n\t\treturn cmd.append()\n\n\tcase *evalCmd:\n\t\texpr, err := parser.ParseExpr(cmd.expr)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tt := time.Unix(0, startingTime+(cmd.start.Unix()*1000000000))\n\t\tbodyBytes, err := cmd.m3query.query(expr.String(), t)\n\t\tif err != nil {\n\t\t\tif cmd.fail {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\treturn errors.Wrapf(err, \"error in %s %s, line %d\", cmd, cmd.expr, cmd.line)\n\t\t}\n\t\tif cmd.fail {\n\t\t\treturn fmt.Errorf(\"expected to fail at %s %s, line %d\", cmd, cmd.expr, cmd.line)\n\t\t}\n\n\t\terr = cmd.compareResult(bodyBytes)\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"error in %s %s, line %d. m3query response: %s\", cmd, cmd.expr, cmd.line, string(bodyBytes))\n\t\t}\n\n\tdefault:\n\t\tpanic(\"promql.Test.exec: unknown test command type\")\n\t}\n\treturn nil\n}", "func (a *AGI) Exec(cmd ...string) (string, error) {\n\tcmd = append([]string{\"EXEC\"}, cmd...)\n\treturn a.Command(cmd...).Val()\n}", "func (t *task) Exec(ctx context.Context, processID string, spec *specs.Process, withStdin bool, attachStdio libcontainerdtypes.StdioCallback) (libcontainerdtypes.Process, error) {\n\thcsContainer, err := t.getHCSContainer()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tlogger := t.ctr.client.logger.WithFields(log.Fields{\n\t\t\"container\": t.ctr.id,\n\t\t\"exec\": processID,\n\t})\n\n\t// Note we always tell HCS to\n\t// create stdout as it's required regardless of '-i' or '-t' options, so that\n\t// docker can always grab the output through logs. We also tell HCS to always\n\t// create stdin, even if it's not used - it will be closed shortly. 
Stderr\n\t// is only created if it we're not -t.\n\tcreateProcessParms := &hcsshim.ProcessConfig{\n\t\tCreateStdInPipe: true,\n\t\tCreateStdOutPipe: true,\n\t\tCreateStdErrPipe: !spec.Terminal,\n\t}\n\tif spec.Terminal {\n\t\tcreateProcessParms.EmulateConsole = true\n\t\tif spec.ConsoleSize != nil {\n\t\t\tcreateProcessParms.ConsoleSize[0] = uint(spec.ConsoleSize.Height)\n\t\t\tcreateProcessParms.ConsoleSize[1] = uint(spec.ConsoleSize.Width)\n\t\t}\n\t}\n\n\t// Take working directory from the process to add if it is defined,\n\t// otherwise take from the first process.\n\tif spec.Cwd != \"\" {\n\t\tcreateProcessParms.WorkingDirectory = spec.Cwd\n\t} else {\n\t\tcreateProcessParms.WorkingDirectory = t.ctr.ociSpec.Process.Cwd\n\t}\n\n\t// Configure the environment for the process\n\tcreateProcessParms.Environment = setupEnvironmentVariables(spec.Env)\n\n\t// Configure the CommandLine/CommandArgs\n\tsetCommandLineAndArgs(spec, createProcessParms)\n\tlogger.Debugf(\"exec commandLine: %s\", createProcessParms.CommandLine)\n\n\tcreateProcessParms.User = spec.User.Username\n\n\t// Start the command running in the container.\n\tnewProcess, err := hcsContainer.CreateProcess(createProcessParms)\n\tif err != nil {\n\t\tlogger.WithError(err).Errorf(\"exec's CreateProcess() failed\")\n\t\treturn nil, err\n\t}\n\tpid := newProcess.Pid()\n\tdefer func() {\n\t\tif err != nil {\n\t\t\tif err := newProcess.Kill(); err != nil {\n\t\t\t\tlogger.WithError(err).Error(\"failed to kill process\")\n\t\t\t}\n\t\t\tgo func() {\n\t\t\t\tif err := newProcess.Wait(); err != nil {\n\t\t\t\t\tlogger.WithError(err).Error(\"failed to wait for process\")\n\t\t\t\t}\n\t\t\t\tif err := newProcess.Close(); err != nil {\n\t\t\t\t\tlogger.WithError(err).Error(\"failed to clean process resources\")\n\t\t\t\t}\n\t\t\t}()\n\t\t}\n\t}()\n\n\tdio, err := newIOFromProcess(newProcess, spec.Terminal)\n\tif err != nil {\n\t\tlogger.WithError(err).Error(\"failed to get stdio pipes\")\n\t\treturn nil, err\n\t}\n\t// Tell the engine to attach streams back to the client\n\t_, err = attachStdio(dio)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tp := &process{\n\t\tid: processID,\n\t\tctr: t.ctr,\n\t\thcsProcess: newProcess,\n\t\twaitCh: make(chan struct{}),\n\t}\n\n\t// Spin up a goroutine to notify the backend and clean up resources when\n\t// the process exits. 
Defer until after the start event is sent so that\n\t// the exit event is not sent out-of-order.\n\tdefer func() { go p.reap() }()\n\n\tt.ctr.client.eventQ.Append(t.ctr.id, func() {\n\t\tei := libcontainerdtypes.EventInfo{\n\t\t\tContainerID: t.ctr.id,\n\t\t\tProcessID: p.id,\n\t\t\tPid: uint32(pid),\n\t\t}\n\t\tt.ctr.client.logger.WithFields(log.Fields{\n\t\t\t\"container\": t.ctr.id,\n\t\t\t\"event\": libcontainerdtypes.EventExecAdded,\n\t\t\t\"event-info\": ei,\n\t\t}).Info(\"sending event\")\n\t\terr := t.ctr.client.backend.ProcessEvent(t.ctr.id, libcontainerdtypes.EventExecAdded, ei)\n\t\tif err != nil {\n\t\t\tt.ctr.client.logger.WithError(err).WithFields(log.Fields{\n\t\t\t\t\"container\": t.ctr.id,\n\t\t\t\t\"event\": libcontainerdtypes.EventExecAdded,\n\t\t\t\t\"event-info\": ei,\n\t\t\t}).Error(\"failed to process event\")\n\t\t}\n\t\terr = t.ctr.client.backend.ProcessEvent(t.ctr.id, libcontainerdtypes.EventExecStarted, ei)\n\t\tif err != nil {\n\t\t\tt.ctr.client.logger.WithError(err).WithFields(log.Fields{\n\t\t\t\t\"container\": t.ctr.id,\n\t\t\t\t\"event\": libcontainerdtypes.EventExecStarted,\n\t\t\t\t\"event-info\": ei,\n\t\t\t}).Error(\"failed to process event\")\n\t\t}\n\t})\n\n\treturn p, nil\n}", "func (c *VirtLauncherClient) Exec(domainName, command string, args []string, timeoutSeconds int32) (int, string, error) {\n\trequest := &cmdv1.ExecRequest{\n\t\tDomainName: domainName,\n\t\tCommand: command,\n\t\tArgs: args,\n\t\tTimeoutSeconds: int32(timeoutSeconds),\n\t}\n\texitCode := -1\n\tstdOut := \"\"\n\n\tctx, cancel := context.WithTimeout(\n\t\tcontext.Background(),\n\t\t// we give the context a bit more time as the timeout should kick\n\t\t// on the actual execution\n\t\ttime.Duration(timeoutSeconds)*time.Second+shortTimeout,\n\t)\n\tdefer cancel()\n\n\tresp, err := c.v1client.Exec(ctx, request)\n\tif resp == nil {\n\t\treturn exitCode, stdOut, err\n\t}\n\n\texitCode = int(resp.ExitCode)\n\tstdOut = resp.StdOut\n\n\treturn exitCode, stdOut, err\n}", "func execute_plugin(filename string, input []byte) []byte {\n cmd := filename\n arg := string(input)\n out, err := exec.Command(cmd, arg).Output()\n if err != nil {\n println(err.Error())\n return nil\n }\n return out\n}", "func (p *Init) exec(path string, r *ExecConfig) (process.Process, error) {\n\tvar spec specs.Process\n\tif err := json.Unmarshal(r.Spec.Value, &spec); err != nil {\n\t\treturn nil, err\n\t}\n\tspec.Terminal = r.Terminal\n\n\te := &execProcess{\n\t\tid: r.ID,\n\t\tpath: path,\n\t\tparent: p,\n\t\tspec: spec,\n\t\tstdio: stdio.Stdio{\n\t\t\tStdin: r.Stdin,\n\t\t\tStdout: r.Stdout,\n\t\t\tStderr: r.Stderr,\n\t\t\tTerminal: r.Terminal,\n\t\t},\n\t\twaitBlock: make(chan struct{}),\n\t}\n\te.execState = &execCreatedState{p: e}\n\treturn e, nil\n}", "func (p *Qlang) Exec(codeText []byte, fname string) (err error) {\n\n\tcode := p.cl.Code()\n\tstart := code.Len()\n\tend, err := p.Cl(codeText, fname)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tif qcl.DumpCode != 0 {\n\t\tcode.Dump(start)\n\t}\n\n\tp.ExecBlock(start, end, p.cl.GlobalSymbols())\n\treturn\n}", "func execCommand(log bool, name string, args ...string) (bytes.Buffer, bytes.Buffer, error) {\n\tvar (\n\t\tstdout bytes.Buffer\n\t\tstderr bytes.Buffer\n\t)\n\n\tcmd := exec.Command(name, args...)\n\tcmd.Stdout = &stdout\n\tcmd.Stderr = &stderr\n\terr := cmd.Run()\n\tif log {\n\t\tLogf(\"run command '%s %v':\\n out=%s\\n err=%s\\n ret=%v\",\n\t\t\tname, args, strings.TrimSpace(stdout.String()), strings.TrimSpace(stderr.String()), err)\n\t}\n\n\treturn 
stdout, stderr, err\n}", "func (c *ServerConn) Exec(expected int, format string, args ...interface{}) (int, string, error) {\n\treturn c.cmd(expected, format, args...)\n}", "func (cmd RemoteCmd) Exec(ctx context.Context, commandStr string, args []string, dEnv *env.DoltEnv, cliCtx cli.CliContext) int {\n\tap := cmd.ArgParser()\n\thelp, usage := cli.HelpAndUsagePrinters(cli.CommandDocsForCommandString(commandStr, remoteDocs, ap))\n\tapr := cli.ParseArgsOrDie(ap, args, help)\n\n\tvar verr errhand.VerboseError\n\n\tswitch {\n\tcase apr.NArg() == 0:\n\t\tverr = printRemotes(dEnv, apr)\n\tcase apr.Arg(0) == addRemoteId:\n\t\tverr = addRemote(dEnv, apr)\n\tcase apr.Arg(0) == removeRemoteId:\n\t\tverr = removeRemote(ctx, dEnv, apr)\n\tcase apr.Arg(0) == removeRemoteShortId:\n\t\tverr = removeRemote(ctx, dEnv, apr)\n\tdefault:\n\t\tverr = errhand.BuildDError(\"\").SetPrintUsage().Build()\n\t}\n\n\treturn HandleVErrAndExitCode(verr, usage)\n}", "func (fs *Fs) ExecCommand(name string, args ...string) ([]byte, error) {\n\treturn exec.Command(name, args...).CombinedOutput() // #nosec G204\n}", "func execCommand(name string, arg ...string) error {\n\tcmd := exec.Command(name, arg...)\n\t// For locating the GCC runtime library (libgcc_s.so.1):\n\tcmd.Env = append(os.Environ(), \"LD_LIBRARY_PATH=/ro/lib:/ro/lib64\")\n\tcmd.Stdin = os.Stdin\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\tif err := cmd.Run(); err != nil {\n\t\treturn fmt.Errorf(\"%v: %v\", cmd.Args, err)\n\t}\n\treturn nil\n}", "func (n *mockAgent) exec(sandbox *Sandbox, c Container, cmd types.Cmd) (*Process, error) {\n\treturn nil, nil\n}", "func (c *Tool) Exec() ([]byte, error) {\n\treturn c.Run()\n}", "func (player *Player) ExecAs(commandLine string, callback func(statusCode int)) {\n\tplayer.Exec(fmt.Sprintf(\"execute %v ~ ~ ~ %v\", player.name, commandLine), func(response map[string]interface{}) {\n\t\tcodeInterface, exists := response[\"statusCode\"]\n\t\tif !exists {\n\t\t\tlog.Printf(\"exec as: invalid response JSON\")\n\t\t\treturn\n\t\t}\n\t\tcode, _ := codeInterface.(int)\n\t\tif callback != nil {\n\t\t\tcallback(code)\n\t\t}\n\t})\n}", "func CommandExec() *cobra.Command {\n\n\tvar expandCmd = &cobra.Command{\n\t\tUse: \"exec [flags] <command> <shortcuts...>\",\n\t\tExample: \"$ scmpuff exec git add 1-4\",\n\t\tAliases: []string{\"execute\"},\n\t\tShort: \"Execute cmd with numeric shortcuts\",\n\t\tLong: `Expands numeric shortcuts to their full filepath and executes the command.\n\nTakes a list of digits (1 4 5) or numeric ranges (1-5) or even both.`,\n\t\tRun: func(cmd *cobra.Command, inputArgs []string) {\n\t\t\tif len(inputArgs) < 1 {\n\t\t\t\tcmd.Usage()\n\t\t\t\tos.Exit(1)\n\t\t\t}\n\n\t\t\texpandedArgs := Process(inputArgs)\n\t\t\ta := expandedArgs[1:]\n\t\t\tsubcmd := exec.Command(expandedArgs[0], a...)\n\t\t\tsubcmd.Stdin = os.Stdin\n\t\t\tsubcmd.Stdout = os.Stdout\n\t\t\tsubcmd.Stderr = os.Stderr\n\t\t\terr := subcmd.Run()\n\t\t\tif err == nil {\n\t\t\t\tos.Exit(0)\n\t\t\t}\n\t\t\tif exitError, ok := err.(*exec.ExitError); ok {\n\t\t\t\tos.Exit(exitError.ExitCode())\n\t\t\t} else {\n\t\t\t\tfmt.Fprintf(os.Stderr, \"%v\\n\", err)\n\t\t\t\tos.Exit(1)\n\t\t\t}\n\t\t},\n\t}\n\n\t// --relative\n\texpandCmd.Flags().BoolVarP(\n\t\t&expandRelative,\n\t\t\"relative\",\n\t\t\"r\",\n\t\tfalse,\n\t\t\"make path relative to current working directory\",\n\t)\n\n\treturn expandCmd\n}", "func Exec(exe string, args ...string) (outStr string, err error) {\n\tvar (\n\t\tcmd *exec.Cmd\n\t\tout []byte\n\t)\n\n\tif exe == 
\"docker-compose\" {\n\t\targs = append(dockerComposeDefaultArgs(), args...)\n\t}\n\n\tcmd = exec.Command(exe, args...)\n\tcmd.Env = os.Environ()\n\tcmd.Stdin = os.Stdin\n\n\tout, err = cmd.CombinedOutput()\n\toutStr = strings.TrimSpace(string(out))\n\treturn\n}", "func (h *DriverHandle) Exec(timeout time.Duration, cmd string, args []string) ([]byte, int, error) {\n\tcommand := append([]string{cmd}, args...)\n\tres, err := h.driver.ExecTask(h.taskID, command, timeout)\n\tif err != nil {\n\t\treturn nil, 0, err\n\t}\n\treturn res.Stdout, res.ExitResult.ExitCode, res.ExitResult.Err\n}", "func Exec(name string, args ...string) (output []byte, err error) {\n\treturn exec.Command(name, args...).Output()\n}", "func Exec(argv0 string, argv []string, envv []string) error {\n\treturn syscall.Exec(argv0, argv, envv)\n}", "func (c *RealtimeCommand) Exec(_ io.Reader, out io.Writer) error {\n\tserviceID, source, flag, err := cmd.ServiceID(c.serviceName, c.manifest, c.Globals.APIClient, c.Globals.ErrLog)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif c.Globals.Verbose() {\n\t\tcmd.DisplayServiceID(serviceID, flag, source, out)\n\t}\n\n\tswitch c.formatFlag {\n\tcase \"json\":\n\t\tif err := loopJSON(c.Globals.RTSClient, serviceID, out); err != nil {\n\t\t\tc.Globals.ErrLog.AddWithContext(err, map[string]any{\n\t\t\t\t\"Service ID\": serviceID,\n\t\t\t})\n\t\t\treturn err\n\t\t}\n\n\tdefault:\n\t\tif err := loopText(c.Globals.RTSClient, serviceID, out); err != nil {\n\t\t\tc.Globals.ErrLog.AddWithContext(err, map[string]any{\n\t\t\t\t\"Service ID\": serviceID,\n\t\t\t})\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func (git *Git) Exec(subcmd string, args ...string) (string, error) {\n\tb, err := git.Command(subcmd, args...).CombinedOutput()\n\n\t// Chop last newline\n\tl := len(b)\n\tif l > 0 && b[l-1] == '\\n' {\n\t\tb = b[:l-1]\n\t}\n\n\t// Make output in oneline in error cases\n\tif err != nil {\n\t\tfor i := range b {\n\t\t\tif b[i] == '\\n' {\n\t\t\t\tb[i] = ' '\n\t\t\t}\n\t\t}\n\t}\n\n\treturn string(b), err\n}", "func (ts *TaskService) Exec(requestCtx context.Context, req *taskAPI.ExecProcessRequest) (*types.Empty, error) {\n\tdefer logPanicAndDie(log.G(requestCtx))\n\n\ttaskID := req.ID\n\texecID := req.ExecID\n\n\tlogger := log.G(requestCtx).WithField(\"TaskID\", taskID).WithField(\"ExecID\", execID)\n\tlogger.Debug(\"exec\")\n\n\textraData, err := unmarshalExtraData(req.Spec)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to unmarshal extra data\")\n\t}\n\n\t// Just provide runc the options it knows about, not our wrapper\n\treq.Spec = extraData.RuncOptions\n\n\tbundleDir := bundle.Dir(filepath.Join(containerRootDir, taskID))\n\n\tvar ioConnectorSet vm.IOProxy\n\n\tif vm.IsAgentOnlyIO(req.Stdout, logger) {\n\t\tioConnectorSet = vm.NewNullIOProxy()\n\t} else {\n\t\t// Override the incoming stdio FIFOs, which have paths from the host that we can't use\n\t\tfifoSet, err := cio.NewFIFOSetInDir(bundleDir.RootPath(), fifoName(taskID, execID), req.Terminal)\n\t\tif err != nil {\n\t\t\tlogger.WithError(err).Error(\"failed opening stdio FIFOs\")\n\t\t\treturn nil, errors.Wrap(err, \"failed to open stdio FIFOs\")\n\t\t}\n\n\t\tvar stdinConnectorPair *vm.IOConnectorPair\n\t\tif req.Stdin != \"\" {\n\t\t\treq.Stdin = fifoSet.Stdin\n\t\t\tstdinConnectorPair = &vm.IOConnectorPair{\n\t\t\t\tReadConnector: vm.VSockAcceptConnector(extraData.StdinPort),\n\t\t\t\tWriteConnector: vm.FIFOConnector(fifoSet.Stdin),\n\t\t\t}\n\t\t}\n\n\t\tvar stdoutConnectorPair *vm.IOConnectorPair\n\t\tif 
req.Stdout != \"\" {\n\t\t\treq.Stdout = fifoSet.Stdout\n\t\t\tstdoutConnectorPair = &vm.IOConnectorPair{\n\t\t\t\tReadConnector: vm.FIFOConnector(fifoSet.Stdout),\n\t\t\t\tWriteConnector: vm.VSockAcceptConnector(extraData.StdoutPort),\n\t\t\t}\n\t\t}\n\n\t\tvar stderrConnectorPair *vm.IOConnectorPair\n\t\tif req.Stderr != \"\" {\n\t\t\treq.Stderr = fifoSet.Stderr\n\t\t\tstderrConnectorPair = &vm.IOConnectorPair{\n\t\t\t\tReadConnector: vm.FIFOConnector(fifoSet.Stderr),\n\t\t\t\tWriteConnector: vm.VSockAcceptConnector(extraData.StderrPort),\n\t\t\t}\n\t\t}\n\n\t\tioConnectorSet = vm.NewIOConnectorProxy(stdinConnectorPair, stdoutConnectorPair, stderrConnectorPair)\n\t}\n\n\tresp, err := ts.taskManager.ExecProcess(requestCtx, req, ts.runcService, ioConnectorSet)\n\tif err != nil {\n\t\tlogger.WithError(err).Error(\"exec failed\")\n\t\treturn nil, err\n\t}\n\n\tlogger.Debug(\"exec succeeded\")\n\treturn resp, nil\n}", "func (d Dispatcher) ExecExecutionTimeString(id string, hash string) (string, error) {\n\te, err := d.GetBC().FindExec(id, hash)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn time.Unix(e.GetExecutionTime(), 0).String(), nil\n}", "func RunCommand(custom string) (string, error) {\r\n custom = strings.TrimSpace(strings.TrimSuffix(custom, \"$\"))\r\n pieces := strings.Split(custom, \" \")\r\n cmd := exec.Command(pieces[0])\r\n cmd.Args = pieces\r\n cmd.Stdin = os.Stdin\r\n output, oops := cmd.CombinedOutput()\r\n return string(output), oops\r\n}", "func (mr *MockexecuterMockRecorder) Exec(ctx, commandLine interface{}, args ...interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\tvarargs := append([]interface{}{ctx, commandLine}, args...)\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"Exec\", reflect.TypeOf((*Mockexecuter)(nil).Exec), varargs...)\n}", "func SafeExec(cmd *exec.Cmd) string {\n\toutBytes, err := cmd.CombinedOutput()\n\tout := string(outBytes)\n\tif err != nil {\n\t\tif out != \"\" {\n\t\t\tout += \"\\n\"\n\t\t}\n\t\tout += fmt.Sprintf(\"error: %v\\n\", err)\n\t}\n\treturn out\n}", "func (ui *ReplApp) evalLine(line string) (quit bool) {\n\t// these vars are used in many places below\n\tengine := ui.engine\n\tcmds := ui.commands\n\toutput := ui.output\n\tmeProfileFile := ui.meProfileFile\n\tcontactsFile := ui.contactsFile\n\tprivateKeyFile := ui.privateKeyFile\n\n\t// parse raw line into command struct\n\tcmd := cmds.parse(line)\n\tif cmd.err != nil {\n\t\tif cmd.cmd != \"\" { // ignore blank lines\n\t\t\tlog.Printf(\"Error: %s\\n\", cmd.err)\n\t\t}\n\t\treturn\n\t}\n\n\t// process specific command\n\t// each block could really be in it's own function\n\t// or a function in the command definitions\n\tswitch cmd.cmd {\n\tcase \"exit\":\n\t\treturn true\n\n\tcase \"help\":\n\t\tfmt.Fprintln(output, cmds.help()) // uses commanddefs\n\n\tcase \"ip\":\n\t\tfmt.Fprintln(output, \"getting external ip...\")\n\t\tip, err := GetIP()\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t\treturn\n\t\t}\n\t\tfmt.Fprintf(output, \"external IP address:\\t%s\\nlistening on port:\\t%s\\n\", ip, engine.Me.Port)\n\n\tcase \"me\":\n\t\tswitch cmd = *cmd.leaf(); cmd.cmd {\n\t\tcase \"show\":\n\t\t\tfmt.Fprintf(output, \"I am \\\"%s\\\"\\nPubKey: %s\\nPrivKey: %s\\n\",\n\t\t\t\tengine.Me,\n\t\t\t\tbase64.RawStdEncoding.EncodeToString(engine.Me.PublicSigningKey),\n\t\t\t\tbase64.RawStdEncoding.EncodeToString(engine.PrivSignKey))\n\n\t\tcase \"edit\":\n\t\t\tp, err := ParseProfile(cmd.args[0])\n\t\t\tif err != nil 
{\n\t\t\t\tlog.Println(err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tp.PublicSigningKey = engine.Me.PublicSigningKey // preserve key\n\t\t\terr = WriteProfile(p, meProfileFile)\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tengine.Me = p\n\n\t\t\terr = WritePrivateKey(engine.PrivSignKey, privateKeyFile)\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(err)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\tcase \"contacts\":\n\n\t\tswitch cmd = *cmd.leaf(); cmd.cmd {\n\t\tcase \"list\":\n\t\t\tfor i, c := range engine.Contacts {\n\t\t\t\tif c != nil {\n\t\t\t\t\tfmt.Fprintf(output, \"%d\\t%s\\t%s\\n\", i, c,\n\t\t\t\t\t\tbase64.RawStdEncoding.EncodeToString(c.PublicSigningKey))\n\t\t\t\t}\n\t\t\t}\n\n\t\tcase \"add\":\n\t\t\tvar p *Profile\n\t\t\targ := cmd.args[0]\n\t\t\tn, err := strconv.Atoi(arg)\n\t\t\tif err == nil {\n\t\t\t\tif sess, ok := engine.GetSession(n); ok {\n\t\t\t\t\tp = sess.Other\n\t\t\t\t\tif p == nil {\n\t\t\t\t\t\tlog.Printf(\"session %d had a nil Other\", n)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tlog.Printf(\"%d not found\\n\", n)\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t} else {\n\t\t\t\tp, err = ParseProfile(arg)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Println(err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// overwrite contact if existing Equal() one found\n\t\t\t// TODO: do i really want to overwrite? what about having 2\n\t\t\t// contacts with different names but the same address?\n\t\t\t// i guess the question boils down to the definition of Profile\n\t\t\tif index := engine.FindContact(p); index >= 0 {\n\t\t\t\told := engine.Contacts[index]\n\t\t\t\tengine.Contacts[index] = p\n\t\t\t\tlog.Printf(\"overwrote #%d '%s' with '%s'\\n\", index, old, p)\n\t\t\t} else {\n\t\t\t\tengine.AddContact(p)\n\t\t\t\tlog.Printf(\"added %s\\n\", p)\n\t\t\t}\n\n\t\t\terr = WriteContacts(engine.Contacts, contactsFile)\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(err)\n\t\t\t\tlog.Println(\"did not save changes to disk\")\n\t\t\t}\n\n\t\tcase \"delete\":\n\t\t\targ := cmd.args[0]\n\t\t\tn, err := strconv.Atoi(arg)\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tremoved := engine.Contacts[n]\n\t\t\tif engine.RemoveContact(n) {\n\t\t\t\tlog.Printf(\"deleted %s\\n\", removed)\n\n\t\t\t\terr = WriteContacts(engine.Contacts, contactsFile)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Println(err)\n\t\t\t\t\tlog.Println(\"did not save changes to disk\")\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tlog.Printf(\"%d not found\\n\", n)\n\t\t\t}\n\t\t}\n\n\tcase \"requests\":\n\t\tswitch cmd = *cmd.leaf(); cmd.cmd {\n\t\tcase \"list\":\n\t\t\tfor i, r := range engine.Requests {\n\t\t\t\tif r != nil {\n\t\t\t\t\tfmt.Fprintf(output, \"%d\\t%s at %s (%s ago)\\n\", i,\n\t\t\t\t\t\tr.Profile,\n\t\t\t\t\t\tr.Time().Format(time.Kitchen),\n\t\t\t\t\t\ttime.Since(r.Time()))\n\t\t\t\t}\n\t\t\t}\n\n\t\tcase \"accept\":\n\t\t\targ := cmd.args[0]\n\t\t\tn, err := strconv.Atoi(arg)\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif _, ok := engine.GetRequest(n); !ok {\n\t\t\t\tlog.Printf(\"%d not found\\n\", n)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\terr = engine.AcceptRequest(engine.Requests[n])\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tlog.Println(\"request accepted\")\n\n\t\tcase \"reject\":\n\t\t\targ := cmd.args[0]\n\t\t\tn, err := strconv.Atoi(arg)\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif engine.RemoveRequest(n) 
{\n\t\t\t\tlog.Println(\"removed request\")\n\t\t\t} else {\n\t\t\t\tlog.Printf(\"%d not found\\n\", n)\n\t\t\t}\n\t\t}\n\n\tcase \"sessions\":\n\t\tswitch cmd = *cmd.leaf(); cmd.cmd {\n\t\tcase \"list\":\n\t\t\tfor i, s := range engine.Sessions {\n\t\t\t\tif s != nil {\n\t\t\t\t\tfmt.Fprintf(output, \"%d\\t%s\\n\", i, s)\n\t\t\t\t}\n\t\t\t}\n\n\t\tcase \"start\":\n\t\t\tvar p *Profile\n\t\t\targ := cmd.args[0]\n\t\t\tn, err := strconv.Atoi(arg)\n\t\t\tif err == nil {\n\t\t\t\tif p, _ = engine.GetContact(n); p == nil {\n\t\t\t\t\tlog.Printf(\"%d not found\\n\", n)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tp, err = ParseProfile(arg)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Println(err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tif i := engine.FindContact(p); i >= 0 {\n\t\t\t\t\tp = engine.Contacts[i] // use profile from contacts if available\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\terr = engine.SendRequest(p)\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tlog.Println(\"request sent\")\n\n\t\tcase \"drop\":\n\t\t\targ := cmd.args[0]\n\t\t\tn, err := strconv.Atoi(arg)\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif engine.RemoveSession(n) {\n\t\t\t\tlog.Println(\"dropped session\")\n\t\t\t} else {\n\t\t\t\tlog.Printf(\"%d not found\\n\", n)\n\t\t\t}\n\t\t}\n\n\tcase \"msg\":\n\t\tn, err := strconv.Atoi(cmd.args[0])\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t\treturn\n\t\t}\n\t\tif _, ok := engine.GetSession(n); !ok {\n\t\t\tlog.Printf(\"%d not found\\n\", n)\n\t\t\treturn\n\t\t}\n\n\t\terr = engine.Sessions[n].SendText(cmd.args[1])\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t\treturn\n\t\t}\n\t\tlog.Println(\"sent\")\n\n\tcase \"show\":\n\t\tn, err := strconv.Atoi(cmd.args[0])\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t\treturn\n\t\t}\n\t\ts, ok := engine.GetSession(n)\n\t\tif !ok {\n\t\t\tlog.Printf(\"%d not found\\n\", n)\n\t\t\treturn\n\t\t}\n\n\t\tconst num = 5\n\t\tstart := len(s.Msgs) - num\n\t\tif start < 0 {\n\t\t\tstart = 0\n\t\t} // clamp\n\t\tshow := s.Msgs[start:]\n\t\tfor i, t := range show {\n\t\t\tfmt.Fprintf(output, \"%d %s\\t| %s > %s\\n\", i,\n\t\t\t\tt.From().Name,\n\t\t\t\tt.TimeStamp.Time().Format(time.Kitchen),\n\t\t\t\tt.Message)\n\t\t}\n\n\t}\n\n\treturn\n}", "func Executor(s string) {\n\ts = strings.TrimSpace(s)\n\tcmdStrings := strings.Split(s, \" \")\n\tif s == \"\" {\n\t\treturn\n\t} else if s == \"quit\" || s == \"exit\" {\n\t\tfmt.Println(\"Bye!\")\n\t\tos.Exit(0)\n\t\treturn\n\t}\n\tswitch cmdStrings[0] {\n\tcase \"install-px\":\n\t\tinstallPX()\n\tcase \"deploy\":\n\t\tif len(cmdStrings) < 2 {\n\t\t\tfmt.Println(\"deploy requires an application name\")\n\t\t\treturn\n\t\t}\n\t\tdeploy(\"default\", cmdStrings[1])\n\tcase \"benchmark\":\n\t\tswitch cmdStrings[1] {\n\t\tcase \"postgres\":\n\t\t\tpodExec(\"default\", \"app=postgres\", \"/usr/bin/psql -c 'create database pxdemo;'\")\n\t\t\tpodExec(\"default\", \"app=postgres\", \"/usr/bin/pgbench -n -i -s 50 pxdemo;\")\n\t\t\tpodExec(\"default\", \"app=postgres\", \"/usr/bin/psql pxdemo -c 'select count(*) from pgbench_accounts;'\")\n\t\tdefault:\n\t\t\tfmt.Printf(\"%s benchmark not supported\\n\", cmdStrings[1])\n\t\t}\n\tcase \"px\":\n\t\tif len(cmdStrings) < 2 {\n\t\t\tfmt.Println(\"deploy requires an application name\")\n\t\t\treturn\n\t\t}\n\t\tswitch cmdStrings[1] {\n\t\tcase \"connect\":\n\t\t\tpxInit()\n\t\tcase \"snap\":\n\t\t\tif len(cmdStrings) < 3 {\n\t\t\t\tfmt.Println(\"px snap requires an application 
name\")\n\t\t\t\treturn\n\t\t\t}\n\t\t\tpxSnap(cmdStrings[2])\n\t\tcase \"backup\":\n\t\t\tif len(cmdStrings) < 3 {\n\t\t\t\tfmt.Println(\"px backup requires an PVC name\")\n\t\t\t\treturn\n\t\t\t}\n\t\t\tpxBackup(cmdStrings[2])\n\t\tcase \"backup-status\":\n\t\t\tif len(cmdStrings) < 3 {\n\t\t\t\tfmt.Println(\"px backup-status requires a PVC name\")\n\t\t\t\treturn\n\t\t\t}\n\t\t\tpxBackupStatus(cmdStrings[2])\n\t\tdefault:\n\t\t\tfmt.Printf(\"px %s is not a valid command\\n\", cmdStrings[1])\n\t\t}\n\tcase \"pre-flight-check\":\n\t\tpreflight()\n\tdefault:\n\t\tfmt.Printf(\"%s is not a supported option\", s)\n\t}\n\treturn\n}", "func (p *Qlang) SafeExec(code []byte, fname string) (err error) {\n\n\tdefer func() {\n\t\tif e := recover(); e != nil {\n\t\t\tswitch v := e.(type) {\n\t\t\tcase string:\n\t\t\t\terr = errors.New(v)\n\t\t\tcase error:\n\t\t\t\terr = v\n\t\t\tdefault:\n\t\t\t\tpanic(e)\n\t\t\t}\n\t\t}\n\t}()\n\n\terr = p.Exec(code, fname)\n\treturn\n}", "func (ne *NSEnter) Exec(cmd string, args []string) exec.Cmd {\n\thostProcMountNsPath := filepath.Join(ne.hostRootFsPath, mountNsPath)\n\tfullArgs := append([]string{fmt.Sprintf(\"--mount=%s\", hostProcMountNsPath), \"--\"},\n\t\tappend([]string{ne.AbsHostPath(cmd)}, args...)...)\n\tklog.V(5).Infof(\"Running nsenter command: %v %v\", nsenterPath, fullArgs)\n\treturn ne.executor.Command(nsenterPath, fullArgs...)\n}", "func TestCmdExec(t *testing.T) {\n\tname := os.Args[0]\n\targs := []string{\"-test.run=TestHelperProcess\", \"--\", \"echo\"}\n\n\tc := &Cmd{}\n\tc.Exec(name, args...)\n\n\tassert.Equal(t, name, c.name)\n\tassert.Equal(t, args, c.args)\n\tassert.Equal(t, \"*exec.Cmd\", reflect.TypeOf(c.cmd).String())\n}", "func StreamedExec(pipe ProcessStream, name string, args ...string) (string, string) {\n\tcmd := ExecCommand(name, args...)\n\tvar stdoutBuf, stderrBuf bytes.Buffer\n\tcmd.Stdout = io.MultiWriter(pipe, &stdoutBuf)\n\tcmd.Stderr = io.MultiWriter(&stderrBuf)\n\tcmd.Run()\n\toutStr, errStr := string(stdoutBuf.Bytes()), string(stderrBuf.Bytes())\n\n\treturn outStr, errStr\n}", "func ExecCommand(args ...string) ([]byte, error) {\n\te := New()\n\tcmd := e.ExecCommand(NoSandbox, false, args[0], args[1:]...)\n\treturn cmd.CombinedOutput()\n}", "func Execute(args []string) {\n\tif len(args) <= 1 {\n\t\tos.Exit(1)\n\t}\n\n\tcmd := args[1]\n\n\tfile, err := os.Open(h.GetYolofile())\n\tif err != nil {\n\t panic(err)\n\t}\n\tdefer file.Close()\n\n\tvar cKey string\n\tmaps := make(map[string][]string)\n\n\tscanner := bufio.NewScanner(file)\n\tfor scanner.Scan() {\n\t\tline := strings.TrimSpace(scanner.Text())\n\n\t\tmethod, _ := regexp.MatchString(\"^(.*):\", line)\n\t\tif method {\n\t\t\tcKey = strings.Replace(line, \":\", \"\", -1)\n\t\t\tmaps[cKey] = []string{}\n\n\t\t\tcontinue\n\t\t}\n\n\t\tif line == \"\" {\n\t\t\tcontinue\n\t\t}\n\n\t\tmaps[cKey] = append(maps[cKey], strings.TrimSpace(line) + \";\")\n\t}\n\n\tif _, isKeyPresent := maps[cmd]; !isKeyPresent {\n\t\tfmt.Println(\"*command not found in Yolofile\")\n\t\tos.Exit(1)\n }\n\n\texe := exec.Command(\"/bin/sh\", \"-c\", strings.Join(maps[cmd][:], \" \"))\n\n\tvar stdout, stderr bytes.Buffer\n exe.Stdout = &stdout\n exe.Stderr = &stderr\n\n\tif err := exe.Run(); err != nil {\n\t\tpanic(err)\n\t}\n\n\tfmt.Printf(string(stdout.Bytes()))\n}", "func Exec(config *ssh.ClientConfig, addr string, workDir string, cmd string, nixConf string) (bytes.Buffer, error) {\n\tvar b bytes.Buffer // import \"bytes\"\n\n\t// Connect\n\tclient, err := ssh.Dial(\"tcp\", net.JoinHostPort(addr, 
\"22\"), config)\n\tif err != nil {\n\t\treturn b, err\n\t}\n\t// Create a session. It is one session per command.\n\tsession, err := client.NewSession()\n\tif err != nil {\n\t\treturn b, err\n\t}\n\tdefer session.Close()\n\n\tsession.Stderr = os.Stderr // get output\n\tsession.Stdout = &b // get output\n\t// you can also pass what gets input to the stdin, allowing you to pipe\n\t// content from client to server\n\t// session.Stdin = bytes.NewBufferString(\"My input\")\n\n\t// Finally, run the command\n\tfullCmd := \". ~/.nix-profile/etc/profile.d/nix.sh && cd \" + workDir + \" && nix-shell \" + nixConf + \" --command '\" + cmd + \"'\"\n\tfmt.Println(fullCmd)\n\terr = session.Run(fullCmd)\n\treturn b, err\n}", "func (self *Build) exec(moduleLabel core.Label, fileType core.FileType) error {\n\tthread := createThread(self, moduleLabel, fileType)\n\n\tsourceData, err := self.sourceFileReader(moduleLabel)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to execute %v: read failed: %v\", moduleLabel, err)\n\t}\n\n\t_, err = starlark.ExecFile(thread, moduleLabel.String(), sourceData,\n\t\tbuiltins.InitialGlobals(fileType))\n\treturn err\n}", "func (r *Client) ExecuteAndReturn(s ...string) string {\n\n\tcmd := exec.Command(RhythmboxClient, s...) //s[0], s[1]) //\"--enqueue\", \"file:///home/ae/Music/Doolittle%20%5BMFSL%5D/Pixies%20-%20Doolittle%20(MFSL)%20-%2002%20-%20Tame.flac\")\n\tout, err := cmd.Output()\n\t// fmt.Println(s)\n\t// fmt.Println(out)\n\tif err != nil {\n\t\treturn string(err.Error())\n\t}\n\n\treturn string(out)\n}", "func Exec(args ...string) error {\n\tcmd := buildCommand(args...)\n\tstdout, err := cmd.StdoutPipe()\n\tif err != nil {\n\t\treturn errors.WithMessagef(err, \"exec: failed to pipe RunnableCmd: %v\", cmd.Args)\n\t}\n\tbuf := new(bytes.Buffer)\n\terr = errors.\n\t\tDo(cmd.Start).\n\t\tDo(func() error {\n\t\t\t_, err := buf.ReadFrom(stdout)\n\t\t\treturn err\n\t\t}).\n\t\tDo(cmd.Wait).\n\t\tErr()\n\tfmt.Println(buf.String())\n\treturn errors.WithMessagef(err, \"exec: failed to execute RunnableCmd: %v\", cmd.Args)\n}", "func NewExec(binaryPath string, args ...string) (PostRenderer, error) {\n\tfullPath, err := getFullPath(binaryPath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &execRender{fullPath, args}, nil\n}", "func Exec(name string, args ...string) (io.ReadWriteCloser, error) {\n\tconn, err := net.Dial(\"unix\", SocketFile)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif name == \"\" {\n\t\treturn nil, errEmptyName\n\t}\n\tif err = writeString(conn, name); err != nil {\n\t\treturn nil, err\n\t}\n\tfor _, arg := range args {\n\t\tif arg == \"\" {\n\t\t\treturn nil, errEmptyArg\n\t\t}\n\t\tif err = writeString(conn, arg); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\twriteString(conn, \"\")\n\treturn conn, nil\n}", "func (p *Program) exec(c ExecCommand, fn ExecCallback) {\n\tif err := p.ReleaseTerminal(); err != nil {\n\t\t// If we can't release input, abort.\n\t\tif fn != nil {\n\t\t\tgo p.Send(fn(err))\n\t\t}\n\t\treturn\n\t}\n\n\tc.SetStdin(p.input)\n\tc.SetStdout(p.output)\n\tc.SetStderr(os.Stderr)\n\n\t// Execute system command.\n\tif err := c.Run(); err != nil {\n\t\t_ = p.RestoreTerminal() // also try to restore the terminal.\n\t\tif fn != nil {\n\t\t\tgo p.Send(fn(err))\n\t\t}\n\t\treturn\n\t}\n\n\t// Have the program re-capture input.\n\terr := p.RestoreTerminal()\n\tif fn != nil {\n\t\tgo p.Send(fn(err))\n\t}\n}", "func Run(dir string, commandLine string) ([]byte, error) {\n\n\t// Split commandLine into an array separated by 
whitespace\n\targs := strings.Fields(commandLine)\n\tcmd := exec.Command(args[0], args[1:]...)\n\tcmd.Dir = dir\n\tvar buf bytes.Buffer\n\tcmd.Stdout = &buf\n\tcmd.Stderr = &buf\n\terr := cmd.Run()\n\tout := buf.Bytes()\n\tif err != nil {\n\t\treturn out, err\n\t}\n\treturn out, nil\n}", "func SimpleExecInPath(dir, cmdName string, arguments ...string) {\n\tcmd := exec.Command(cmdName, arguments...) // nolint: gosec\n\tcmd.Stdin = os.Stdin\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\tcmd.Dir = dir\n\terr := cmd.Run()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n}", "func (pm *Manager) Exec(desc, cmdName string, args ...string) (string, string, error) {\n\treturn pm.ExecDir(-1, \"\", desc, cmdName, args...)\n}", "func ExecuteShell(ctx context.Context) interface{} {\n\treturn func(line string) (io.Reader, error) {\n\t\tc := exec.Command(\"sh\", \"-\")\n\n\t\tcopyDone := make(chan error)\n\t\ttimeout := time.After(ContextGetTimeout(ctx))\n\n\t\toutput := new(bytes.Buffer)\n\t\tif stdout, err := c.StdoutPipe(); err == nil {\n\t\t\tfanout := io.MultiWriter(os.Stdout, output)\n\t\t\tgo func() {\n\t\t\t\t_, err := io.Copy(fanout, stdout)\n\t\t\t\tcopyDone <- err\n\t\t\t}()\n\t\t} else {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif stderr, err := c.StderrPipe(); err == nil {\n\t\t\tgo func() {\n\t\t\t\tio.Copy(os.Stderr, stderr)\n\t\t\t}()\n\t\t} else {\n\t\t\treturn nil, err\n\t\t}\n\t\tstdin, err := c.StdinPipe()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif err := c.Start(); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif _, err := stdin.Write([]byte(line)); err != nil {\n\t\t\tstdin.Close()\n\t\t\treturn nil, err\n\t\t}\n\t\tstdin.Close() // finished\n\t\terr = c.Wait()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\t// Waits for the stdout and stderr copy goroutines to complete.\n\t\tselect {\n\t\tcase err = <-copyDone:\n\t\t\tbreak\n\t\tcase <-timeout:\n\t\t\tbreak\n\t\t}\n\t\treturn output, err\n\t}\n}", "func RunExecV(c string) string {\n x := exec.Command(\"bash\", \"-c\", c)\n out, err := x.CombinedOutput()\n if err != nil {\n log.Fatalf(\"Error: %s\\n\", err)\n }\n r := fmt.Sprintf(\"%s\", out)\n z := strings.Replace(r, \"\\n\", \"\", -1)\n return z\n}", "func (ctx *Context) Exec(cmd []string) *ExecResult {\n\treturn ctx.ExecWithParams(ExecParams{Cmd: cmd})\n}", "func SimpleExec(cmdName string, arguments ...string) {\n\tcmd := exec.Command(cmdName, arguments...) 
// nolint: gosec\n\tcmd.Stdin = os.Stdin\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\terr := cmd.Run()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n}", "func Exec(cmds []string, host config.Host, pwd string, force bool) (string, error) {\n\tvar err error\n\tvar auth goph.Auth\n\tvar callback ssh.HostKeyCallback\n\n\tif force {\n\t\tcallback = ssh.InsecureIgnoreHostKey()\n\t} else {\n\t\tif callback, err = DefaultKnownHosts(); err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\n\tif host.Keyfile != \"\" {\n\t\t// Start new ssh connection with private key.\n\t\tif auth, err = goph.Key(host.Keyfile, pwd); err != nil {\n\t\t\tif os.Getenv(\"GO\") == \"DEBUG\" {\n\t\t\t\tfmt.Println(err)\n\t\t\t}\n\t\t\t// ssh: this private key is passphrase protected\n\t\t\tpwd = common.AskPass(\"Private key passphrase: \")\n\t\t\tif auth, err = goph.Key(host.Keyfile, pwd); err != nil {\n\t\t\t\treturn \"\", err\n\t\t\t}\n\t\t}\n\t} else {\n\t\tif pwd == \"\" {\n\t\t\tpwd = common.AskPass(\n\t\t\t\tfmt.Sprintf(\"%s@%s's password: \", host.User, host.Addr),\n\t\t\t)\n\t\t}\n\t\tauth = goph.Password(pwd)\n\t}\n\n\tif os.Getenv(\"GO\") == \"DEBUG\" {\n\t\tfmt.Println(host, pwd, force)\n\t}\n\n\tclient, err := goph.NewConn(&goph.Config{\n\t\tUser: host.User,\n\t\tAddr: host.Addr,\n\t\tPort: host.Port,\n\t\tAuth: auth,\n\t\tTimeout: 5 * time.Second,\n\t\tCallback: callback,\n\t})\n\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\t// Defer closing the network connection.\n\tdefer client.Close()\n\n\t// Execute your command.\n\tout, err := client.Run(strings.Join(cmds, \" && \"))\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\t// Get your output as []byte.\n\treturn string(out), nil\n}", "func (e *Echo) Run(cmdStr string) string {\n\treturn exec.Run(e.Eval(cmdStr))\n}", "func (c *Command) Execute(user string, msg string, args []string) {\n}", "func ExecBuiltin(args []string) {\n\tif len(args) <= 0 {\n\t\tPanic(\"No parameters\")\n\t}\n\n\t//TODO: Loadings\n\tswitch args[0] {\n\tcase \"Error\":\n\t\tError(strings.Join(args[1:], \" \"))\n\tcase \"Warn\":\n\t\tWarn(strings.Join(args[1:], \" \"))\n\tcase \"Info\":\n\t\tInfo(strings.Join(args[1:], \" \"))\n\tcase \"Made\":\n\t\tMade(strings.Join(args[1:], \" \"))\n\tcase \"Ask\":\n\t\tif noColor {\n\t\t\tfmt.Print(\"[?] \")\n\t\t} else {\n\t\t\tfmt.Print(\"\\033[38;5;99;01m[?]\\033[00m \")\n\t\t}\n\t\tfmt.Println(strings.Join(args[1:], \" \"))\n\tcase \"AskYN\":\n\t\tif AskYN(strings.Join(args[1:], \" \")) {\n\t\t\tos.Exit(0)\n\t\t}\n\t\tos.Exit(1)\n\tcase \"Read\":\n\t\treader := bufio.NewReader(os.Stdin)\n\t\ttext, _ := reader.ReadString('\\n')\n\t\tfmt.Print(text)\n\tcase \"ReadSecure\":\n\t\tfmt.Print(ReadSecure())\n\tcase \"AskList\":\n\t\tvalues := \"\"\n\t\tdflt := -1\n\n\t\tif len(args) >= 3 {\n\t\t\tvalues = args[2]\n\t\t\tif len(args) >= 4 {\n\t\t\t\tif i, err := strconv.Atoi(args[3]); err == nil {\n\t\t\t\t\tdflt = i\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tos.Exit(AskList(strings.Split(values, \",\"), dflt, args[1]))\n\tcase \"Bell\":\n\t\tBell()\n\t}\n\tos.Exit(0)\n}" ]
[ "0.6906744", "0.6147474", "0.60744816", "0.60500157", "0.5863146", "0.58578545", "0.5813975", "0.5777007", "0.5759011", "0.5743766", "0.5711614", "0.5693573", "0.56703174", "0.56509113", "0.5646578", "0.5635001", "0.5626724", "0.5615617", "0.55758286", "0.5570272", "0.5558317", "0.5553162", "0.5528124", "0.5485141", "0.54798293", "0.54765713", "0.54761225", "0.54510874", "0.54352313", "0.54304135", "0.5421417", "0.54087985", "0.5406896", "0.5403255", "0.5392832", "0.5356753", "0.5356508", "0.5355068", "0.53527975", "0.53433454", "0.53239137", "0.5317961", "0.53012615", "0.52926797", "0.52880925", "0.5285033", "0.52812165", "0.52707744", "0.52345526", "0.52292746", "0.5228947", "0.5222655", "0.521348", "0.5211066", "0.5195383", "0.5192933", "0.5192783", "0.51913065", "0.5189269", "0.5185395", "0.51839036", "0.5182636", "0.5172785", "0.5170879", "0.51699877", "0.5159173", "0.5155436", "0.51460046", "0.5136873", "0.5117621", "0.51159126", "0.51053077", "0.5097169", "0.5091077", "0.50818765", "0.5081229", "0.5075297", "0.50680965", "0.50657165", "0.5061586", "0.50596637", "0.50579107", "0.5051221", "0.5047278", "0.50425416", "0.5042321", "0.50081784", "0.5005001", "0.50046897", "0.500391", "0.49998885", "0.499259", "0.49921843", "0.49895343", "0.4989202", "0.49890965", "0.49870715", "0.49856296", "0.49840823", "0.498281" ]
0.7586125
0
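Most of the exec-style negatives above reduce to the same core pattern: build an exec.Cmd, run it, and capture its combined stdout/stderr. As a point of reference, here is a minimal, self-contained Go sketch of that pattern; the helper name runCombined is illustrative and does not come from any snippet above.

```go
package main

import (
	"fmt"
	"os/exec"
)

// runCombined runs a command and returns its combined stdout+stderr,
// mirroring the CombinedOutput-based helpers among the negatives.
func runCombined(name string, args ...string) (string, error) {
	out, err := exec.Command(name, args...).CombinedOutput()
	return string(out), err
}

func main() {
	out, err := runCombined("echo", "hello")
	if err != nil {
		fmt.Println("command failed:", err)
		return
	}
	fmt.Print(out)
}
```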
ShellExec exec command by shell cmdLine. eg: \"ls -al\"
func ShellExec(cmdLine string, shells ...string) (string, error) { // shell := "/bin/sh" shell := "sh" if len(shells) > 0 { shell = shells[0] } var out bytes.Buffer cmd := exec.Command(shell, "-c", cmdLine) cmd.Stdout = &out if err := cmd.Run(); err != nil { return "", err } return out.String(), nil }
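For clarity, a minimal usage sketch of the ShellExec document above. The function body is reproduced verbatim so the example compiles on its own; the main function and its "ls -al" invocation are illustrative assumptions, not part of the original record.

```go
package main

import (
	"bytes"
	"fmt"
	"os/exec"
)

// ShellExec as given in the document field above: runs cmdLine through a
// shell ("sh" by default, overridable via the variadic shells argument).
func ShellExec(cmdLine string, shells ...string) (string, error) {
	shell := "sh"
	if len(shells) > 0 {
		shell = shells[0]
	}
	var out bytes.Buffer
	cmd := exec.Command(shell, "-c", cmdLine)
	cmd.Stdout = &out
	if err := cmd.Run(); err != nil {
		return "", err
	}
	return out.String(), nil
}

func main() {
	// Equivalent to: sh -c "ls -al"; pass "bash" as a second argument to use bash instead.
	out, err := ShellExec("ls -al")
	if err != nil {
		fmt.Println("exec failed:", err)
		return
	}
	fmt.Print(out)
}
```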
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func ExecuteShell(ctx context.Context) interface{} {\n\treturn func(line string) (io.Reader, error) {\n\t\tc := exec.Command(\"sh\", \"-\")\n\n\t\tcopyDone := make(chan error)\n\t\ttimeout := time.After(ContextGetTimeout(ctx))\n\n\t\toutput := new(bytes.Buffer)\n\t\tif stdout, err := c.StdoutPipe(); err == nil {\n\t\t\tfanout := io.MultiWriter(os.Stdout, output)\n\t\t\tgo func() {\n\t\t\t\t_, err := io.Copy(fanout, stdout)\n\t\t\t\tcopyDone <- err\n\t\t\t}()\n\t\t} else {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif stderr, err := c.StderrPipe(); err == nil {\n\t\t\tgo func() {\n\t\t\t\tio.Copy(os.Stderr, stderr)\n\t\t\t}()\n\t\t} else {\n\t\t\treturn nil, err\n\t\t}\n\t\tstdin, err := c.StdinPipe()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif err := c.Start(); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif _, err := stdin.Write([]byte(line)); err != nil {\n\t\t\tstdin.Close()\n\t\t\treturn nil, err\n\t\t}\n\t\tstdin.Close() // finished\n\t\terr = c.Wait()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\t// Waits for the stdout and stderr copy goroutines to complete.\n\t\tselect {\n\t\tcase err = <-copyDone:\n\t\t\tbreak\n\t\tcase <-timeout:\n\t\t\tbreak\n\t\t}\n\t\treturn output, err\n\t}\n}", "func ExecLine(cmdLine string, workDir ...string) (string, error) {\n\tp := cmdline.NewParser(cmdLine)\n\n\t// create a new Cmd instance\n\tcmd := p.NewExecCmd()\n\tif len(workDir) > 0 {\n\t\tcmd.Dir = workDir[0]\n\t}\n\n\tbs, err := cmd.Output()\n\treturn string(bs), err\n}", "func Shell(command []string, r bool) []byte {\n\tif command[2] == \"cd\" {\n\t\tvar dir string\n\t\tvar p string\n\t\tif strings.HasPrefix(command[3], \"..\") {\n\t\t\tpathBack := strings.Repeat(\"/../\", strings.Count(command[3], \"../\"))\n\t\t\tdir = filepath.Dir(cwd + pathBack)\n\t\t} else {\n\t\t\tdir = command[3]\n\t\t}\n\t\tif runtime.GOOS == \"windows\" {\n\t\t\tp = filepath.FromSlash(strings.TrimSuffix(dir, \"\\r\"))\n\t\t\tif _, err := os.Stat(p); os.IsNotExist(err) {\n\t\t\t\tif filepath.VolumeName(cwd) == \"\" {\n\t\t\t\t\tdriveLetter, err := filepath.Abs(p)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tErrHandling(err.Error())\n\t\t\t\t\t\tAllOutput.Mutex.Lock()\n\t\t\t\t\t\tJobCount++\n\t\t\t\t\t\tAllOutput.List[JobCount] = &agentscommon.JobOutput{\"error\", err.Error()}\n\t\t\t\t\t\tAllOutput.Mutex.Unlock()\n\t\t\t\t\t}\n\t\t\t\t\tp = filepath.VolumeName(driveLetter) + cwd + command[3]\n\t\t\t\t} else {\n\t\t\t\t\tp = cwd + \"\\\\\" + command[3]\n\t\t\t\t}\n\t\t\t}\n\t\t} else if runtime.GOOS == \"linux\" || runtime.GOOS == \"darwin\" {\n\t\t\tp = filepath.ToSlash(strings.TrimSuffix(dir, \"\\r\"))\n\t\t}\n\t\tcwd = strings.TrimSuffix(p, \"\\r\")\n\t}\n\t//Check to see if any data was sent in command[3]\n\tif len(strings.TrimSpace(command[2])) == 0 {\n\t\tresult := []byte(\"No command arguments where passed\")\n\t\tif r {\n\t\t\treturn result\n\t\t} else {\n\t\t\tAllOutput.Mutex.Lock()\n\t\t\tJobCount++\n\t\t\tAllOutput.List[JobCount] = &agentscommon.JobOutput{\"error\", string(result)}\n\t\t\tAllOutput.Mutex.Unlock()\n\t\t\treturn nil\n\t\t}\n\n\t} else {\n\t\t//Executing shell and the arguments that follow\n\t\tresult := shellexec.ShellExecute(command, cwd)\n\n\t\tif r {\n\t\t\treturn result\n\t\t} else {\n\t\t\tAllOutput.Mutex.Lock()\n\t\t\tJobCount++\n\t\t\tAllOutput.List[JobCount] = &agentscommon.JobOutput{\"shell\", string(result)}\n\t\t\tAllOutput.Mutex.Unlock()\n\t\t\treturn nil\n\t\t}\n\t}\n}", "func main() {\n\n // Go requires an absolute path to the binary we want to execute, so we’ll 
use exec.LookPath to find it (probably /bin/ls).\n // Exec requires arguments in slice form (as apposed to one big string).\n binary, lookErr := exec.LookPath(\"ls\")\n if lookErr != nil {\n panic(lookErr)\n }\n\n args := []string{\"ls\", \"-a\", \"-l\", \"-h\"} //Exec requires arguments in slice form (as apposed to one big string). first argument should be the program name\n\n //Exec also needs a set of environment variables to use. Here we just provide our current environment.\n env := os.Environ()\n\n execErr := syscall.Exec(binary, args, env) //Here’s the actual syscall.Exec call.\n //If this call is successful, the execution of our process will end here and be replaced by the /bin/ls -a -l -h process.\n if execErr != nil {// If there is an error we’ll get a return value.\n panic(execErr)\n }\n}", "func CmdExec(usercommand string) []string {\n\tcmd := exec.Command(\"sh\", \"-c\", usercommand)\n\tout, err := cmd.Output()\n\tif err != nil {\n\t\tlog.Fatalf(\"cmd.Run() failed with %s\\n\", err)\n\t}\n\t//using \"\\n\" will append an empty line to the end of the resArray\n\tresArray := strings.Split(string(out), \"\\n\")\n\t//delete the last and empty element\n\tresArray = resArray[:len(resArray)-1]\n\treturn resArray\n}", "func executeShell(ctx context.Context, context ActionExecutionContext) error {\n\t//log.Printf(\"Exec: %s\", context.Action.Shell)\n\t//cmdAndArgs := strings.Split(s.Shell, \" \")\n\t//cmd := cmdAndArgs[0]\n\t//args := cmdAndArgs[1:]\n\tshuttlePath, _ := filepath.Abs(filepath.Dir(os.Args[0]))\n\n\tcmdOptions := go_cmd.Options{\n\t\tBuffered: false,\n\t\tStreaming: true,\n\t}\n\n\texecCmd := go_cmd.NewCmdOptions(cmdOptions, \"sh\", \"-c\", \"cd '\"+context.ScriptContext.Project.ProjectPath+\"'; \"+context.Action.Shell)\n\n\t//execCmd := exec.Command(\"sh\", \"-c\", context.Action.Shell)\n\texecCmd.Env = os.Environ()\n\tfor name, value := range context.ScriptContext.Args {\n\t\texecCmd.Env = append(execCmd.Env, fmt.Sprintf(\"%s=%s\", name, value))\n\t}\n\texecCmd.Env = append(execCmd.Env, fmt.Sprintf(\"plan=%s\", context.ScriptContext.Project.LocalPlanPath))\n\texecCmd.Env = append(execCmd.Env, fmt.Sprintf(\"tmp=%s\", context.ScriptContext.Project.TempDirectoryPath))\n\texecCmd.Env = append(execCmd.Env, fmt.Sprintf(\"project=%s\", context.ScriptContext.Project.ProjectPath))\n\t// TODO: Add project path as a shuttle specific ENV\n\texecCmd.Env = append(execCmd.Env, fmt.Sprintf(\"PATH=%s\", shuttlePath+string(os.PathListSeparator)+os.Getenv(\"PATH\")))\n\texecCmd.Env = append(execCmd.Env, fmt.Sprintf(\"SHUTTLE_PLANS_ALREADY_VALIDATED=%s\", context.ScriptContext.Project.LocalPlanPath))\n\n\tdoneChan := make(chan struct{})\n\tgo func() {\n\t\tdefer close(doneChan)\n\t\tfor execCmd.Stdout != nil || execCmd.Stderr != nil {\n\t\t\tselect {\n\t\t\tcase line, open := <-execCmd.Stdout:\n\t\t\t\tif !open {\n\t\t\t\t\texecCmd.Stdout = nil\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tcontext.ScriptContext.Project.UI.Infoln(\"%s\", line)\n\t\t\tcase line, open := <-execCmd.Stderr:\n\t\t\t\tif !open {\n\t\t\t\t\texecCmd.Stderr = nil\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tcontext.ScriptContext.Project.UI.Errorln(\"%s\", line)\n\t\t\t}\n\t\t}\n\t}()\n\n\t// Run and wait for Cmd to return, discard Status\n\tcontext.ScriptContext.Project.UI.Titleln(\"shell: %s\", context.Action.Shell)\n\n\t// stop cmd if context is cancelled\n\tgo func() {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\terr := execCmd.Stop()\n\t\t\tif err != nil {\n\t\t\t\tcontext.ScriptContext.Project.UI.Errorln(\"Failed to stop 
script '%s': %v\", context.Action.Shell, err)\n\t\t\t}\n\t\tcase <-doneChan:\n\t\t}\n\t}()\n\n\tselect {\n\tcase status := <-execCmd.Start():\n\t\t<-doneChan\n\t\tif status.Exit > 0 {\n\t\t\treturn errors.NewExitCode(4, \"Failed executing script `%s`: shell script `%s`\\nExit code: %v\", context.ScriptContext.ScriptName, context.Action.Shell, status.Exit)\n\t\t}\n\t\treturn nil\n\tcase <-ctx.Done():\n\t\treturn ctx.Err()\n\t}\n}", "func ExecCommand(commandLine string) (*exec.Cmd, error) {\n\n\tvar args, err = shellquote.Split(commandLine)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn exec.Command(args[0], args[1:]...), nil // #nosec\n\n}", "func WrapExec(cmd string, args []String, nArg uint32) (status syscall.Status){\n\n\n\tpath := \"/programs/\"+cmd\n\n\tif nArg == 0 {\n\n\t\tstatus = altEthos.Exec(path)\n\n\t} else if nArg == 1 {\n\n\t\tstatus = altEthos.Exec(path, &args[0])\n\n\t} else if nArg == 2 {\n\n\t\tstatus = altEthos.Exec(path, &args[0], &args[1])\n\n\t} else if nArg == 3 {\n\n\t\tstatus = altEthos.Exec(path, &args[0], &args[1], &args[2])\n\n\t} else if nArg == 4 {\n\n\t\tstatus = altEthos.Exec(path, &args[0], &args[1], &args[2], &args[3])\n\n\t}\n\n\treturn\n\n}", "func Shell(shellStdin string) (stdout, stderr string, err error) {\n\treturn Exec(\"sh\", \"\", shellStdin)\n}", "func Shell(cmd string, arg ...string) error {\n\tfmt.Println(cmd, strings.Join(arg, \" \"))\n\texe := exec.Command(cmd, arg...)\n\texe.Env = os.Environ()\n\texe.Stderr = os.Stderr\n\texe.Stdout = os.Stdout\n\texe.Stdin = os.Stdin\n\n\tif err := exe.Run(); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func processLine(cmdLine string) {\n\tcmdLine = strings.TrimSpace(cmdLine)\n\n\tcommandList := make([]exec.Cmd, 0)\n\n\tif len(cmdLine) == 0 {\n\t\treturn\n\t}\n\n\tpipeStages := strings.Split(cmdLine, pipeChar)\n\n\terr := createPipeStages(&commandList, pipeStages)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"%v: %v.\\n\", shellName, err)\n\t\treturn\n\t}\n\n\terr = connectPipeline(commandList)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"%v: Error with pipes: %v.\\n\", shellName, err)\n\t\treturn\n\t}\n\n\terr = executePipeline(commandList)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"%v: Error during execution: %v\\n\", shellName, err)\n\t\treturn\n\t}\n}", "func Shell(t *testing.T, name string, arg ...string) {\n\tt.Helper()\n\n\tbin, err := exec.LookPath(name)\n\tif err != nil {\n\t\tt.Skipf(\"skipping, binary %q not found: %v\", name, err)\n\t}\n\n\tt.Logf(\"$ %s %v\", bin, arg)\n\n\tcmd := exec.Command(bin, arg...)\n\tif err := cmd.Start(); err != nil {\n\t\tt.Fatalf(\"failed to start command %q: %v\", name, err)\n\t}\n\n\tif err := cmd.Wait(); err != nil {\n\t\t// Shell operations in these tests require elevated privileges.\n\t\tif cmd.ProcessState.ExitCode() == 1 /* unix.EPERM */ {\n\t\t\tt.Skipf(\"skipping, permission denied: %v\", err)\n\t\t}\n\n\t\tt.Fatalf(\"failed to wait for command %q: %v\", name, err)\n\t}\n}", "func execShCmd(strCmd string) ([]byte, error) {\n\tlog.Debug(\"Executing %+v\", strCmd)\n\tcmd := exec.Command(\"/bin/sh\", \"-c\", strCmd)\n\n\tstdoutpipe, err := cmd.StdoutPipe()\n\tif err != nil {\n\t\tlog.Error(\"Error stdout: %s. for command: %s\", err, strCmd)\n\t\treturn nil, err\n\t}\n\tstderrpipe, err := cmd.StderrPipe()\n\tif err != nil {\n\t\tlog.Error(\"Error stderr: %s. for command: %s\", err, strCmd)\n\t\treturn nil, err\n\t}\n\terr = cmd.Start()\n\tif err != nil {\n\t\tlog.Error(\"Error: %s. 
for command: %s\", err, strCmd)\n\t\treturn nil, err\n\t}\n\tstdout, errstderr := ioutil.ReadAll(stdoutpipe)\n\tstderr, errstdout := ioutil.ReadAll(stderrpipe)\n\n\tcmderr := cmd.Wait()\n\n\tif errstderr != nil {\n\t\tlog.Debug(\"Stdout err: %v\", errstderr)\n\t}\n\tif errstdout != nil {\n\t\tlog.Debug(\"Stderr err: %v\", errstdout)\n\t}\n\tlog.Debug(\"Stdout is: '%s'\\n\", stdout)\n\tlog.Debug(\"Stderr is: '%s'\\n\", stderr)\n\tif cmderr != nil {\n\t\tlog.Error(\"cmderr: %v, %v\", cmderr, string(stderr))\n\t}\n\treturn stdout, cmderr\n}", "func execShCmd(strCmd string) ([]byte, error) {\n\tlog.Debug(\"Executing %+v\", strCmd)\n\tcmd := exec.Command(\"/bin/sh\", \"-c\", strCmd)\n\n\tstdoutpipe, err := cmd.StdoutPipe()\n\tif err != nil {\n\t\tlog.Error(\"Error stdout: %s. for command: %s\", err, strCmd)\n\t\treturn nil, err\n\t}\n\tstderrpipe, err := cmd.StderrPipe()\n\tif err != nil {\n\t\tlog.Error(\"Error stderr: %s. for command: %s\", err, strCmd)\n\t\treturn nil, err\n\t}\n\terr = cmd.Start()\n\tif err != nil {\n\t\tlog.Error(\"Error: %s. for command: %s\", err, strCmd)\n\t\treturn nil, err\n\t}\n\tstdout, errstderr := ioutil.ReadAll(stdoutpipe)\n\tstderr, errstdout := ioutil.ReadAll(stderrpipe)\n\n\tcmderr := cmd.Wait()\n\n\tif errstderr != nil {\n\t\tlog.Debug(\"Stdout err: %v\", errstderr)\n\t}\n\tif errstdout != nil {\n\t\tlog.Debug(\"Stderr err: %v\", errstdout)\n\t}\n\tlog.Debug(\"Stdout is: '%s'\\n\", stdout)\n\tlog.Debug(\"Stderr is: '%s'\\n\", stderr)\n\tif cmderr != nil {\n\t\tlog.Error(\"cmderr: %v, %v\", cmderr, string(stderr))\n\t}\n\treturn stdout, cmderr\n}", "func execInSystem(execPath string, params []string, logsBuffer *bytes.Buffer, print bool) error {\n\tvar lock sync.Mutex\n\tvar c string\n\tvar cmdName string\n\n\tswitch runtime.GOOS {\n\tcase \"linux\":\n\t\tc = \"-c\"\n\t\tcmdName = \"sh\"\n\tcase \"windows\":\n\t\tc = \"/c\"\n\t\tcmdName = \"cmd\"\n\tdefault:\n\t\tlog.Panicf(\"System type error, got <%s>, but expect linux/windowns!\", runtime.GOOS)\n\t}\n\n\tcmd := exec.Command(cmdName, append(params, c)...)\n\tcmd.Dir = execPath\n\n\tstdout, err := cmd.StdoutPipe()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tstderr, err := cmd.StderrPipe()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// print log\n\toutReader := bufio.NewReader(stdout)\n\terrReader := bufio.NewReader(stderr)\n\tprintLog := func(reader *bufio.Reader, typex string) {\n\t\tfor {\n\t\t\tline, err := reader.ReadString('\\n')\n\t\t\tif print {\n\t\t\t\tlog.Printf(\"%s: %s\", typex, line)\n\t\t\t}\n\t\t\tif logsBuffer != nil {\n\t\t\t\tlock.Lock()\n\t\t\t\tlogsBuffer.WriteString(line)\n\t\t\t\tlock.Unlock()\n\t\t\t}\n\t\t\tif err != nil || err == io.EOF {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\n\tvar wg sync.WaitGroup\n\twg.Add(1)\n\tgo func() {\n\t\tdefer wg.Done()\n\t\tprintLog(outReader, \"Stdout\")\n\t}()\n\n\twg.Add(1)\n\tgo func() {\n\t\tdefer wg.Done()\n\t\tprintLog(errReader, \"Stderr\")\n\t}()\n\n\terr = cmd.Start()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\twg.Wait()\n\treturn cmd.Wait()\n}", "func (e *Executor) ExecWithTimeoutShell(target Target, dir string, env []string, timeout time.Duration, showOutput, foreground bool, sandbox SandboxConfig, cmd string) ([]byte, []byte, error) {\n\treturn e.ExecWithTimeoutShellStdStreams(target, dir, env, timeout, showOutput, foreground, sandbox, cmd, false)\n}", "func (c *Cluster) RunShell(cmd string, values interface{}, extraEnv []string) error {\n\terr := c.writeValuesYaml(values)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Collect environment 
variables\n\tenv := append(os.Environ(), MapToEnv(values, \"VALUE_\")...)\n\tenv = append(env, extraEnv...)\n\tenv = append(env, \"HOME=\"+c.Path)\n\n\topt := exe.Opt{\n\t\tDir: c.Path,\n\t\tEnv: env,\n\t}\n\n\t_, _, err = exe.Run(\"bash\", exe.Args{\"-c\", cmd}, opt, c.log)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func shellExecutor(rootCmd *cobra.Command, printer *Printer, meta *meta) func(s string) {\n\treturn func(s string) {\n\t\targs := strings.Fields(s)\n\n\t\tsentry.AddCommandContext(strings.Join(removeOptions(args), \" \"))\n\n\t\trootCmd.SetArgs(meta.CliConfig.Alias.ResolveAliases(args))\n\n\t\terr := rootCmd.Execute()\n\t\tif err != nil {\n\t\t\tif _, ok := err.(*interactive.InterruptError); ok {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tprintErr := printer.Print(err, nil)\n\t\t\tif printErr != nil {\n\t\t\t\t_, _ = fmt.Fprintln(os.Stderr, err)\n\t\t\t}\n\n\t\t\treturn\n\t\t}\n\n\t\t// command is nil if it does not have a Run function\n\t\t// ex: instance -h\n\t\tif meta.command == nil {\n\t\t\treturn\n\t\t}\n\n\t\tautoCompleteCache.Update(meta.command.Namespace)\n\n\t\tprintErr := printer.Print(meta.result, meta.command.getHumanMarshalerOpt())\n\t\tif printErr != nil {\n\t\t\t_, _ = fmt.Fprintln(os.Stderr, printErr)\n\t\t}\n\t}\n}", "func RunShellCmd(rb RunningBuild, step *vts.BuildStep, o, e io.Writer) error {\n\tif len(step.Args) > 0 {\n\t\tstep.Args[0] = \"set +h;umask 022;\" + step.Args[0]\n\t}\n\t_, err := rb.ExecBlocking(\"/tmp\", append([]string{\"/bin/bash\", \"-c\"}, step.Args...), o, e)\n\treturn err\n}", "func (r RealExecute) ExecCommand(com string, args ...string) ([]byte, error) {\n\t/* #nosec */\n\tcommand := exec.Command(com, args...)\n\treturn command.CombinedOutput()\n}", "func (pm *Manager) Exec(desc, cmdName string, args ...string) (string, string, error) {\n\treturn pm.ExecDir(-1, \"\", desc, cmdName, args...)\n}", "func ExecuteShell(\n\tcliConfig schema.CliConfiguration,\n\tcommand string,\n\tname string,\n\tdir string,\n\tenv []string,\n\tdryRun bool,\n) error {\n\tu.LogDebug(cliConfig, \"\\nExecuting command:\")\n\tu.LogDebug(cliConfig, command)\n\n\tif dryRun {\n\t\treturn nil\n\t}\n\n\treturn shellRunner(command, name, dir, env, os.Stdout)\n}", "func exec(c *lxc.Container, conf *Config) {\n\tvar output []byte\n\tvar err error\n\t// stdout and stderr are unfornutately concatenated\n\tif output, err = c.Execute(conf.Args.Command...); err != nil {\n\t\tif len(output) != 0 {\n\t\t\tfmt.Printf(\"%s\\n\", output)\n\t\t}\n\t\terrorExit(2, err)\n\t} else {\n\t\tfmt.Printf(\"%s\", output)\n\t}\n}", "func Shell(c *cli.Context) error {\n\treturn subShell(c.String(\"name\"), c.String(\"shell\"), c.String(\"command\"))\n}", "func execTerraformShellCommand(\n\tcliConfig schema.CliConfiguration,\n\tcomponent string,\n\tstack string,\n\tcomponentEnvList []string,\n\tvarFile string,\n\tworkingDir string,\n\tworkspaceName string,\n\tcomponentPath string) error {\n\n\tcomponentEnvList = append(componentEnvList, fmt.Sprintf(\"TF_CLI_ARGS_plan=-var-file=%s\", varFile))\n\tcomponentEnvList = append(componentEnvList, fmt.Sprintf(\"TF_CLI_ARGS_apply=-var-file=%s\", varFile))\n\tcomponentEnvList = append(componentEnvList, fmt.Sprintf(\"TF_CLI_ARGS_refresh=-var-file=%s\", varFile))\n\tcomponentEnvList = append(componentEnvList, fmt.Sprintf(\"TF_CLI_ARGS_import=-var-file=%s\", varFile))\n\tcomponentEnvList = append(componentEnvList, fmt.Sprintf(\"TF_CLI_ARGS_destroy=-var-file=%s\", varFile))\n\tcomponentEnvList = append(componentEnvList, 
fmt.Sprintf(\"TF_CLI_ARGS_console=-var-file=%s\", varFile))\n\n\tu.LogDebug(cliConfig, \"\\nStarting a new interactive shell where you can execute all native Terraform commands (type 'exit' to go back)\")\n\tu.LogDebug(cliConfig, fmt.Sprintf(\"Component: %s\\n\", component))\n\tu.LogDebug(cliConfig, fmt.Sprintf(\"Stack: %s\\n\", stack))\n\tu.LogDebug(cliConfig, fmt.Sprintf(\"Working directory: %s\\n\", workingDir))\n\tu.LogDebug(cliConfig, fmt.Sprintf(\"Terraform workspace: %s\\n\", workspaceName))\n\tu.LogDebug(cliConfig, \"\\nSetting the ENV vars in the shell:\\n\")\n\tfor _, v := range componentEnvList {\n\t\tu.LogDebug(cliConfig, v)\n\t}\n\n\t// Transfer stdin, stdout, and stderr to the new process and also set the target directory for the shell to start in\n\tpa := os.ProcAttr{\n\t\tFiles: []*os.File{os.Stdin, os.Stdout, os.Stderr},\n\t\tDir: componentPath,\n\t\tEnv: append(os.Environ(), componentEnvList...),\n\t}\n\n\t// Start a new shell\n\tvar shellCommand string\n\n\tif runtime.GOOS == \"windows\" {\n\t\tshellCommand = \"cmd.exe\"\n\t} else {\n\t\t// If 'SHELL' ENV var is not defined, use 'bash' shell\n\t\tshellCommand = os.Getenv(\"SHELL\")\n\t\tif len(shellCommand) == 0 {\n\t\t\tbashPath, err := exec.LookPath(\"bash\")\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tshellCommand = bashPath\n\t\t}\n\t\tshellCommand = shellCommand + \" -l\"\n\t}\n\n\tu.LogDebug(cliConfig, fmt.Sprintf(\"Starting process: %s\\n\", shellCommand))\n\n\targs := strings.Fields(shellCommand)\n\n\tproc, err := os.StartProcess(args[0], args[1:], &pa)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Wait until user exits the shell\n\tstate, err := proc.Wait()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tu.LogDebug(cliConfig, fmt.Sprintf(\"Exited shell: %s\\n\", state.String()))\n\treturn nil\n}", "func (client *ExternalClient) Shell(pty bool, args ...string) error {\n\targs = append(client.BaseArgs, args...)\n\tcmd := getSSHCmd(client.BinaryPath, pty, args...)\n\tcmd.Stdin = os.Stdin\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\treturn cmd.Run()\n}", "func (player *Player) ExecAs(commandLine string, callback func(statusCode int)) {\n\tplayer.Exec(fmt.Sprintf(\"execute %v ~ ~ ~ %v\", player.name, commandLine), func(response map[string]interface{}) {\n\t\tcodeInterface, exists := response[\"statusCode\"]\n\t\tif !exists {\n\t\t\tlog.Printf(\"exec as: invalid response JSON\")\n\t\t\treturn\n\t\t}\n\t\tcode, _ := codeInterface.(int)\n\t\tif callback != nil {\n\t\t\tcallback(code)\n\t\t}\n\t})\n}", "func comandoExec(comando string) {\n\tfmt.Println(\"\\nEJECUTANDO: \" + comando)\n\ts := strings.Split(comando, \" -\")\n\tif len(s) == 2 {\n\t\ts2 := strings.Split(s[1], \"->\")\n\t\tif strings.Compare(s2[0], \"path\") == 0 {\n\t\t\t_, err := os.Stat(strings.ReplaceAll(s2[1], \"\\\"\", \"\"))\n\t\t\tif err == nil {\n\t\t\t\ts3 := strings.Split(s2[1], \".\")\n\t\t\t\tif strings.Compare(s3[1], \"mia\") == 0 {\n\t\t\t\t\tfmt.Println(\"RESULTADO: Lectura de archivo\")\n\t\t\t\t\tfmt.Println(\"\")\n\t\t\t\t\tarchivo := leerArchivo(s2[1])\n\t\t\t\t\t//mandar a analizar ese archivo\n\t\t\t\t\tanalizarArchivo(archivo)\n\t\t\t\t} else {\n\t\t\t\t\tfmt.Println(\"RESULTADO: La extension del archivo debe ser .MIA\")\n\t\t\t\t}\n\t\t\t}\n\t\t\tif os.IsNotExist(err) {\n\t\t\t\tfmt.Println(\"RESULTADO: No existe el archivo especificado\")\n\t\t\t}\n\t\t} else {\n\t\t\tfmt.Println(\"RESULTADO: El parametro PATH es obligatorio\")\n\t\t}\n\t} else if len(s) > 2 {\n\t\tfmt.Println(\"RESULTADO: Demasiados parametros 
para el comando EXEC\")\n\t} else {\n\t\tfmt.Println(\"RESULTADO: Faltan parametros para el comando EXEC\")\n\t}\n}", "func execute(w io.Writer, commandline string, req io.Reader) error {\n\targv, err := cmd.SplitQuoted(commandline)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// We treat a pipe command specially.\n\t// It will be splitted by the pipe binary.\n\tif strings.HasPrefix(commandline, \"pipe \") {\n\t\targv = []string{\"pipe\", commandline[5:]}\n\t}\n\n\tif len(argv) < 1 {\n\t\treturn fmt.Errorf(\"request contains no command\")\n\t}\n\n\t// Get installation directory of editor binary.\n\t// All subcommands must be in the same directory.\n\tvar installDir string\n\tprogname := os.Args[0]\n\tif p, err := filepath.Abs(progname); err != nil {\n\t\treturn fmt.Errorf(\"cannot get editor directory\")\n\t} else {\n\t\tinstallDir = filepath.Dir(p)\n\t}\n\n\tvar buf bytes.Buffer\n\tvar errbuf bytes.Buffer\n\targv[0] = filepath.Join(installDir, argv[0])\n\tctx, cancel := context.WithCancel(context.Background())\n\tc := exec.CommandContext(ctx, argv[0], argv[1:]...)\n\tc.Stdin = req\n\tc.Stdout = &buf\n\tc.Stderr = &errbuf\n\tif err := c.Start(); err != nil {\n\t\treturn err\n\t}\n\tpid := c.Process.Pid\n\tProcessList.Add(pid, argv, cancel)\n\n\terr = c.Wait()\n\tProcessList.Remove(pid)\n\tio.Copy(w, &buf)\n\n\t// Write stderr of commands to the console.\n\tif errbuf.Len() > 0 {\n\t\tif err != nil {\n\t\t\terrmsg, _ := ioutil.ReadAll(&errbuf)\n\t\t\terr = fmt.Errorf(\"%s\\n%s\\n\", err.Error(), string(errmsg))\n\t\t} else {\n\t\t\tio.Copy(os.Stdout, &errbuf)\n\t\t}\n\t}\n\treturn err\n}", "func (s pathRuntime) Exec(args []string) error {\n\truntimeArgs := []string{s.path}\n\tif len(args) > 1 {\n\t\truntimeArgs = append(runtimeArgs, args[1:]...)\n\t}\n\n\treturn s.execRuntime.Exec(runtimeArgs)\n}", "func Exec(cmd string) {\n\n\tfmt.Printf(\"Você digitou: %s \", cmd)\n\n}", "func execCmd(args []string) {\n\tvar (\n\t\tcmd *exec.Cmd\n\t)\n\n\t// Prepare command with arguments\n\tcmd = exec.Command(binary, args...)\n\n\t// redirect stdout/err/in\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\tcmd.Stdin = os.Stdin\n\n\t// Execute command\n\tcmd.Run()\n}", "func sysExec(args ...OBJ) OBJ {\n\tif len(args) < 1 {\n\t\treturn NewError(\"`sys.exec` wanted string, got invalid argument\")\n\t}\n\n\tvar command string\n\tswitch c := args[0].(type) {\n\tcase *object.String:\n\t\tcommand = c.Value\n\tdefault:\n\t\treturn NewError(\"`sys.exec` wanted string, got invalid argument\")\n\t}\n\n\tif len(command) < 1 {\n\t\treturn NewError(\"`sys.exec` expected string, got invalid argument\")\n\t}\n\t// split the command\n\ttoExec := splitCommand(command)\n\tcmd := exec.Command(toExec[0], toExec[1:]...)\n\n\t// get the result\n\tvar outb, errb bytes.Buffer\n\tcmd.Stdout = &outb\n\tcmd.Stderr = &errb\n\terr := cmd.Run()\n\n\t// If the command exits with a non-zero exit-code it\n\t// is regarded as a failure. 
Here we test for ExitError\n\t// to regard that as a non-failure.\n\tif err != nil && err != err.(*exec.ExitError) {\n\t\tfmt.Printf(\"Failed to run '%s' -> %s\\n\", command, err.Error())\n\t\treturn &object.Error{Message: \"Failed to run command!\"}\n\t}\n\n\t// The result-objects to store in our hash.\n\tstdout := &object.String{Value: outb.String()}\n\tstderr := &object.String{Value: errb.String()}\n\n\treturn NewHash(StringObjectMap{\n\t\t\"stdout\": stdout,\n\t\t\"stderr\": stderr,\n\t})\n}", "func SimpleExecInPath(dir, cmdName string, arguments ...string) {\n\tcmd := exec.Command(cmdName, arguments...) // nolint: gosec\n\tcmd.Stdin = os.Stdin\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\tcmd.Dir = dir\n\terr := cmd.Run()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n}", "func Exec(name string, arg ...string) error {\n\tcmd := exec.Command(name, arg...)\n\tif len(cmdExePath) != 0 {\n\t\tcmd.Dir = cmdExePath\n\t\tcmdExePath = \"\"\n\t}\n\toutput, err := cmd.CombinedOutput()\n\tif err != nil {\n\t\tlog.Fatal(\"error: \", string(output), err)\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func Run(dir string, commandLine string) ([]byte, error) {\n\n\t// Split commandLine into an array separated by whitespace\n\targs := strings.Fields(commandLine)\n\tcmd := exec.Command(args[0], args[1:]...)\n\tcmd.Dir = dir\n\tvar buf bytes.Buffer\n\tcmd.Stdout = &buf\n\tcmd.Stderr = &buf\n\terr := cmd.Run()\n\tout := buf.Bytes()\n\tif err != nil {\n\t\treturn out, err\n\t}\n\treturn out, nil\n}", "func shellRunner(command string, name string, dir string, env []string, out io.Writer) error {\n\tparser, err := syntax.NewParser().Parse(strings.NewReader(command), name)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tenviron := append(os.Environ(), env...)\n\tlistEnviron := expand.ListEnviron(environ...)\n\trunner, err := interp.New(\n\t\tinterp.Dir(dir),\n\t\tinterp.Env(listEnviron),\n\t\tinterp.StdIO(os.Stdin, out, os.Stderr),\n\t)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn runner.Run(context.TODO(), parser)\n}", "func (ne *NSEnter) Exec(cmd string, args []string) exec.Cmd {\n\thostProcMountNsPath := filepath.Join(ne.hostRootFsPath, mountNsPath)\n\tfullArgs := append([]string{fmt.Sprintf(\"--mount=%s\", hostProcMountNsPath), \"--\"},\n\t\tappend([]string{ne.AbsHostPath(cmd)}, args...)...)\n\tklog.V(5).Infof(\"Running nsenter command: %v %v\", nsenterPath, fullArgs)\n\treturn ne.executor.Command(nsenterPath, fullArgs...)\n}", "func (e *editorConfig) execCmd(cmd string) error {\n\t// Multiple commands may be concatenated with '|'\n\t// Split these and run them recursively\n\tcmds := strings.SplitN(cmd, \"|\", 2)\n\tif len(cmds) > 1 {\n\t\tif err := e.execCmd(cmds[0]); err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn e.execCmd(cmds[1])\n\t}\n\tcmd = strings.TrimSpace(cmds[0])\n\n\targs := strings.Split(cmd, \" \")\n\tcmd, params := args[0], args[1:]\n\n\t// If command is a number: go to that line\n\tif n, err := strconv.Atoi(cmd); err == nil {\n\t\t// First, check for relative numbers, starting with + or -\n\t\tif cmd[0] == '+' || cmd[0] == '-' {\n\t\t\tn = e.cy + n\n\t\t} else {\n\t\t\t// Absolute number, adjust for e.cy starting from 0\n\t\t\t// versus user input starting from 1\n\t\t\tn = n - 1\n\t\t}\n\n\t\t// Check boundaries and set cy\n\t\te.cy = n\n\t\te.safeCursor()\n\t\treturn nil\n\t}\n\n\tswitch cmd {\n\tcase \"$\":\n\t\treturn e.execCmd(strconv.Itoa(len(e.row)))\n\tcase \"delete\", \"d\":\n\t\tif len(params) == 0 {\n\t\t\tparams = []string{\"1\"}\n\t\t}\n\t\tn, err := 
strconv.Atoi(params[0])\n\t\tif err != nil {\n\t\t\tn = 1\n\t\t}\n\t\te.register.Reset()\n\t\tfor i := 0; i < n; i++ {\n\t\t\te.register.Write(e.row[e.cy].chars)\n\t\t\tif i != n-1 {\n\t\t\t\te.register.Write([]byte(newline))\n\t\t\t}\n\t\t\te.delRow(e.cy)\n\t\t}\n\tcase \"join\":\n\t\te.cx = e.rowLen(e.cy)\n\t\te.delRune()\n\t\tif e.rowLen(e.cy) > e.cx {\n\t\t\te.insertRune(' ')\n\t\t}\n\t\tfor e.rowLen(e.cy) > e.cx && strings.ContainsRune(\" \\t\", rune(e.row[e.cy].chars[e.cx])) {\n\t\t\te.delRune()\n\t\t}\n\t\treturn nil\n\tcase \"put\", \"pu\":\n\t\te.insertRow(e.cy, e.register.Bytes())\n\tcase \"quit\", \"q\":\n\t\tif e.dirty > 0 {\n\t\t\te.setStatusMessage(\"File has unsaved changes!!!\")\n\t\t\treturn nil\n\t\t}\n\t\treturn errQuit\n\tcase \"q!\":\n\t\treturn errQuit\n\tcase \"write\", \"w\":\n\t\tif len(params) > 0 {\n\t\t\tif _, err := os.Stat(params[0]); !os.IsNotExist(err) {\n\t\t\t\treturn errors.New(\"file exists already\")\n\t\t\t}\n\t\t\te.filename = params[0]\n\t\t}\n\t\tn, err := e.fileBuffer.save()\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Can't save file: %w\", err)\n\t\t}\n\t\te.setStatusMessage(\"%d bytes written to disk\", n)\n\tcase \"wq\":\n\t\treturn e.execCmd(\"write|quit\")\n\tcase \"yank\", \"ya\":\n\t\tif len(params) == 0 {\n\t\t\tparams = []string{\"1\"}\n\t\t}\n\t\tn, err := strconv.Atoi(params[0])\n\t\tif err != nil {\n\t\t\tn = 1\n\t\t}\n\t\te.register.Reset()\n\t\tfor i := 0; i < n; i++ {\n\t\t\te.register.Write(e.row[e.cy+i].chars)\n\t\t\tif i != n-1 {\n\t\t\t\te.register.Write([]byte(newline))\n\t\t\t}\n\t\t}\n\tdefault:\n\t\te.setStatusMessage(\"Unknown command %q\", cmd)\n\t}\n\treturn nil\n}", "func ExecuteShellAndReturnOutput(\n\tcliConfig schema.CliConfiguration,\n\tcommand string,\n\tname string,\n\tdir string,\n\tenv []string,\n\tdryRun bool,\n) (string, error) {\n\tvar b bytes.Buffer\n\n\tu.LogDebug(cliConfig, \"\\nExecuting command:\")\n\tu.LogDebug(cliConfig, command)\n\n\tif dryRun {\n\t\treturn \"\", nil\n\t}\n\n\terr := shellRunner(command, name, dir, env, &b)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn b.String(), nil\n}", "func RemoteShell(remoteHost, remoteShellStdin string) (stdout, stderr string, err error) {\n\treturn Exec(\"ssh\", \"\", remoteShellStdin, remoteHost, \"sh -il\")\n}", "func RunShellCommand(shell string) (string, error) {\n\tvar out, berr bytes.Buffer\n\tcmd := exec.Command(\"/bin/sh\", \"-c\", shell)\n\tcmd.Stdout = &out\n\tcmd.Stderr = &berr\n\n\tif err := cmd.Run(); err != nil {\n\t\treturn out.String(), fmt.Errorf(\"err:%v berr:%v\", err, berr.String())\n\t}\n\n\treturn out.String() + berr.String(), nil\n}", "func execCmd(dir, arg0 string, args ...string) error {\n\tcmd := exec.Command(arg0, args...)\n\tcmd.Env = os.Environ()\n\tcmd.Dir = dir\n\tcmd.Stdin = os.Stdin\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\treturn cmd.Run()\n}", "func (git *Git) Exec(subcmd string, args ...string) (string, error) {\n\tb, err := git.Command(subcmd, args...).CombinedOutput()\n\n\t// Chop last newline\n\tl := len(b)\n\tif l > 0 && b[l-1] == '\\n' {\n\t\tb = b[:l-1]\n\t}\n\n\t// Make output in oneline in error cases\n\tif err != nil {\n\t\tfor i := range b {\n\t\t\tif b[i] == '\\n' {\n\t\t\t\tb[i] = ' '\n\t\t\t}\n\t\t}\n\t}\n\n\treturn string(b), err\n}", "func (ssh *SSHConfig) Exec(cmdString string) error {\n\ttunnels, sshConfig, err := ssh.CreateTunnels()\n\tif err != nil {\n\t\tfor _, t := range tunnels {\n\t\t\tnerr := t.Close()\n\t\t\tif nerr != nil {\n\t\t\t\tlog.Warnf(\"Error closing ssh tunnel: 
%v\", nerr)\n\t\t\t}\n\t\t}\n\t\treturn fmt.Errorf(\"Unable to create command : %s\", err.Error())\n\t}\n\tsshCmdString, keyFile, err := createSSHCmd(sshConfig, cmdString, false)\n\tif err != nil {\n\t\tfor _, t := range tunnels {\n\t\t\tnerr := t.Close()\n\t\t\tif nerr != nil {\n\t\t\t\tlog.Warnf(\"Error closing ssh tunnel: %v\", nerr)\n\t\t\t}\n\t\t}\n\t\tif keyFile != nil {\n\t\t\tnerr := utils.LazyRemove(keyFile.Name())\n\t\t\tif nerr != nil {\n\t\t\t\tlog.Warnf(\"Error removing file %v\", nerr)\n\t\t\t}\n\t\t}\n\t\treturn fmt.Errorf(\"Unable to create command : %s\", err.Error())\n\t}\n\tbash, err := exec.LookPath(\"bash\")\n\tif err != nil {\n\t\tfor _, t := range tunnels {\n\t\t\tnerr := t.Close()\n\t\t\tif nerr != nil {\n\t\t\t\tlog.Warnf(\"Error closing ssh tunnel: %v\", nerr)\n\t\t\t}\n\t\t}\n\t\tif keyFile != nil {\n\t\t\tnerr := utils.LazyRemove(keyFile.Name())\n\t\t\tif nerr != nil {\n\t\t\t\tlog.Warnf(\"Error removing file %v\", nerr)\n\t\t\t}\n\t\t}\n\t\treturn fmt.Errorf(\"Unable to create command : %s\", err.Error())\n\t}\n\tvar args []string\n\tif cmdString == \"\" {\n\t\targs = []string{sshCmdString}\n\t} else {\n\t\targs = []string{\"-c\", sshCmdString}\n\t}\n\terr = syscall.Exec(bash, args, nil)\n\tnerr := utils.LazyRemove(keyFile.Name())\n\tif nerr != nil {\n\t}\n\treturn err\n}", "func ExecExternal(dir string,name string, arg ...string) (outStr string, errStr string, err error) {\n\tcmd := exec.Command(name, arg...)\n\tif dir != \"\"{\n\t\tcmd.Dir = dir\n\t}\n\tvar stdout, stderr bytes.Buffer\n\tcmd.Stdout = &stdout\n\tcmd.Stderr = &stderr\n\terr = cmd.Run()\n\n\toutStr, errStr = string(stdout.Bytes()), string(stderr.Bytes())\n\n\treturn outStr, errStr, err\n}", "func Execute() {\n\tcmd := rootCmd\n\trootCmdWithShell := *rootCmd\n\trootCmdWithShell.AddCommand(shellCmd)\n\tfoundCmd, _, err := rootCmdWithShell.Find(os.Args[1:])\n\tif err == nil && foundCmd.Use == \"shell\" {\n\t\tcmd = shellCmd\n\t}\n\tif err := cmd.Execute(); err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n}", "func QuickExec(cmdLine string, workDir ...string) (string, error) {\n\treturn ExecLine(cmdLine, workDir...)\n}", "func (e *Echo) Run(cmdStr string) string {\n\treturn exec.Run(e.Eval(cmdStr))\n}", "func (exec *Executhor) Exec(execArg []string) {\n\tif exec.execBuiltin(execArg) == nil {\n\t\treturn\n\t}\n\tpath, err := exec.getBinaryPath(execArg[0])\n\tif err == nil {\n\t\tpid := C.fork()\n\t\tif pid != 0 {\n\t\t\tvar status C.int\n\t\t\tC.wait(&status)\n\t\t} else {\n\t\t\tsyscall.Exec(path, execArg, exec.env.GetEnv())\n\t\t}\n\t} else {\n\t\tfmt.Println(err)\n\t}\n}", "func (sb *SecretBackend) execCommand(inputPayload string) ([]byte, error) {\n\tctx, cancel := context.WithTimeout(context.Background(), sb.cmdTimeout)\n\tdefer cancel()\n\n\tcmd := exec.CommandContext(ctx, sb.cmd, sb.cmdArgs...)\n\n\tcmd.Stdin = strings.NewReader(inputPayload)\n\n\tstdout := limitBuffer{\n\t\tbuf: &bytes.Buffer{},\n\t\tmax: sb.cmdOutputMaxSize,\n\t}\n\tstderr := limitBuffer{\n\t\tbuf: &bytes.Buffer{},\n\t\tmax: sb.cmdOutputMaxSize,\n\t}\n\tcmd.Stdout = &stdout\n\tcmd.Stderr = &stderr\n\n\tif err := cmd.Run(); err != nil {\n\t\tif errors.Is(ctx.Err(), context.DeadlineExceeded) {\n\t\t\treturn nil, fmt.Errorf(\"error while running '%s': command timeout\", sb.cmd)\n\t\t}\n\t\treturn nil, fmt.Errorf(\"error while running '%s': %w\", sb.cmd, err)\n\t}\n\n\treturn stdout.buf.Bytes(), nil\n}", "func SimpleExec(cmdName string, arguments ...string) {\n\tcmd := exec.Command(cmdName, arguments...) 
// nolint: gosec\n\tcmd.Stdin = os.Stdin\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\terr := cmd.Run()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n}", "func (fs *Fs) ExecCommand(name string, args ...string) ([]byte, error) {\n\treturn exec.Command(name, args...).CombinedOutput() // #nosec G204\n}", "func Exec(container string, cmdLine ...string) (string, error) {\n\tparts := []string{\"exec\", \"-t\", container}\n\tparts = append(parts, cmdLine...)\n\tcmd := exec.Command(\"docker\", parts...)\n\toutput, err := cmd.CombinedOutput()\n\treturn string(output), err\n}", "func TestExecuteCmd(t *testing.T) {\n\tshell := CmdShell{}\n\tif _, err := shell.ExecuteCmd(invalidCmd); err == nil {\n\t\tt.Errorf(\"Cmd didn't error on invalid cmd\")\n\t}\n\n\tif validOutput, err := shell.ExecuteCmd(validCmd); validOutput == nil || err != nil || bytes.Equal(validOutput, []byte(\"hello\")) {\n\t\tt.Errorf(\"Cmd failed, expected %v, got %v\", \"hello\", string(validOutput))\n\t}\n}", "func RunCommand(custom string) (string, error) {\r\n custom = strings.TrimSpace(strings.TrimSuffix(custom, \"$\"))\r\n pieces := strings.Split(custom, \" \")\r\n cmd := exec.Command(pieces[0])\r\n cmd.Args = pieces\r\n cmd.Stdin = os.Stdin\r\n output, oops := cmd.CombinedOutput()\r\n return string(output), oops\r\n}", "func SystemExec(command string, args []string) *BuildahTestSession {\n\tfmt.Printf(\"Running: %s %s\\n\", command, strings.Join(args, \" \"))\n\tc := exec.Command(command, args...)\n\tsession, err := gexec.Start(c, GinkgoWriter, GinkgoWriter)\n\tif err != nil {\n\t\tFail(fmt.Sprintf(\"unable to run command: %s %s\", command, strings.Join(args, \" \")))\n\t}\n\treturn &BuildahTestSession{session}\n}", "func RunShell(ctx context.Context, printer *Printer, meta *meta, rootCmd *cobra.Command, args []string) {\n\tautoCompleteCache = cache.New()\n\tcompleter := NewShellCompleter(ctx)\n\n\tshellCobraCommand := getShellCommand(rootCmd)\n\tshellCobraCommand.InitDefaultHelpFlag()\n\t_ = shellCobraCommand.ParseFlags(args)\n\tif isHelp, _ := shellCobraCommand.Flags().GetBool(\"help\"); isHelp {\n\t\tshellCobraCommand.HelpFunc()(shellCobraCommand, args)\n\t\treturn\n\t}\n\n\t// remove shell command so it cannot be called from shell\n\trootCmd.RemoveCommand(shellCobraCommand)\n\tmeta.Commands.Remove(\"shell\", \"\")\n\n\texecutor := shellExecutor(rootCmd, printer, meta)\n\tp := prompt.New(\n\t\texecutor,\n\t\tcompleter.Complete,\n\t\tprompt.OptionPrefix(\">>> \"),\n\t\tprompt.OptionSuggestionBGColor(prompt.Purple),\n\t\tprompt.OptionSelectedSuggestionBGColor(prompt.Fuchsia),\n\t\tprompt.OptionSelectedSuggestionTextColor(prompt.White),\n\t\tprompt.OptionDescriptionBGColor(prompt.Purple),\n\t\tprompt.OptionSelectedDescriptionBGColor(prompt.Fuchsia),\n\t\tprompt.OptionSelectedDescriptionTextColor(prompt.White),\n\t)\n\tp.Run()\n}", "func (r *streamingRuntime) exec(containerID string, cmd []string, in io.Reader, out, errw io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize, timeout time.Duration) error {\n\tcontainer, err := checkContainerStatus(r.client, containerID)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn r.execHandler.ExecInContainer(r.client, container, cmd, in, out, errw, tty, resize, timeout)\n}", "func ExecuteShellCommand(\n\tcliConfig schema.CliConfiguration,\n\tcommand string,\n\targs []string,\n\tdir string,\n\tenv []string,\n\tdryRun bool,\n\tredirectStdError string,\n) error {\n\tcmd := exec.Command(command, args...)\n\tcmd.Env = append(os.Environ(), env...)\n\tcmd.Dir = 
dir\n\tcmd.Stdin = os.Stdin\n\tcmd.Stdout = os.Stdout\n\n\tif redirectStdError == \"\" {\n\t\tcmd.Stderr = os.Stderr\n\t} else {\n\t\tf, err := os.OpenFile(redirectStdError, os.O_RDWR|os.O_CREATE, 0644)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tdefer func(f *os.File) {\n\t\t\terr = f.Close()\n\t\t\tif err != nil {\n\t\t\t\tu.LogWarning(cliConfig, err.Error())\n\t\t\t}\n\t\t}(f)\n\n\t\tcmd.Stderr = f\n\t}\n\n\tu.LogDebug(cliConfig, \"\\nExecuting command:\")\n\tu.LogDebug(cliConfig, cmd.String())\n\n\tif dryRun {\n\t\treturn nil\n\t}\n\n\treturn cmd.Run()\n}", "func Shell(format string, args ...interface{}) (string, error) {\n\treturn sh(format, true, args...)\n}", "func startShell(f resolver,obj interface{}){\n reader := bufio.NewReader(os.Stdin)\n fmt.Println(\"Accepting commands\")\n fmt.Println(\"---------------------\")\n var stop bool = false\n for ;!stop;{\n fmt.Print(\"-> \")\n text, _ := reader.ReadString('\\n')\n text = strings.Replace(text, \"\\n\", \"\", -1)\n args := strings.Split(text,\" \")\n if len(args)>1{\n fmt.Println(f(args[0],args[1:],obj))\n }else if len(args)==1{\n if args[0]==\"close\"{\n stop = true\n }else{\n fmt.Println(f(args[0],nil,obj))\n }\n }\n }\n}", "func ExecSubcommand(commands ...string) (string, error) {\n\tbytes, err := exec.Command(\"VBoxManage\", commands...).Output()\n\treturn strings.TrimSpace(string(bytes)), err\n}", "func (m *Manager) Exec(name string, opt ExecOptions, gOpt operator.Options) error {\n\tif err := clusterutil.ValidateClusterNameOrError(name); err != nil {\n\t\treturn err\n\t}\n\n\tmetadata, err := m.meta(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttopo := metadata.GetTopology()\n\tbase := metadata.GetBaseMeta()\n\n\tfilterRoles := set.NewStringSet(gOpt.Roles...)\n\tfilterNodes := set.NewStringSet(gOpt.Nodes...)\n\n\tvar shellTasks []task.Task\n\tuniqueHosts := map[string]set.StringSet{} // host-sshPort -> {command}\n\ttopo.IterInstance(func(inst spec.Instance) {\n\t\tkey := utils.JoinHostPort(inst.GetManageHost(), inst.GetSSHPort())\n\t\tif _, found := uniqueHosts[key]; !found {\n\t\t\tif len(gOpt.Roles) > 0 && !filterRoles.Exist(inst.Role()) {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif len(gOpt.Nodes) > 0 && (!filterNodes.Exist(inst.GetHost()) && !filterNodes.Exist(inst.GetManageHost())) {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tcmds, err := renderInstanceSpec(opt.Command, inst)\n\t\t\tif err != nil {\n\t\t\t\tm.logger.Debugf(\"error rendering command with spec: %s\", err)\n\t\t\t\treturn // skip\n\t\t\t}\n\t\t\tcmdSet := set.NewStringSet(cmds...)\n\t\t\tif _, ok := uniqueHosts[key]; ok {\n\t\t\t\tuniqueHosts[key].Join(cmdSet)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tuniqueHosts[key] = cmdSet\n\t\t}\n\t})\n\n\tfor hostKey, i := range uniqueHosts {\n\t\thost, _ := utils.ParseHostPort(hostKey)\n\t\tfor _, cmd := range i.Slice() {\n\t\t\tshellTasks = append(shellTasks,\n\t\t\t\ttask.NewBuilder(m.logger).\n\t\t\t\t\tShell(host, cmd, hostKey+cmd, opt.Sudo).\n\t\t\t\t\tBuild())\n\t\t}\n\t}\n\n\tb, err := m.sshTaskBuilder(name, topo, base.User, gOpt)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tt := b.\n\t\tParallel(false, shellTasks...).\n\t\tBuild()\n\n\texecCtx := ctxt.New(\n\t\tcontext.Background(),\n\t\tgOpt.Concurrency,\n\t\tm.logger,\n\t)\n\tif err := t.Execute(execCtx); err != nil {\n\t\tif errorx.Cast(err) != nil {\n\t\t\t// FIXME: Map possible task errors and give suggestions.\n\t\t\treturn err\n\t\t}\n\t\treturn perrs.Trace(err)\n\t}\n\n\t// print outputs\n\tfor hostKey, i := range uniqueHosts {\n\t\thost, _ := 
utils.ParseHostPort(hostKey)\n\t\tfor _, cmd := range i.Slice() {\n\t\t\tstdout, stderr, ok := ctxt.GetInner(execCtx).GetOutputs(hostKey + cmd)\n\t\t\tif !ok {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tm.logger.Infof(\"Outputs of %s on %s:\",\n\t\t\t\tcolor.CyanString(cmd),\n\t\t\t\tcolor.CyanString(host))\n\t\t\tif len(stdout) > 0 {\n\t\t\t\tm.logger.Infof(\"%s:\\n%s\", color.GreenString(\"stdout\"), stdout)\n\t\t\t}\n\t\t\tif len(stderr) > 0 {\n\t\t\t\tm.logger.Infof(\"%s:\\n%s\", color.RedString(\"stderr\"), stderr)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}", "func (flower *Flower) Exec(commandName string, capture bool, args []string) (string, error) {\n\tflowerCommandData, err := flower.GetFlowerCommandData(commandName)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tvar command []string\n\tif flowerCommandData.Workdir != \"\" {\n\t\tcommand = append([]string{\"cd\", flowerCommandData.Workdir, \"&&\"})\n\t}\n\n\tcommand = append([]string{flowerCommandData.Bin})\n\tfor _, arg := range args {\n\t\tcommand = append([]string{arg})\n\t}\n\n\tvar dockerExecOptions *DockerExecOptions\n\tswitch flowerCommandData.DockerExecOptions {\n\tcase nil:\n\t\tdockerExecOptions = flowerCommandData.DockerExecOptions\n\tdefault:\n\t\tdockerExecOptions = &DockerExecOptions{}\n\t}\n\n\treturn flower.Container.Exec(command, dockerExecOptions, capture)\n}", "func (l *CustomLambda) Execute(stdin io.Reader, args []string) (string, error) {\n\targsStr := strings.TrimSpace(strings.Join(args, \" \"))\n\tif argsStr != \"\" {\n\t\targsStr = \" \" + argsStr\n\t}\n\n\tcmd := exec.Command(\"bash\", \"-c\", l.command+argsStr)\n\n\t// pass through some stdin goodness\n\tcmd.Stdin = stdin\n\n\t// for those who are about to rock, I salute you.\n\tstdoutStderr, err := cmd.CombinedOutput()\n\n\tif err == nil {\n\t\t// noiiiice!\n\t\tlog.WithFields(log.Fields{\"name\": l.Name(), \"command\": l.command}).Info(\"Lambda Execution\")\n\t\treturn strings.TrimSpace(string(stdoutStderr)), nil\n\t}\n\n\t// *sigh*\n\tlog.WithFields(log.Fields{\"name\": l.Name(), \"command\": l.command}).Error(\"Lambda Execution\")\n\treturn string(stdoutStderr), errors.New(\"Error running command\")\n}", "func (r *streamingRuntime) exec(containerID string, cmd []string, in io.Reader, out, errw io.WriteCloser, tty bool, resize <-chan term.Size, timeout time.Duration) error {\n\tcontainer, err := checkContainerStatus(r.client, containerID)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn r.execHandler.ExecInContainer(r.client, container, cmd, in, out, errw, tty, resize, timeout)\n}", "func runShellAs(who, hname, ttype, cmd string, interactive bool, conn *xsnet.Conn, chaffing bool) (exitStatus uint32, err error) {\n\tvar wg sync.WaitGroup\n\tu, err := user.Lookup(who)\n\tif err != nil {\n\t\texitStatus = 1\n\t\treturn\n\t}\n\tvar uid, gid uint32\n\t_, _ = fmt.Sscanf(u.Uid, \"%d\", &uid) // nolint: gosec\n\t_, _ = fmt.Sscanf(u.Gid, \"%d\", &gid) // nolint: gosec\n\tlog.Println(\"uid:\", uid, \"gid:\", gid)\n\n\t// Need to clear server's env and set key vars of the\n\t// target user. 
This isn't perfect (TERM doesn't seem to\n\t// work 100%; ANSI/xterm colour isn't working even\n\t// if we set \"xterm\" or \"ansi\" here; and line count\n\t// reported by 'stty -a' defaults to 24 regardless\n\t// of client shell window used to run client.\n\t// Investigate -- rlm 2018-01-26)\n\tos.Clearenv()\n\t_ = os.Setenv(\"HOME\", u.HomeDir) // nolint: gosec\n\t_ = os.Setenv(\"TERM\", ttype) // nolint: gosec\n\t_ = os.Setenv(\"XS_SESSION\", \"1\") // nolint: gosec\n\n\tvar c *exec.Cmd\n\tif interactive {\n\t\tif useSysLogin {\n\t\t\t// Use the server's login binary (post-auth\n\t\t\t// which is still done via our own bcrypt file)\n\t\t\t// Things UNIX login does, like print the 'motd',\n\t\t\t// and use the shell specified by /etc/passwd, will be done\n\t\t\t// automagically, at the cost of another external tool\n\t\t\t// dependency.\n\t\t\t//\n\t\t\tc = exec.Command(xs.GetTool(\"login\"), \"-f\", \"-p\", who) // nolint: gosec\n\t\t} else {\n\t\t\tc = exec.Command(xs.GetTool(\"bash\"), \"-i\", \"-l\") // nolint: gosec\n\t\t}\n\t} else {\n\t\tc = exec.Command(xs.GetTool(\"bash\"), \"-c\", cmd) // nolint: gosec\n\t}\n\t//If os.Clearenv() isn't called by server above these will be seen in the\n\t//client's session env.\n\t//c.Env = []string{\"HOME=\" + u.HomeDir, \"SUDO_GID=\", \"SUDO_UID=\", \"SUDO_USER=\", \"SUDO_COMMAND=\", \"MAIL=\", \"LOGNAME=\"+who}\n\tc.Dir = u.HomeDir\n\tc.SysProcAttr = &syscall.SysProcAttr{}\n\tif useSysLogin {\n\t\t// If using server's login binary, drop to user creds\n\t\t// is taken care of by it.\n\t\tc.SysProcAttr.Credential = &syscall.Credential{}\n\t} else {\n\t\tc.SysProcAttr.Credential = &syscall.Credential{Uid: uid, Gid: gid}\n\t}\n\n\t// Start the command with a pty.\n\tptmx, err := pty.Start(c) // returns immediately with ptmx file\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn xsnet.CSEPtyExecFail, err\n\t}\n\t// Make sure to close the pty at the end.\n\t// #gv:s/label=\\\"runShellAs\\$1\\\"/label=\\\"deferPtmxClose\\\"/\n\tdefer func() {\n\t\t//logger.LogDebug(fmt.Sprintf(\"[Exited process was %d]\", c.Process.Pid))\n\t\t_ = ptmx.Close()\n\t}() // nolint: gosec\n\n\t// get pty info for system accounting (who, lastlog)\n\tpts, pe := ptsName(ptmx.Fd())\n\tif pe != nil {\n\t\treturn xsnet.CSEPtyGetNameFail, err\n\t}\n\tutmpx := goutmp.Put_utmp(who, pts, hname)\n\tdefer func() { goutmp.Unput_utmp(utmpx) }()\n\tgoutmp.Put_lastlog_entry(\"xs\", who, pts, hname)\n\n\tlog.Printf(\"[%s]\\n\", cmd)\n\tif err != nil {\n\t\tlog.Printf(\"Command finished with error: %v\", err)\n\t} else {\n\t\t// Watch for term resizes\n\t\t// #gv:s/label=\\\"runShellAs\\$2\\\"/label=\\\"termResizeWatcher\\\"/\n\t\tgo func() {\n\t\t\tfor sz := range conn.WinCh {\n\t\t\t\tlog.Printf(\"[Setting term size to: %v %v]\\n\", sz.Rows, sz.Cols)\n\t\t\t\tpty.Setsize(ptmx, &pty.Winsize{Rows: sz.Rows, Cols: sz.Cols}) // nolint: gosec,errcheck\n\t\t\t}\n\t\t\tlog.Println(\"*** WinCh goroutine done ***\")\n\t\t}()\n\n\t\t// Copy stdin to the pty.. 
(bgnd goroutine)\n\t\t// #gv:s/label=\\\"runShellAs\\$3\\\"/label=\\\"stdinToPtyWorker\\\"/\n\t\tgo func() {\n\t\t\t_, e := io.Copy(ptmx, conn)\n\t\t\tif e != nil {\n\t\t\t\tlog.Println(\"** stdin->pty ended **:\", e.Error())\n\t\t\t} else {\n\t\t\t\tlog.Println(\"*** stdin->pty goroutine done ***\")\n\t\t\t}\n\t\t}()\n\n\t\tif chaffing {\n\t\t\tconn.EnableChaff()\n\t\t}\n\t\t// #gv:s/label=\\\"runShellAs\\$4\\\"/label=\\\"deferChaffShutdown\\\"/\n\t\tdefer func() {\n\t\t\tconn.DisableChaff()\n\t\t\tconn.ShutdownChaff()\n\t\t}()\n\n\t\t// ..and the pty to stdout.\n\t\t// This may take some time exceeding that of the\n\t\t// actual command's lifetime, so the c.Wait() below\n\t\t// must synchronize with the completion of this goroutine\n\t\t// to ensure all stdout data gets to the client before\n\t\t// connection is closed.\n\t\twg.Add(1)\n\t\t// #gv:s/label=\\\"runShellAs\\$5\\\"/label=\\\"ptyToStdoutWorker\\\"/\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\t_, e := io.Copy(conn, ptmx)\n\t\t\tif e != nil {\n\t\t\t\tlog.Println(\"** pty->stdout ended **:\", e.Error())\n\t\t\t} else {\n\t\t\t\t// The above io.Copy() will exit when the command attached\n\t\t\t\t// to the pty exits\n\t\t\t\tlog.Println(\"*** pty->stdout goroutine done ***\")\n\t\t\t}\n\t\t}()\n\n\t\tif err := c.Wait(); err != nil {\n\t\t\t//fmt.Println(\"*** c.Wait() done ***\")\n\t\t\tif exiterr, ok := err.(*exec.ExitError); ok {\n\t\t\t\t// The program has exited with an exit code != 0\n\n\t\t\t\t// This works on both Unix and Windows. Although package\n\t\t\t\t// syscall is generally platform dependent, WaitStatus is\n\t\t\t\t// defined for both Unix and Windows and in both cases has\n\t\t\t\t// an ExitStatus() method with the same signature.\n\t\t\t\tif status, ok := exiterr.Sys().(syscall.WaitStatus); ok {\n\t\t\t\t\texitStatus = uint32(status.ExitStatus())\n\t\t\t\t\tlog.Printf(\"Exit Status: %d\", exitStatus)\n\t\t\t\t}\n\t\t\t}\n\t\t\tconn.SetStatus(xsnet.CSOType(exitStatus))\n\t\t} else {\n\t\t\tlogger.LogDebug(\"*** Main proc has exited. 
***\")\n\t\t\t// Background jobs still may be running; close the\n\t\t\t// pty anyway, so the client can return before\n\t\t\t// wg.Wait() below completes (Issue #18)\n\t\t\tif interactive {\n\t\t\t\t_ = ptmx.Close()\n\t\t\t}\n\t\t}\n\t\twg.Wait() // Wait on pty->stdout completion to client\n\t}\n\treturn\n}", "func RunBashCmdExec(args []string, workingDir string) (string, error) {\n\texecDir, err := os.Getwd()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdefer func() {\n\t\t// replace the original working directory when this funciton completes\n\t\terr := os.Chdir(execDir)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t}()\n\n\t// set the working directory\n\tif err := os.Chdir(workingDir); err != nil {\n\t\treturn \"\", err\n\t}\n\tlog.Println(\"executing: \" + args[0])\n\tcommandString := args[0]\n\texecCmd := exec.Command(\"/bin/sh\", \"-c\", commandString)\n\n\toutReader, outWriter, err := os.Pipe()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdefer func() {\n\t\toutReader.Close()\n\t\toutWriter.Close()\n\t}()\n\texecCmd.Stdout = outWriter\n\texecCmd.Stderr = outWriter\n\toutScanner := bufio.NewScanner(outReader)\n\tvar outBuffer bytes.Buffer\n\tgo func() {\n\t\tfor outScanner.Scan() {\n\t\t\tout := outScanner.Bytes()\n\t\t\toutBuffer.Write(out)\n\t\t\toutBuffer.WriteByte('\\n')\n\t\t\tfmt.Println(string(out))\n\t\t}\n\t}()\n\n\terr = execCmd.Start()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\terr = execCmd.Wait()\n\treturn outBuffer.String(), err\n}", "func (c *VirtLauncherClient) Exec(domainName, command string, args []string, timeoutSeconds int32) (int, string, error) {\n\trequest := &cmdv1.ExecRequest{\n\t\tDomainName: domainName,\n\t\tCommand: command,\n\t\tArgs: args,\n\t\tTimeoutSeconds: int32(timeoutSeconds),\n\t}\n\texitCode := -1\n\tstdOut := \"\"\n\n\tctx, cancel := context.WithTimeout(\n\t\tcontext.Background(),\n\t\t// we give the context a bit more time as the timeout should kick\n\t\t// on the actual execution\n\t\ttime.Duration(timeoutSeconds)*time.Second+shortTimeout,\n\t)\n\tdefer cancel()\n\n\tresp, err := c.v1client.Exec(ctx, request)\n\tif resp == nil {\n\t\treturn exitCode, stdOut, err\n\t}\n\n\texitCode = int(resp.ExitCode)\n\tstdOut = resp.StdOut\n\n\treturn exitCode, stdOut, err\n}", "func ExecShells(sshcfg *ssh.ClientConfig, commands []HostCmd, stdout io.Writer, stderr io.Writer) error {\n\tvar wg sync.WaitGroup\n\toutBuff := make(chan string, 100)\n\terrBuff := make(chan string, 100)\n\t// fork the commands\n\tfor _, cmd := range commands {\n\t\twg.Add(1)\n\t\tgo func(cmd HostCmd) {\n\t\t\t// decrement waitgroup when done\n\t\t\tdefer wg.Done()\n\t\t\t// connect ssh\n\t\t\tcli, err := ssh.Dial(\"tcp4\", fmt.Sprintf(\"%s:%d\", cmd.Host, 22), sshcfg)\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"Error connecting to host %s : %s\", cmd.Host, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tsesh, err := cli.NewSession()\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"Error obtaining session on host %s : %s\", cmd.Host, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\t// pipe outputs\n\t\t\tgo func() {\n\t\t\t\tseshOut, err := sesh.StdoutPipe()\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Printf(\"Error obtaining session stdout on host %s : %s\", cmd.Host, err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\treadLinesToChan(seshOut, \"\", outBuff)\n\t\t\t}()\n\t\t\tgo func() {\n\t\t\t\tseshOut, err := sesh.StderrPipe()\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Printf(\"Error obtaining session stderr on host %s : %s\", cmd.Host, 
err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\treadLinesToChan(seshOut, fmt.Sprintf(\"%s: \", cmd.Host), errBuff)\n\t\t\t}()\n\t\t\t// issue command with proper env\n\t\t\ttoExec := fmt.Sprintf(\"if [ -f ~/.bashrc ]; then source ~/.bashrc ; fi; %s; exit;\", cmd.Cmd)\n\t\t\terr = sesh.Run(toExec)\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"Error running command %s on host %s\", toExec, cmd.Host)\n\t\t\t}\n\t\t\tsesh.Close()\n\t\t}(cmd)\n\t}\n\toutDone := make(chan bool)\n\terrDone := make(chan bool)\n\tgo func() {\n\t\tout := bufio.NewWriter(stdout)\n\t\tfor line := range outBuff {\n\t\t\tout.WriteString(line)\n\t\t\tout.WriteByte('\\n')\n\t\t}\n\t\tout.Flush()\n\t\toutDone <- true\n\t\tclose(outDone)\n\t}()\n\tgo func() {\n\t\terr := bufio.NewWriter(stderr)\n\t\tfor line := range errBuff {\n\t\t\terr.WriteString(line)\n\t\t\terr.WriteByte('\\n')\n\t\t}\n\t\terr.Flush()\n\t\terrDone <- true\n\t\tclose(errDone)\n\t}()\n\twg.Wait()\n\tclose(outBuff)\n\tclose(errBuff)\n\t<-outDone\n\t<-errDone\n\treturn nil\n}", "func Execute() {\n\tif err := ShellCmd.Execute(); err != nil {\n\t\tlog.Info().Err(err)\n\t\tos.Exit(1)\n\t}\n}", "func Exec(command string, args ...string) (string, error) {\n\tLogger.DebugC(color.Yellow, \"$ %v %v\", command, strings.Join(args, \" \"))\n\tb, err := exec.Command(command, args...).CombinedOutput()\n\tLogger.Debug(\"%s\\n\", b)\n\treturn string(b), err\n}", "func CommandExec() *cobra.Command {\n\n\tvar expandCmd = &cobra.Command{\n\t\tUse: \"exec [flags] <command> <shortcuts...>\",\n\t\tExample: \"$ scmpuff exec git add 1-4\",\n\t\tAliases: []string{\"execute\"},\n\t\tShort: \"Execute cmd with numeric shortcuts\",\n\t\tLong: `Expands numeric shortcuts to their full filepath and executes the command.\n\nTakes a list of digits (1 4 5) or numeric ranges (1-5) or even both.`,\n\t\tRun: func(cmd *cobra.Command, inputArgs []string) {\n\t\t\tif len(inputArgs) < 1 {\n\t\t\t\tcmd.Usage()\n\t\t\t\tos.Exit(1)\n\t\t\t}\n\n\t\t\texpandedArgs := Process(inputArgs)\n\t\t\ta := expandedArgs[1:]\n\t\t\tsubcmd := exec.Command(expandedArgs[0], a...)\n\t\t\tsubcmd.Stdin = os.Stdin\n\t\t\tsubcmd.Stdout = os.Stdout\n\t\t\tsubcmd.Stderr = os.Stderr\n\t\t\terr := subcmd.Run()\n\t\t\tif err == nil {\n\t\t\t\tos.Exit(0)\n\t\t\t}\n\t\t\tif exitError, ok := err.(*exec.ExitError); ok {\n\t\t\t\tos.Exit(exitError.ExitCode())\n\t\t\t} else {\n\t\t\t\tfmt.Fprintf(os.Stderr, \"%v\\n\", err)\n\t\t\t\tos.Exit(1)\n\t\t\t}\n\t\t},\n\t}\n\n\t// --relative\n\texpandCmd.Flags().BoolVarP(\n\t\t&expandRelative,\n\t\t\"relative\",\n\t\t\"r\",\n\t\tfalse,\n\t\t\"make path relative to current working directory\",\n\t)\n\n\treturn expandCmd\n}", "func StreamedExec(pipe ProcessStream, name string, args ...string) (string, string) {\n\tcmd := ExecCommand(name, args...)\n\tvar stdoutBuf, stderrBuf bytes.Buffer\n\tcmd.Stdout = io.MultiWriter(pipe, &stdoutBuf)\n\tcmd.Stderr = io.MultiWriter(&stderrBuf)\n\tcmd.Run()\n\toutStr, errStr := string(stdoutBuf.Bytes()), string(stderrBuf.Bytes())\n\n\treturn outStr, errStr\n}", "func (e ExternalCmd) Call(ec *EvalCtx, argVals []Value) {\n\tif DontSearch(e.Name) {\n\t\tstat, err := os.Stat(e.Name)\n\t\tif err == nil && stat.IsDir() {\n\t\t\t// implicit cd\n\t\t\tcdInner(e.Name, ec)\n\t\t\treturn\n\t\t}\n\t}\n\n\tfiles := make([]uintptr, len(ec.ports))\n\tfor i, port := range ec.ports {\n\t\tif port == nil || port.File == nil {\n\t\t\tfiles[i] = fdNil\n\t\t} else {\n\t\t\tfiles[i] = port.File.Fd()\n\t\t}\n\t}\n\n\targs := make([]string, len(argVals)+1)\n\tfor i, a := range argVals 
{\n\t\t// NOTE Maybe we should enfore string arguments instead of coercing all\n\t\t// args into string\n\t\targs[i+1] = ToString(a)\n\t}\n\n\tsys := syscall.SysProcAttr{}\n\tattr := syscall.ProcAttr{Env: os.Environ(), Files: files[:], Sys: &sys}\n\n\tpath, err := ec.Search(e.Name)\n\tif err != nil {\n\t\tthrow(errors.New(\"search: \" + err.Error()))\n\t}\n\n\targs[0] = path\n\tpid, err := syscall.ForkExec(path, args, &attr)\n\tif err != nil {\n\t\tthrow(errors.New(\"forkExec: \" + err.Error()))\n\t}\n\n\tvar ws syscall.WaitStatus\n\t_, err = syscall.Wait4(pid, &ws, 0, nil)\n\tif err != nil {\n\t\tthrow(fmt.Errorf(\"wait: %s\", err.Error()))\n\t} else {\n\t\tmaybeThrow(waitStatusToError(ws))\n\t}\n}", "func (t *task) Exec(ctx context.Context, processID string, spec *specs.Process, withStdin bool, attachStdio libcontainerdtypes.StdioCallback) (libcontainerdtypes.Process, error) {\n\thcsContainer, err := t.getHCSContainer()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tlogger := t.ctr.client.logger.WithFields(log.Fields{\n\t\t\"container\": t.ctr.id,\n\t\t\"exec\": processID,\n\t})\n\n\t// Note we always tell HCS to\n\t// create stdout as it's required regardless of '-i' or '-t' options, so that\n\t// docker can always grab the output through logs. We also tell HCS to always\n\t// create stdin, even if it's not used - it will be closed shortly. Stderr\n\t// is only created if it we're not -t.\n\tcreateProcessParms := &hcsshim.ProcessConfig{\n\t\tCreateStdInPipe: true,\n\t\tCreateStdOutPipe: true,\n\t\tCreateStdErrPipe: !spec.Terminal,\n\t}\n\tif spec.Terminal {\n\t\tcreateProcessParms.EmulateConsole = true\n\t\tif spec.ConsoleSize != nil {\n\t\t\tcreateProcessParms.ConsoleSize[0] = uint(spec.ConsoleSize.Height)\n\t\t\tcreateProcessParms.ConsoleSize[1] = uint(spec.ConsoleSize.Width)\n\t\t}\n\t}\n\n\t// Take working directory from the process to add if it is defined,\n\t// otherwise take from the first process.\n\tif spec.Cwd != \"\" {\n\t\tcreateProcessParms.WorkingDirectory = spec.Cwd\n\t} else {\n\t\tcreateProcessParms.WorkingDirectory = t.ctr.ociSpec.Process.Cwd\n\t}\n\n\t// Configure the environment for the process\n\tcreateProcessParms.Environment = setupEnvironmentVariables(spec.Env)\n\n\t// Configure the CommandLine/CommandArgs\n\tsetCommandLineAndArgs(spec, createProcessParms)\n\tlogger.Debugf(\"exec commandLine: %s\", createProcessParms.CommandLine)\n\n\tcreateProcessParms.User = spec.User.Username\n\n\t// Start the command running in the container.\n\tnewProcess, err := hcsContainer.CreateProcess(createProcessParms)\n\tif err != nil {\n\t\tlogger.WithError(err).Errorf(\"exec's CreateProcess() failed\")\n\t\treturn nil, err\n\t}\n\tpid := newProcess.Pid()\n\tdefer func() {\n\t\tif err != nil {\n\t\t\tif err := newProcess.Kill(); err != nil {\n\t\t\t\tlogger.WithError(err).Error(\"failed to kill process\")\n\t\t\t}\n\t\t\tgo func() {\n\t\t\t\tif err := newProcess.Wait(); err != nil {\n\t\t\t\t\tlogger.WithError(err).Error(\"failed to wait for process\")\n\t\t\t\t}\n\t\t\t\tif err := newProcess.Close(); err != nil {\n\t\t\t\t\tlogger.WithError(err).Error(\"failed to clean process resources\")\n\t\t\t\t}\n\t\t\t}()\n\t\t}\n\t}()\n\n\tdio, err := newIOFromProcess(newProcess, spec.Terminal)\n\tif err != nil {\n\t\tlogger.WithError(err).Error(\"failed to get stdio pipes\")\n\t\treturn nil, err\n\t}\n\t// Tell the engine to attach streams back to the client\n\t_, err = attachStdio(dio)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tp := &process{\n\t\tid: processID,\n\t\tctr: 
t.ctr,\n\t\thcsProcess: newProcess,\n\t\twaitCh: make(chan struct{}),\n\t}\n\n\t// Spin up a goroutine to notify the backend and clean up resources when\n\t// the process exits. Defer until after the start event is sent so that\n\t// the exit event is not sent out-of-order.\n\tdefer func() { go p.reap() }()\n\n\tt.ctr.client.eventQ.Append(t.ctr.id, func() {\n\t\tei := libcontainerdtypes.EventInfo{\n\t\t\tContainerID: t.ctr.id,\n\t\t\tProcessID: p.id,\n\t\t\tPid: uint32(pid),\n\t\t}\n\t\tt.ctr.client.logger.WithFields(log.Fields{\n\t\t\t\"container\": t.ctr.id,\n\t\t\t\"event\": libcontainerdtypes.EventExecAdded,\n\t\t\t\"event-info\": ei,\n\t\t}).Info(\"sending event\")\n\t\terr := t.ctr.client.backend.ProcessEvent(t.ctr.id, libcontainerdtypes.EventExecAdded, ei)\n\t\tif err != nil {\n\t\t\tt.ctr.client.logger.WithError(err).WithFields(log.Fields{\n\t\t\t\t\"container\": t.ctr.id,\n\t\t\t\t\"event\": libcontainerdtypes.EventExecAdded,\n\t\t\t\t\"event-info\": ei,\n\t\t\t}).Error(\"failed to process event\")\n\t\t}\n\t\terr = t.ctr.client.backend.ProcessEvent(t.ctr.id, libcontainerdtypes.EventExecStarted, ei)\n\t\tif err != nil {\n\t\t\tt.ctr.client.logger.WithError(err).WithFields(log.Fields{\n\t\t\t\t\"container\": t.ctr.id,\n\t\t\t\t\"event\": libcontainerdtypes.EventExecStarted,\n\t\t\t\t\"event-info\": ei,\n\t\t\t}).Error(\"failed to process event\")\n\t\t}\n\t})\n\n\treturn p, nil\n}", "func cmdLine() string {\n\treturn \"go run mksyscall_aix_ppc64.go \" + strings.Join(os.Args[1:], \" \")\n}", "func (cl *Client) ExecString(cmd string, args ...interface{}) (string, error) {\n\tvar s string\n\terr := cl.Conn(func(c *Conn) error {\n\t\tvar err error\n\t\ts, err = c.ExecString(cmd, args...)\n\t\treturn err\n\t})\n\treturn s, err\n}", "func Exec(config *ssh.ClientConfig, addr string, workDir string, cmd string, nixConf string) (bytes.Buffer, error) {\n\tvar b bytes.Buffer // import \"bytes\"\n\n\t// Connect\n\tclient, err := ssh.Dial(\"tcp\", net.JoinHostPort(addr, \"22\"), config)\n\tif err != nil {\n\t\treturn b, err\n\t}\n\t// Create a session. It is one session per command.\n\tsession, err := client.NewSession()\n\tif err != nil {\n\t\treturn b, err\n\t}\n\tdefer session.Close()\n\n\tsession.Stderr = os.Stderr // get output\n\tsession.Stdout = &b // get output\n\t// you can also pass what gets input to the stdin, allowing you to pipe\n\t// content from client to server\n\t// session.Stdin = bytes.NewBufferString(\"My input\")\n\n\t// Finally, run the command\n\tfullCmd := \". 
~/.nix-profile/etc/profile.d/nix.sh && cd \" + workDir + \" && nix-shell \" + nixConf + \" --command '\" + cmd + \"'\"\n\tfmt.Println(fullCmd)\n\terr = session.Run(fullCmd)\n\treturn b, err\n}", "func (p *Program) exec(c ExecCommand, fn ExecCallback) {\n\tif err := p.ReleaseTerminal(); err != nil {\n\t\t// If we can't release input, abort.\n\t\tif fn != nil {\n\t\t\tgo p.Send(fn(err))\n\t\t}\n\t\treturn\n\t}\n\n\tc.SetStdin(p.input)\n\tc.SetStdout(p.output)\n\tc.SetStderr(os.Stderr)\n\n\t// Execute system command.\n\tif err := c.Run(); err != nil {\n\t\t_ = p.RestoreTerminal() // also try to restore the terminal.\n\t\tif fn != nil {\n\t\t\tgo p.Send(fn(err))\n\t\t}\n\t\treturn\n\t}\n\n\t// Have the program re-capture input.\n\terr := p.RestoreTerminal()\n\tif fn != nil {\n\t\tgo p.Send(fn(err))\n\t}\n}", "func RunCmd(cmdstr string) string {\n\treturn RunAs(cmdstr, \"\")\n}", "func (a Adapter) execTestCmd(testCmd versionsCommon.DevfileCommand, containers []types.Container, show bool) (err error) {\n\tcontainerID := utils.GetContainerIDForAlias(containers, testCmd.Exec.Component)\n\tcompInfo := common.ComponentInfo{ContainerName: containerID}\n\terr = exec.ExecuteDevfileCommandSynchronously(&a.Client, *testCmd.Exec, testCmd.Exec.Id, compInfo, show, a.machineEventLogger, false)\n\treturn\n}", "func executeCmd(path string, args []string, env []string, dir string) ([]byte, error) {\n\t// Create context with timeout\n\tctx, cancel := context.WithTimeout(context.Background(), maxTimeoutMinutes*time.Minute)\n\tdefer cancel()\n\n\t// Create command\n\tcmd := execCommandContext(ctx, path, args...)\n\tcmd.Env = env\n\tcmd.Dir = dir\n\n\t// Execute command\n\treturn cmd.CombinedOutput()\n}", "func ExecuteCmd(cmd string) (string, error) {\n\tvar stdout, stderr bytes.Buffer\n\texe := exec.Command(\"sh\", \"-c\", cmd)\n\texe.Stderr = &stderr\n\texe.Stdout = &stdout\n\terr := exe.Run()\n\terrorResult := string(stderr.Bytes())\n\tif len(errorResult) != 0 && !strings.Contains(errorResult, \"deprecated\") {\n\t\treturn \"\", errors.New(errorResult)\n\t}\n\tif err != nil && !strings.Contains(errorResult, \"deprecated\") {\n\t\treturn \"\", errors.New(fmt.Sprintf(\"failed in executing the azure command: %s\", cmd))\n\t}\n\toutput := string(stdout.Bytes())\n\tif len(output) != 0 {\n\t\treturn output, nil\n\t}\n\treturn \"\", nil\n}", "func runExec(serviceName string, operation string) (string, error) {\n\tbytes, err := exec.Command(Configuration.ExecutorPath, serviceName, operation).CombinedOutput()\n\treturn string(bytes), err\n}", "func execCommand(name string, arg ...string) error {\n\tcmd := exec.Command(name, arg...)\n\t// For locating the GCC runtime library (libgcc_s.so.1):\n\tcmd.Env = append(os.Environ(), \"LD_LIBRARY_PATH=/ro/lib:/ro/lib64\")\n\tcmd.Stdin = os.Stdin\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\tif err := cmd.Run(); err != nil {\n\t\treturn fmt.Errorf(\"%v: %v\", cmd.Args, err)\n\t}\n\treturn nil\n}", "func Exec(t testing.TB, cmd *cobra.Command, stdIn io.Reader, args ...string) (string, string, error) {\n\tctx, cancel := context.WithCancel(context.Background())\n\tt.Cleanup(cancel)\n\n\treturn ExecCtx(ctx, cmd, stdIn, args...)\n}", "func Exec(rootCmd *RootCommand) (err error) {\n\terr = InternalExecFor(rootCmd, os.Args)\n\treturn\n}", "func (client *MockPodExecClient) RunPodExecCommand(ctx context.Context, streamOptions *remotecommand.StreamOptions, baseCmd []string) (string, string, error) {\n\n\tvar mockPodExecReturnContext *MockPodExecReturnContext = 
&MockPodExecReturnContext{}\n\tvar command string\n\t// This is to prevent the crash in the case where streamOptions.Stdin is anything other than *strings.Reader\n\t// In most of the cases the base command will be /bin/sh but if it is something else, it can be reading from\n\t// a io.Reader pipe. For e.g. tarring a file, writing to a write pipe and then untarring it on the pod by reading\n\t// from the reader pipe.\n\tif baseCmd[0] == \"/bin/sh\" {\n\t\tvar cmdStr string\n\t\tstreamOptionsCmd := streamOptions.Stdin.(*strings.Reader)\n\t\tfor i := 0; i < int(streamOptionsCmd.Size()); i++ {\n\t\t\tcmd, _, _ := streamOptionsCmd.ReadRune()\n\t\t\tcmdStr = cmdStr + string(cmd)\n\t\t}\n\n\t\tmockPodExecReturnContext, command = client.GetMockPodExecReturnContextAndKey(ctx, cmdStr)\n\t\tif mockPodExecReturnContext == nil {\n\t\t\terr := fmt.Errorf(\"mockPodExecReturnContext is nil\")\n\t\t\treturn \"\", \"\", err\n\t\t}\n\t}\n\n\t// check if the command is already added or not in the list of GotCmdList\n\tvar found bool\n\tfor i := range client.GotCmdList {\n\t\tif command == client.GotCmdList[i] {\n\t\t\tfound = true\n\t\t\tbreak\n\t\t}\n\t}\n\tif !found {\n\t\tclient.GotCmdList = append(client.GotCmdList, command)\n\t}\n\n\treturn mockPodExecReturnContext.StdOut, mockPodExecReturnContext.StdErr, mockPodExecReturnContext.Err\n}", "func startShell(db *database.Database) error {\n\te := exec.NewExecutor()\n\n\tshell := shell.NewShell()\n\tfor {\n\t\tline, err := shell.ReadLine()\n\t\tif err != nil {\n\t\t\tif err != io.EOF {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\treturn nil\n\t\t}\n\n\t\tprogram, err := parser.Parse(line)\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"parsing error: %s\\n\", err)\n\t\t\tcontinue\n\t\t}\n\n\t\te.Execute(program, db)\n\t}\n}", "func ExecCommand(args ...string) ([]byte, error) {\n\te := New()\n\tcmd := e.ExecCommand(NoSandbox, false, args[0], args[1:]...)\n\treturn cmd.CombinedOutput()\n}", "func (ft *LsTask) Exec(ctx *Context, p *par.Params, out *io.PipeWriter) {\n\tlog.Info(\"LsTask.Execute\")\n\n\tpath, ok := p.Props[\"path\"]\n\n\t// if no passed in path use default\n\tif !ok {\n\t\tpath = ft.path\n\t}\n\n\tif path == \"\" {\n\t\tp.Status = par.StFail\n\t\tp.Response = \"no path specified\"\n\t\treturn\n\t}\n\n\t// this is mandatory node\n\tpath = filepath.Join(ctx.WorkspacePath, path)\n\n\tfiles, _ := ioutil.ReadDir(path)\n\tfor _, f := range files {\n\t\tp.Props[fmt.Sprint(f.Name())] = \"\"\n\t}\n\n\tp.Response = \"list directory done\"\n\tp.Status = par.StSuccess\n\n\treturn\n}", "func (r *RemoteShell) Execute(ctx context.Context, cmd string) ([]byte, error) {\n\tsshCmd, err := r.conn.CommandContext(ctx, cmd)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn sshCmd.CombinedOutput()\n}", "func execCommand(log bool, name string, args ...string) (bytes.Buffer, bytes.Buffer, error) {\n\tvar (\n\t\tstdout bytes.Buffer\n\t\tstderr bytes.Buffer\n\t)\n\n\tcmd := exec.Command(name, args...)\n\tcmd.Stdout = &stdout\n\tcmd.Stderr = &stderr\n\terr := cmd.Run()\n\tif log {\n\t\tLogf(\"run command '%s %v':\\n out=%s\\n err=%s\\n ret=%v\",\n\t\t\tname, args, strings.TrimSpace(stdout.String()), strings.TrimSpace(stderr.String()), err)\n\t}\n\n\treturn stdout, stderr, err\n}", "func executeCmd(cmd string, args ...string) error {\n\tcommand := exec.Command(cmd, args...) 
// #nosec\n\tbytes, err := command.CombinedOutput()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"%s: %s\", err, bytes)\n\t}\n\treturn nil\n}", "func (si *shellInterface) RunCmd(context context.Context, operation string, script, env []string) (string, error) {\n\tfileName, _, err := si.runCmd(context, operation, script, env, false)\n\treturn fileName, err\n}", "func RunExecV(c string) string {\n x := exec.Command(\"bash\", \"-c\", c)\n out, err := x.CombinedOutput()\n if err != nil {\n log.Fatalf(\"Error: %s\\n\", err)\n }\n r := fmt.Sprintf(\"%s\", out)\n z := strings.Replace(r, \"\\n\", \"\", -1)\n return z\n}", "func wrapExecCommand(c *exec.Cmd) ExecCommand {\n\treturn &osExecCommand{Cmd: c}\n}", "func TestExecuteFile(t *testing.T) {\n\ttestfile := tests.Testdir + \"/ex1.sh\"\n\n\tvar out bytes.Buffer\n\tshell, cleanup := newTestShell(t)\n\tdefer cleanup()\n\n\tshell.SetNashdPath(tests.Nashcmd)\n\tshell.SetStdout(&out)\n\tshell.SetStderr(os.Stderr)\n\tshell.SetStdin(os.Stdin)\n\n\terr := shell.ExecuteFile(testfile)\n\tif err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\n\tif string(out.Bytes()) != \"hello world\\n\" {\n\t\tt.Errorf(\"Wrong command output: '%s'\", string(out.Bytes()))\n\t\treturn\n\t}\n}" ]
[ "0.6343495", "0.61270344", "0.5937417", "0.5901834", "0.5735678", "0.5730066", "0.5700993", "0.567958", "0.562397", "0.56186557", "0.56005836", "0.5578034", "0.55685246", "0.55685246", "0.551668", "0.547903", "0.54406506", "0.543145", "0.54090106", "0.5396964", "0.53954333", "0.5393781", "0.536616", "0.53383523", "0.5328432", "0.53050506", "0.52905226", "0.52692825", "0.5264499", "0.52629906", "0.52462757", "0.5243395", "0.52373135", "0.52143687", "0.521003", "0.5187251", "0.5167108", "0.51610184", "0.515674", "0.5156591", "0.5149274", "0.51448876", "0.51443243", "0.512694", "0.51091427", "0.5095179", "0.5086441", "0.5082356", "0.5080105", "0.5077601", "0.5072062", "0.5061703", "0.5061572", "0.5059358", "0.5052012", "0.504573", "0.5043456", "0.50408673", "0.50301945", "0.5024451", "0.5013832", "0.50107396", "0.50081307", "0.50067604", "0.49829656", "0.4968925", "0.49608836", "0.49528262", "0.49483854", "0.49381652", "0.49289042", "0.49226406", "0.49184036", "0.4916918", "0.4914149", "0.49129495", "0.49110243", "0.4908439", "0.49067333", "0.49024412", "0.48989472", "0.489862", "0.48978055", "0.48977435", "0.4896054", "0.4885503", "0.48851648", "0.48718387", "0.4868867", "0.48688632", "0.48668665", "0.48663622", "0.48659384", "0.4864364", "0.4860309", "0.48555073", "0.48551026", "0.48507524", "0.4846796", "0.48439765" ]
0.75436246
0
ThreeWay attempts a three-way merge between two candidates and a common ancestor. It considers the three of them recursively, applying some simple rules to identify conflicts:
- If any of the three nodes are different NomsKinds: conflict
- If we are dealing with a map:
  - If the same key is both removed and inserted wrt parent: conflict
  - If the same key is inserted wrt parent, but with different values: conflict
- If we are dealing with a struct:
  - If the same field is both removed and inserted wrt parent: conflict
  - If the same field is inserted wrt parent, but with different values: conflict
- If we are dealing with a list:
  - If the same index is both removed and inserted wrt parent: conflict
  - If the same index is inserted wrt parent, but with different values: conflict
- If we are dealing with a set:
  - If the same object is both removed and inserted wrt parent: conflict

All other modifications are allowed. Currently, ThreeWay() only works on types.Map.
func ThreeWay(a, b, parent types.Value, vwr types.ValueReadWriter) (merged types.Value, err error) { if a == nil && b == nil { return parent, nil } else if a == nil { return parent, newMergeConflict("Cannot merge nil Value with %s.", b.Type().Describe()) } else if b == nil { return parent, newMergeConflict("Cannot merge %s with nil value.", a.Type().Describe()) } else if unmergeable(a, b) { return parent, newMergeConflict("Cannot merge %s with %s.", a.Type().Describe(), b.Type().Describe()) } return threeWayMerge(a, b, parent, vwr) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (s *mergeBaseSuite) TestDoubleCommonAncestorInCrossMerge(c *C) {\n\trevs := []string{\"C\", \"D\"}\n\texpectedRevs := []string{\"CD1\", \"CD2\"}\n\ts.AssertMergeBase(c, revs, expectedRevs)\n}", "func createThreeWayMergePatch(found, child *unstructured.Unstructured) ([]byte, error) {\n\toriginal, err := getLastAppliedObject(found)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unable to get last applied object: %v\", err)\n\t}\n\tfoundJSON, childJSON, originalJSON, err := getJSON(found, child, original)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error getting JSON: %v\", err)\n\t}\n\n\tpatch, err := createThreeWayJSONMergePatch(originalJSON, childJSON, foundJSON)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unable to create three way merge patch: %v\", err)\n\t}\n\treturn patch, nil\n}", "func ReadTreeThreeWay(c *Client, opt ReadTreeOptions, stage1, stage2, stage3 Treeish) (*Index, error) {\n\tidx, err := c.GitDir.ReadIndex()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresetremovals, err := checkReadtreePrereqs(c, opt, idx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\torigMap := idx.GetMap()\n\n\tbase, err := GetIndexMap(c, stage1)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tours, err := GetIndexMap(c, stage2)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ttheirs, err := GetIndexMap(c, stage3)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Create a slice which contins all objects in base, ours, or theirs\n\tvar allPaths []*IndexEntry\n\tfor path, _ := range base {\n\t\tallPaths = append(allPaths, &IndexEntry{PathName: path})\n\t}\n\tfor path, _ := range ours {\n\t\tallPaths = append(allPaths, &IndexEntry{PathName: path})\n\t}\n\tfor path, _ := range theirs {\n\t\tallPaths = append(allPaths, &IndexEntry{PathName: path})\n\t}\n\t// Sort to ensure directories come before files.\n\tsort.Sort(ByPath(allPaths))\n\n\t// Remove duplicates and exclude files that aren't part of the\n\t// sparse checkout rules if applicable.\n\tvar allObjects []IndexPath\n\tfor i := range allPaths {\n\t\tif i > 0 && allPaths[i].PathName == allPaths[i-1].PathName {\n\t\t\tcontinue\n\t\t}\n\t\tallObjects = append(allObjects, allPaths[i].PathName)\n\t}\n\tvar dirs []IndexPath\n\n\t// Checking for merge conflict with index. If this seems like a confusing mess, it's mostly\n\t// because it was written to pass the t1000-read-tree-m-3way test case from the official git\n\t// test suite.\n\t//\n\t// The logic can probably be cleaned up.\n\tfor path, orig := range origMap {\n\t\to, ok := ours[path]\n\t\tif !ok {\n\t\t\t// If it's been added to the index in the same state as Stage 3, and it's not in\n\t\t\t// stage 1 or 2 it's fine.\n\t\t\tif !base.Contains(path) && !ours.Contains(path) && samePath(origMap, theirs, path) {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\treturn idx, fmt.Errorf(\"Entry '%v' would be overwritten by a merge. 
Cannot merge.\", path)\n\t\t}\n\n\t\t// Variable names mirror the O/A/B from the test suite, with \"c\" for contains\n\t\toc := base.Contains(path)\n\t\tac := ours.Contains(path)\n\t\tbc := theirs.Contains(path)\n\n\t\tif oc && ac && bc {\n\t\t\toa := samePath(base, ours, path)\n\t\t\tob := samePath(base, theirs, path)\n\n\t\t\t// t1000-read-tree-m-3way.sh test 75 \"must match A in O && A && B && O!=A && O==B case.\n\t\t\t// (This means we can't error out if the Sha1s dont match.)\n\t\t\tif !oa && ob {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif oa && !ob {\n\t\t\t\t// Relevent cases:\n\t\t\t\t// Must match and be up-to-date in O && A && B && O==A && O!=B\n\t\t\t\t// May match B in O && A && B && O==A && O!=B\n\t\t\t\tb, ok := theirs[path]\n\t\t\t\tif ok && b.Sha1 == orig.Sha1 {\n\t\t\t\t\tcontinue\n\t\t\t\t} else if !path.IsClean(c, o.Sha1) {\n\t\t\t\t\treturn idx, fmt.Errorf(\"Entry '%v' would be overwritten by a merge. Cannot merge.\", path)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\t// Must match and be up-to-date in !O && A && B && A != B case test from AND\n\t\t// Must match and be up-to-date in O && A && B && A != B case test from\n\t\t// t1000-read-tree-m-3way.sh in official git\n\t\tif ac && bc && !samePath(ours, theirs, path) {\n\t\t\tif !path.IsClean(c, o.Sha1) {\n\t\t\t\treturn idx, fmt.Errorf(\"Entry '%v' would be overwritten by a merge. Cannot merge.\", path)\n\t\t\t}\n\t\t}\n\n\t\t// Must match and be up-to-date in O && A && !B && !B && O != A case AND\n\t\t// Must match and be up-to-date in O && A && !B && !B && O == A case from\n\t\t// t1000-read-tree-m-3way.sh in official git\n\t\tif oc && ac && !bc {\n\t\t\tif !path.IsClean(c, o.Sha1) {\n\t\t\t\treturn idx, fmt.Errorf(\"Entry '%v' would be overwritten by a merge. Cannot merge.\", path)\n\t\t\t}\n\t\t}\n\n\t\tif o.Sha1 != orig.Sha1 {\n\t\t\treturn idx, fmt.Errorf(\"Entry '%v' would be overwritten by a merge. 
Cannot merge.\", path)\n\t\t}\n\t}\n\tidx = NewIndex()\npaths:\n\tfor _, path := range allObjects {\n\t\t// Handle directory/file conflicts.\n\t\tif base.HasDir(path) || ours.HasDir(path) || theirs.HasDir(path) {\n\t\t\tif !opt.Merge && !opt.Reset {\n\t\t\t\t// If not merging, the file wins.\n\t\t\t\t// see http://www.stackoverflow.com/questions/52175720/how-does-git-read-tree-work-without-m-or-reset-option\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t// Keep track of what was a directory so that other\n\t\t\t// other paths know if they had a conflict higher\n\t\t\t// up in the tree.\n\t\t\tdirs = append(dirs, path)\n\n\t\t\t// Add the non-directory version fo the appropriate stage\n\t\t\tif p, ok := base[path]; ok {\n\t\t\t\tidx.AddStage(c, path, p.Mode, p.Sha1, Stage1, p.Fsize, time.Now().UnixNano(), UpdateIndexOptions{Add: true})\n\t\t\t}\n\t\t\tif p, ok := ours[path]; ok {\n\t\t\t\tidx.AddStage(c, path, p.Mode, p.Sha1, Stage2, p.Fsize, time.Now().UnixNano(), UpdateIndexOptions{Add: true})\n\t\t\t}\n\t\t\tif p, ok := theirs[path]; ok {\n\t\t\t\tidx.AddStage(c, path, p.Mode, p.Sha1, Stage3, p.Fsize, time.Now().UnixNano(), UpdateIndexOptions{Add: true})\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\t// Handle the subfiles in any directory that had a conflict\n\t\t// by just adding them in the appropriate stage, because\n\t\t// there's no way for a directory and file to not be in\n\t\t// conflict.\n\t\tfor _, d := range dirs {\n\t\t\tif strings.HasPrefix(string(path), string(d+\"/\")) {\n\t\t\t\tif p, ok := base[path]; ok {\n\t\t\t\t\tif err := idx.AddStage(c, path, p.Mode, p.Sha1, Stage1, p.Fsize, time.Now().UnixNano(), UpdateIndexOptions{Add: true}); err != nil {\n\t\t\t\t\t\treturn nil, err\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif p, ok := ours[path]; ok {\n\t\t\t\t\tif err := idx.AddStage(c, path, p.Mode, p.Sha1, Stage2, p.Fsize, time.Now().UnixNano(), UpdateIndexOptions{Add: true, Replace: true}); err != nil {\n\t\t\t\t\t\treturn nil, err\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif p, ok := theirs[path]; ok {\n\t\t\t\t\tif err := idx.AddStage(c, path, p.Mode, p.Sha1, Stage3, p.Fsize, time.Now().UnixNano(), UpdateIndexOptions{Add: true}); err != nil {\n\t\t\t\t\t\treturn nil, err\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tcontinue paths\n\t\t\t}\n\t\t}\n\n\t\t// From here on out, we assume everything is a file.\n\n\t\t// All three trees are the same, don't do anything to the index.\n\t\tif samePath(base, ours, path) && samePath(base, theirs, path) {\n\t\t\tif err := idx.AddStage(c, path, ours[path].Mode, ours[path].Sha1, Stage0, ours[path].Fsize, time.Now().UnixNano(), UpdateIndexOptions{Add: true}); err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\t// If both stage2 and stage3 are the same, the work has been done in\n\t\t// both branches, so collapse to stage0 (use our changes)\n\t\tif samePath(ours, theirs, path) {\n\t\t\tif ours.Contains(path) {\n\t\t\t\tif err := idx.AddStage(c, path, ours[path].Mode, ours[path].Sha1, Stage0, ours[path].Fsize, time.Now().UnixNano(), UpdateIndexOptions{Add: true}); err != nil {\n\t\t\t\t\tpanic(err)\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\t// If stage1 and stage2 are the same, our branch didn't do anything,\n\t\t// but theirs did, so take their changes.\n\t\tif samePath(base, ours, path) {\n\t\t\tif theirs.Contains(path) {\n\t\t\t\tif err := idx.AddStage(c, path, theirs[path].Mode, theirs[path].Sha1, Stage0, theirs[path].Fsize, time.Now().UnixNano(), UpdateIndexOptions{Add: true}); err != nil 
{\n\t\t\t\t\tpanic(err)\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\t// If stage1 and stage3 are the same, we did something\n\t\t// but they didn't, so take our changes\n\t\tif samePath(base, theirs, path) {\n\t\t\tif ours.Contains(path) {\n\t\t\t\to := ours[path]\n\t\t\t\tif err := idx.AddStage(c, path, o.Mode, o.Sha1, Stage0, o.Fsize, time.Now().UnixNano(), UpdateIndexOptions{Add: true}); err != nil {\n\t\t\t\t\tpanic(err)\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\t// We couldn't short-circuit out, so add all three stages.\n\n\t\t// Remove Stage0 if it exists. If it doesn't, then at worst we'll\n\t\t// remove a stage that we're about to add back.\n\t\tidx.RemoveFile(path)\n\n\t\tif b, ok := base[path]; ok {\n\t\t\tidx.AddStage(c, path, b.Mode, b.Sha1, Stage1, b.Fsize, time.Now().UnixNano(), UpdateIndexOptions{Add: true})\n\t\t}\n\t\tif o, ok := ours[path]; ok {\n\t\t\tidx.AddStage(c, path, o.Mode, o.Sha1, Stage2, o.Fsize, time.Now().UnixNano(), UpdateIndexOptions{Add: true})\n\t\t}\n\t\tif t, ok := theirs[path]; ok {\n\t\t\tidx.AddStage(c, path, t.Mode, t.Sha1, Stage3, t.Fsize, time.Now().UnixNano(), UpdateIndexOptions{Add: true})\n\t\t}\n\t}\n\n\tif err := checkMergeAndUpdate(c, opt, origMap, idx, resetremovals); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn idx, readtreeSaveIndex(c, opt, idx)\n}", "func (s *mergeBaseSuite) TestCommonAncestorInMergedOrphans(c *C) {\n\trevs := []string{\"A\", \"B\"}\n\texpectedRevs := []string{\"AB\"}\n\ts.AssertMergeBase(c, revs, expectedRevs)\n}", "func TestMerge3_Merge_fail(t *testing.T) {\n\t// TODO: make this test pass on windows -- currently failing due to comment whitespace changes\n\ttestutil.SkipWindows(t)\n\n\t_, datadir, _, ok := runtime.Caller(0)\n\tif !assert.True(t, ok) {\n\t\tt.FailNow()\n\t}\n\tdatadir = filepath.Join(filepath.Dir(datadir), \"testdata2\")\n\n\t// setup the local directory\n\tdir := t.TempDir()\n\n\tif !assert.NoError(t, copyutil.CopyDir(\n\t\tfilesys.MakeFsOnDisk(),\n\t\tfilepath.Join(datadir, \"dataset1-localupdates\"),\n\t\tfilepath.Join(dir, \"dataset1\"))) {\n\t\tt.FailNow()\n\t}\n\n\terr := filters.Merge3{\n\t\tOriginalPath: filepath.Join(datadir, \"dataset1\"),\n\t\tUpdatedPath: filepath.Join(datadir, \"dataset1-remoteupdates\"),\n\t\tDestPath: filepath.Join(dir, \"dataset1\"),\n\t\tMatcher: &filters.DefaultGVKNNMatcher{MergeOnPath: false},\n\t}.Merge()\n\tif !assert.Error(t, err) {\n\t\tt.FailNow()\n\t}\n}", "func lowestCommonAncestor2(root, p, q *TreeNode) *TreeNode {\n\tvar stk []*pair\n\tif root != nil {\n\t\tstk = append(stk, &pair{N: root, S: bothPending})\n\t}\n\n\tvar foundOne bool\n\tvar lca *TreeNode\n\n\tfor len(stk) != 0 {\n\t\tparent := stk[len(stk)-1]\n\n\t\tif parent.S == bothDone {\n\t\t\t// pop\n\t\t\tstk = stk[:len(stk)-1]\n\t\t\t// point to last ancestor\n\t\t\tif parent.N == lca && foundOne && len(stk) > 0 {\n\t\t\t\tlca = stk[len(stk)-1].N\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\tvar childNode *TreeNode\n\t\tif parent.S == bothPending {\n\t\t\tif parent.N == p || parent.N == q {\n\t\t\t\tif foundOne {\n\t\t\t\t\treturn lca\n\t\t\t\t}\n\n\t\t\t\tfoundOne = true\n\t\t\t\tlca = parent.N\n\t\t\t}\n\n\t\t\t// if both pending, traverse the left child\n\t\t\tchildNode = parent.N.Left\n\t\t} else {\n\t\t\tchildNode = parent.N.Right\n\t\t}\n\n\t\tparent.S++ // -> leftDone -> bothDone\n\n\t\tif childNode != nil {\n\t\t\tstk = append(stk, &pair{N: childNode, S: bothPending})\n\t\t}\n\t}\n\n\treturn nil\n}", "func (g *Graph) FindLowestCommonAncestor(refs ...string) (*Object, error) 
{\n\tif len(refs) < 2 {\n\t\treturn nil, fmt.Errorf(\"Not enough references given to find ancestor: Found %v but need at least 2\", len(refs))\n\t}\n\n\t// Extract the right reference and process errors or inexistent references\n\tvar leftRef, found, err = g.ReferenceAdapter.ReadReference(refs[0])\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Error while reading reference %s\", refs[0])\n\t} else if !found {\n\t\treturn nil, fmt.Errorf(\"Cannot find reference %s\", refs[0])\n\t}\n\n\tvar leftID = leftRef.TargetID\n\tvar rightID ObjectID\n\n\tif len(refs) > 2 {\n\t\tvar recLeft, recErr = g.FindLowestCommonAncestor(refs[1:]...)\n\t\tif recErr != nil {\n\t\t\treturn nil, recErr\n\t\t} else if recLeft == nil {\n\t\t\treturn nil, fmt.Errorf(\"Cannot find lowest common ancestor\")\n\t\t} else {\n\t\t\trightID = recLeft.ID\n\t\t}\n\t} else {\n\t\tvar rightRef, found, refErr = g.ReferenceAdapter.ReadReference(refs[1])\n\t\tif refErr != nil {\n\t\t\treturn nil, fmt.Errorf(\"Error while reading reference %s\", refs[1])\n\t\t} else if !found {\n\t\t\treturn nil, fmt.Errorf(\"Cannot find reference %s\", refs[1])\n\t\t}\n\t\trightID = rightRef.TargetID\n\t}\n\n\t// Parses the graph to a tree beginning at the specified id\n\tleftNodes, _, err := g.toTree(leftID)\n\n\t// Find all intersection object with leftNodes\n\t// Function that analyzes whether the given object represents a collision\n\tvar isCollision = func(obj *Object) bool {\n\t\tvar _, exists = leftNodes[obj.ID]\n\t\treturn exists\n\t}\n\n\t// Records the node where a collision happens\n\tvar collisions = []*tree.TreeNode{}\n\tvar collisionRecorder = func(node *tree.TreeNode) {\n\t\tcollisions = append(collisions, node)\n\t}\n\n\t_, _, err = g.toCollisionTerminatedTree(rightID, isCollision, collisionRecorder)\n\n\tvar shortestCollisionPoint ObjectID\n\tvar shortestCollisionPathLenght int64\n\tshortestCollisionPathLenght = math.MaxInt64\n\n\t// Iterate over all collisions and find the one with the shortest path length\n\tfor _, k := range collisions {\n\t\tvar id = ObjectID(k.ID.(ObjectID))\n\t\tvar totalPathLength = k.Depth + leftNodes[id].Depth\n\t\tif totalPathLength < shortestCollisionPathLenght {\n\t\t\tshortestCollisionPathLenght = totalPathLength\n\t\t\tshortestCollisionPoint = id\n\t\t}\n\t}\n\n\tobj, err := g.ObjectAdapter.ReadObject(shortestCollisionPoint[:])\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Cannot find object with id %#x\", shortestCollisionPoint[:4])\n\t}\n\n\treturn &obj, nil\n}", "func (cs *ColStore) makeWay(node1, node2 *ColStoreNode) {\n\tswitch {\n\tcase node1.Col.Max < node2.Col.Min:\n\t\t// The node2 starts after node1 ends, there's no overlap\n\t\t//\n\t\t// Node1 |----|\n\t\t// Node2 |----|\n\t\tif node1.Next != nil {\n\t\t\tif node1.Next.Col.Min <= node2.Col.Max {\n\t\t\t\tcs.makeWay(node1.Next, node2)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tcs.addNode(node1, node2, node1.Next)\n\t\t\treturn\n\t\t}\n\t\tcs.addNode(node1, node2, nil)\n\t\treturn\n\n\tcase node1.Col.Min > node2.Col.Max:\n\t\t// Node2 ends before node1 begins, there's no overlap\n\t\t//\n\t\t// Node1 |-----|\n\t\t// Node2 |----|\n\t\tif node1.Prev != nil {\n\t\t\tif node1.Prev.Col.Max >= node2.Col.Min {\n\t\t\t\tcs.makeWay(node1.Prev, node2)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tcs.addNode(node1.Prev, node2, node1)\n\t\t\treturn\n\t\t}\n\t\tcs.addNode(nil, node2, node1)\n\t\treturn\n\n\tcase node1.Col.Min == node2.Col.Min && node1.Col.Max == node2.Col.Max:\n\t\t// Exact match\n\t\t//\n\t\t// Node1 |xxx|\n\t\t// Node2 |---|\n\n\t\tprev := 
node1.Prev\n\t\tnext := node1.Next\n\t\tcs.removeNode(node1)\n\t\tcs.addNode(prev, node2, next)\n\t\t// Remove node may have set the root to nil\n\t\tif cs.Root == nil {\n\t\t\tcs.Root = node2\n\t\t}\n\t\treturn\n\n\tcase node1.Col.Min > node2.Col.Min && node1.Col.Max < node2.Col.Max:\n\t\t// Node2 envelopes node1\n\t\t//\n\t\t// Node1 |xx|\n\t\t// Node2 |----|\n\n\t\tprev := node1.Prev\n\t\tnext := node1.Next\n\t\tcs.removeNode(node1)\n\t\tswitch {\n\t\tcase prev == node2:\n\t\t\tnode2.Next = next\n\t\tcase next == node2:\n\t\t\tnode2.Prev = prev\n\t\tdefault:\n\t\t\tcs.addNode(prev, node2, next)\n\t\t}\n\n\t\tif node2.Prev != nil && node2.Prev.Col.Max >= node2.Col.Min {\n\t\t\tcs.makeWay(prev, node2)\n\t\t}\n\t\tif node2.Next != nil && node2.Next.Col.Min <= node2.Col.Max {\n\t\t\tcs.makeWay(next, node2)\n\t\t}\n\n\t\tif cs.Root == nil {\n\t\t\tcs.Root = node2\n\t\t}\n\n\tcase node1.Col.Min < node2.Col.Min && node1.Col.Max > node2.Col.Max:\n\t\t// Node2 bisects node1:\n\t\t//\n\t\t// Node1 |---xx---|\n\t\t// Node2 |--|\n\t\tnewCol := node1.Col.copyToRange(node2.Col.Max+1, node1.Col.Max)\n\t\tnewNode := &ColStoreNode{Col: newCol}\n\t\tcs.addNode(node1, newNode, node1.Next)\n\t\tnode1.Col.Max = node2.Col.Min - 1\n\t\tcs.addNode(node1, node2, newNode)\n\t\treturn\n\n\tcase node1.Col.Max >= node2.Col.Min && node1.Col.Min < node2.Col.Min:\n\t\t// Node2 overlaps node1 at some point above it's minimum:\n\t\t//\n\t\t// Node1 |----xx|\n\t\t// Node2 |-------|\n\t\tnext := node1.Next\n\t\tnode1.Col.Max = node2.Col.Min - 1\n\t\tif next == node2 {\n\t\t\treturn\n\t\t}\n\t\tcs.addNode(node1, node2, next)\n\t\tif next != nil && next.Col.Min <= node2.Col.Max {\n\t\t\tcs.makeWay(next, node2)\n\t\t}\n\t\treturn\n\n\tcase node1.Col.Min <= node2.Col.Max && node1.Col.Min > node2.Col.Min:\n\t\t// Node2 overlaps node1 at some point below it's maximum:\n\t\t//\n\t\t// Node1: |------|\n\t\t// Node2: |----xx|\n\t\tprev := node1.Prev\n\t\tnode1.Col.Min = node2.Col.Max + 1\n\t\tif prev == node2 {\n\t\t\treturn\n\t\t}\n\t\tcs.addNode(prev, node2, node1)\n\t\tif prev != nil && prev.Col.Max >= node2.Col.Min {\n\t\t\tcs.makeWay(node1.Prev, node2)\n\t\t}\n\t\treturn\n\t}\n\treturn\n}", "func checkConflictingNodes(ctx context.Context, client client.Interface, node *libapi.Node) (v4conflict, v6conflict bool, retErr error) {\n\t// Get the full set of nodes.\n\tvar nodes []libapi.Node\n\tif nodeList, err := client.Nodes().List(ctx, options.ListOptions{}); err != nil {\n\t\tlog.WithError(err).Errorf(\"Unable to query node configuration\")\n\t\tretErr = err\n\t\treturn\n\t} else {\n\t\tnodes = nodeList.Items\n\t}\n\n\tourIPv4, _, err := cnet.ParseCIDROrIP(node.Spec.BGP.IPv4Address)\n\tif err != nil && node.Spec.BGP.IPv4Address != \"\" {\n\t\tlog.WithError(err).Errorf(\"Error parsing IPv4 CIDR '%s' for node '%s'\", node.Spec.BGP.IPv4Address, node.Name)\n\t\tretErr = err\n\t\treturn\n\t}\n\tourIPv6, _, err := cnet.ParseCIDROrIP(node.Spec.BGP.IPv6Address)\n\tif err != nil && node.Spec.BGP.IPv6Address != \"\" {\n\t\tlog.WithError(err).Errorf(\"Error parsing IPv6 CIDR '%s' for node '%s'\", node.Spec.BGP.IPv6Address, node.Name)\n\t\tretErr = err\n\t\treturn\n\t}\n\n\tfor _, theirNode := range nodes {\n\t\tif theirNode.Spec.BGP == nil {\n\t\t\t// Skip nodes that don't have BGP configured. 
We know\n\t\t\t// that this node does have BGP since we only perform\n\t\t\t// this check after configuring BGP.\n\t\t\tcontinue\n\t\t}\n\n\t\ttheirIPv4, _, err := cnet.ParseCIDROrIP(theirNode.Spec.BGP.IPv4Address)\n\t\tif err != nil && theirNode.Spec.BGP.IPv4Address != \"\" {\n\t\t\tlog.WithError(err).Errorf(\"Error parsing IPv4 CIDR '%s' for node '%s'\", theirNode.Spec.BGP.IPv4Address, theirNode.Name)\n\t\t\tretErr = err\n\t\t\treturn\n\t\t}\n\n\t\ttheirIPv6, _, err := cnet.ParseCIDROrIP(theirNode.Spec.BGP.IPv6Address)\n\t\tif err != nil && theirNode.Spec.BGP.IPv6Address != \"\" {\n\t\t\tlog.WithError(err).Errorf(\"Error parsing IPv6 CIDR '%s' for node '%s'\", theirNode.Spec.BGP.IPv6Address, theirNode.Name)\n\t\t\tretErr = err\n\t\t\treturn\n\t\t}\n\n\t\t// If this is our node (based on the name), check if the IP\n\t\t// addresses have changed. If so warn the user as it could be\n\t\t// an indication of multiple nodes using the same name. This\n\t\t// is not an error condition as the IPs could actually change.\n\t\tif theirNode.Name == node.Name {\n\t\t\tif theirIPv4.IP != nil && ourIPv4.IP != nil && !theirIPv4.IP.Equal(ourIPv4.IP) {\n\t\t\t\tfields := log.Fields{\"node\": theirNode.Name, \"original\": theirIPv4.String(), \"updated\": ourIPv4.String()}\n\t\t\t\tlog.WithFields(fields).Warnf(\"IPv4 address has changed. This could happen if there are multiple nodes with the same name.\")\n\t\t\t}\n\t\t\tif theirIPv6.IP != nil && ourIPv6.IP != nil && !theirIPv6.IP.Equal(ourIPv6.IP) {\n\t\t\t\tfields := log.Fields{\"node\": theirNode.Name, \"original\": theirIPv6.String(), \"updated\": ourIPv6.String()}\n\t\t\t\tlog.WithFields(fields).Warnf(\"IPv6 address has changed. This could happen if there are multiple nodes with the same name.\")\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\t// Check that other nodes aren't using the same IP addresses.\n\t\t// This is an error condition.\n\t\tif theirIPv4.IP != nil && ourIPv4.IP != nil && theirIPv4.IP.Equal(ourIPv4.IP) {\n\t\t\tlog.Warnf(\"Calico node '%s' is already using the IPv4 address %s.\", theirNode.Name, ourIPv4.String())\n\t\t\tretErr = fmt.Errorf(\"IPv4 address conflict\")\n\t\t\tv4conflict = true\n\t\t}\n\n\t\tif theirIPv6.IP != nil && ourIPv6.IP != nil && theirIPv6.IP.Equal(ourIPv6.IP) {\n\t\t\tlog.Warnf(\"Calico node '%s' is already using the IPv6 address %s.\", theirNode.Name, ourIPv6.String())\n\t\t\tretErr = fmt.Errorf(\"IPv6 address conflict\")\n\t\t\tv6conflict = true\n\t\t}\n\t}\n\treturn\n}", "func (s *mergeBaseSuite) TestAncestorBeyondMerges(c *C) {\n\trevs := []string{\"M\", \"G\"}\n\ts.AssertAncestor(c, revs, true)\n\n\trevs = []string{\"G\", \"M\"}\n\ts.AssertAncestor(c, revs, false)\n}", "func mergeForeignKeyChanges(\n\tctx context.Context,\n\toldFks *doltdb.ForeignKeyCollection,\n\tnewRoot *doltdb.RootValue,\n\tnewFks *doltdb.ForeignKeyCollection,\n\tchangedRoot *doltdb.RootValue,\n\tchangedFks *doltdb.ForeignKeyCollection,\n\tforce bool,\n) (*doltdb.ForeignKeyCollection, error) {\n\tfksByTable := make(map[string][]doltdb.ForeignKey)\n\n\tconflicts := set.NewEmptyStrSet()\n\ttblNames, err := newRoot.GetTableNames(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, tblName := range tblNames {\n\t\toldFksForTable, _ := oldFks.KeysForTable(tblName)\n\t\tnewFksForTable, _ := newFks.KeysForTable(tblName)\n\t\tchangedFksForTable, _ := changedFks.KeysForTable(tblName)\n\n\t\toldHash := doltdb.CombinedHash(oldFksForTable)\n\t\tnewHash := doltdb.CombinedHash(newFksForTable)\n\t\tchangedHash := 
doltdb.CombinedHash(changedFksForTable)\n\n\t\tif oldHash == changedHash {\n\t\t\tfksByTable[tblName] = append(fksByTable[tblName], newFksForTable...)\n\t\t} else if oldHash == newHash {\n\t\t\tfksByTable[tblName] = append(fksByTable[tblName], changedFksForTable...)\n\t\t} else if force {\n\t\t\tfksByTable[tblName] = append(fksByTable[tblName], newFksForTable...)\n\t\t} else {\n\t\t\tconflicts.Add(tblName)\n\t\t}\n\t}\n\n\ttblNames, err = changedRoot.GetTableNames(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, tblName := range tblNames {\n\t\tif _, exists := fksByTable[tblName]; !exists {\n\t\t\toldKeys, _ := oldFks.KeysForTable(tblName)\n\t\t\toldHash := doltdb.CombinedHash(oldKeys)\n\n\t\t\tchangedKeys, _ := changedFks.KeysForTable(tblName)\n\t\t\tchangedHash := doltdb.CombinedHash(changedKeys)\n\n\t\t\tif oldHash == emptyHash {\n\t\t\t\tfksByTable[tblName] = append(fksByTable[tblName], changedKeys...)\n\t\t\t} else if force {\n\t\t\t\tfksByTable[tblName] = append(fksByTable[tblName], oldKeys...)\n\t\t\t} else if oldHash != changedHash {\n\t\t\t\tconflicts.Add(tblName)\n\t\t\t}\n\t\t}\n\t}\n\n\tif conflicts.Size() > 0 {\n\t\treturn nil, ErrCheckoutWouldOverwrite{conflicts.AsSlice()}\n\t}\n\n\tfks := make([]doltdb.ForeignKey, 0)\n\tfor _, v := range fksByTable {\n\t\tfks = append(fks, v...)\n\t}\n\n\treturn doltdb.NewForeignKeyCollection(fks...)\n}", "func merge(left, right *mapNode) *mapNode {\n\tswitch {\n\tcase left == nil:\n\t\treturn right.incref()\n\tcase right == nil:\n\t\treturn left.incref()\n\tcase left.weight > right.weight:\n\t\troot := left.shallowCloneWithRef()\n\t\troot.left = left.left.incref()\n\t\troot.right = merge(left.right, right)\n\t\treturn root\n\tdefault:\n\t\troot := right.shallowCloneWithRef()\n\t\troot.left = merge(left, right.left)\n\t\troot.right = right.right.incref()\n\t\treturn root\n\t}\n}", "func (fbo *folderBranchOps) forceStuckConflictForTesting(\n\tctx context.Context) (err error) {\n\tstartTime, timer := fbo.startOp(ctx, \"Forcing a stuck conflict\")\n\tdefer func() {\n\t\tfbo.endOp(\n\t\t\tctx, startTime, timer, \"Forcing a stuck conflict done: %+v\", err)\n\t}()\n\n\tlState := makeFBOLockState()\n\tfbo.mdWriterLock.Lock(lState)\n\tdefer fbo.mdWriterLock.Unlock(lState)\n\n\tif fbo.isUnmergedLocked(lState) {\n\t\treturn errors.New(\"Cannot force conflict when already unmerged\")\n\t}\n\n\t// Disable updates.\n\tunpauseUpdatesCh := make(chan struct{})\n\tselect {\n\tcase fbo.updatePauseChan <- unpauseUpdatesCh:\n\tcase <-ctx.Done():\n\t\treturn ctx.Err()\n\t}\n\tdefer func() { unpauseUpdatesCh <- struct{}{} }()\n\n\t// Make a no-op revision with an empty resolutionOp. 
Wait for it\n\t// to flush to the server.\n\torigHead, _ := fbo.getHead(ctx, lState, mdNoCommit)\n\tmergedGCOp := newGCOp(origHead.data.LastGCRevision)\n\terr = fbo.finalizeGCOpLocked(ctx, lState, mergedGCOp)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tjManager, _ := GetJournalManager(fbo.config)\n\tif jManager != nil {\n\t\terr := fbo.waitForJournalLocked(ctx, lState, jManager)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t// Wait for the flush handler to finish, so we don't\n\t\t// accidentally swap in the upcoming MD on the conflict branch\n\t\t// over the \"merged\" one we just flushed, before the pointer\n\t\t// archiving step happens.\n\t\terr = fbo.mdFlushes.Wait(ctx)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// Roll back the local view to the original revision.\n\terr = func() error {\n\t\tfbo.headLock.Lock(lState)\n\t\tdefer fbo.headLock.Unlock(lState)\n\t\terr = fbo.setHeadLocked(ctx, lState, origHead, headTrusted, mdNoCommit)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfbo.setLatestMergedRevisionLocked(\n\t\t\tctx, lState, origHead.Revision(), true)\n\t\treturn nil\n\t}()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Set CR to always fail.\n\toldMode := fbo.cr.getFailModeForTesting()\n\tfbo.cr.setFailModeForTesting(alwaysFailCR)\n\tdefer func() { fbo.cr.setFailModeForTesting(oldMode) }()\n\n\t// Make fake conflicting files to trigger CR. Make one for each\n\t// attempt needed to result in stuck CR.\n\thandle := origHead.GetTlfHandle()\n\trootNode, err := fbo.nodeCache.GetOrCreate(\n\t\torigHead.data.Dir.BlockPointer,\n\t\tdata.NewPathPartString(string(handle.GetCanonicalName()),\n\t\t\tfbo.makeObfuscator()),\n\t\tnil, data.Dir)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor i := 0; i < maxConflictResolutionAttempts+1; i++ {\n\t\tfilename := fmt.Sprintf(\"FILE_FOR_STUCK_CONFLICT_%02d\", i)\n\t\t_, _, err := fbo.createEntryLocked(\n\t\t\tctx, lState, rootNode, rootNode.ChildName(filename), data.File,\n\t\t\tNoExcl)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\terr = fbo.syncAllLocked(ctx, lState, NoExcl)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif jManager != nil && TLFJournalEnabled(fbo.config, fbo.id()) {\n\t\t\t// Can't use fbo.waitForJournalLocked here, since the\n\t\t\t// flushing won't actually complete.\n\t\t\terr := jManager.Wait(ctx, fbo.id())\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tnewHead, _ := fbo.getHead(ctx, lState, mdNoCommit)\n\t\t\tfbo.cr.Resolve(\n\t\t\t\tctx, newHead.Revision(), kbfsmd.RevisionUninitialized)\n\t\t}\n\n\t\terr = fbo.cr.Wait(ctx)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// Make sure we're stuck.\n\tisStuck, err := fbo.cr.isStuck()\n\tif err != nil {\n\t\treturn err\n\t}\n\tif !isStuck {\n\t\treturn errors.New(\"CR not stuck after trying to force conflict\")\n\t}\n\n\treturn nil\n}", "func (h *branchesService) mergeAmbiguousBranches(repo *Repo) {\n\tambiguousBranches := lo.Filter(repo.Branches, func(v *Branch, _ int) bool { return v.IsAmbiguousBranch })\n\tfor _, b := range ambiguousBranches {\n\t\ttip := repo.CommitByID(b.TipID)\n\n\t\t// Determine the parent commit this branch was created from\n\t\totherId := b.BottomID\n\t\tparentBranchCommit := repo.CommitByID(b.BottomID).FirstParent\n\t\tif parentBranchCommit != nil {\n\t\t\totherId = parentBranchCommit.Id\n\t\t}\n\n\t\t// Find the tip of the ambiguous commits (and the next commit)\n\t\tvar ambiguousTip *Commit\n\t\tvar ambiguousSecond *Commit\n\t\tfor c := tip; c != nil && c.Id != otherId; c = c.FirstParent 
{\n\t\t\tif c.Branch != b {\n\t\t\t\t// Still a normal branch commit (no longer part of the ambiguous branch)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// tip of the ambiguous commits\n\t\t\tambiguousTip = c\n\t\t\tambiguousSecond = c.FirstParent\n\t\t\tc.IsAmbiguousTip = true\n\t\t\tc.IsAmbiguous = true\n\n\t\t\t// Determine the most likely branch (branch of the oldest child)\n\t\t\toldestChild := c.Children[0]\n\t\t\tchildBranches := []*Branch{}\n\t\t\tfor _, c := range c.Children {\n\t\t\t\tif c.AuthorTime.After(oldestChild.AuthorTime) {\n\t\t\t\t\toldestChild = c\n\t\t\t\t}\n\t\t\t\tchildBranches = append(childBranches, c.Branch)\n\t\t\t}\n\t\t\tc.Branch = oldestChild.Branch\n\t\t\tc.Branch.AmbiguousTipId = c.Id\n\t\t\tc.Branch.AmbiguousBranches = childBranches\n\t\t\tc.Branch.BottomID = c.Id\n\t\t\tbreak\n\t\t}\n\n\t\t// Set the branch of the rest of the ambiguous commits to same as the tip\n\t\tfor c := ambiguousSecond; c != nil && c.Id != otherId; c = c.FirstParent {\n\t\t\tc.Branch = ambiguousTip.Branch\n\t\t\tc.Branch.BottomID = c.Id\n\t\t\tc.IsAmbiguous = true\n\t\t}\n\n\t\t// Removing the ambiguous branch (no longer needed)\n\t\trepo.Branches = lo.Filter(repo.Branches, func(v *Branch, _ int) bool { return v != b })\n\t}\n}", "func (c *networkConfiguration) Conflicts(o *networkConfiguration) error {\n\tif o == nil {\n\t\treturn errors.New(\"same configuration\")\n\t}\n\n\t// Also empty, because only one network with empty name is allowed\n\tif c.BridgeName == o.BridgeName {\n\t\treturn errors.New(\"networks have same bridge name\")\n\t}\n\n\t// They must be in different subnets\n\tif (c.AddressIPv4 != nil && o.AddressIPv4 != nil) &&\n\t\t(c.AddressIPv4.Contains(o.AddressIPv4.IP) || o.AddressIPv4.Contains(c.AddressIPv4.IP)) {\n\t\treturn errors.New(\"networks have overlapping IPv4\")\n\t}\n\n\t// They must be in different v6 subnets\n\tif (c.AddressIPv6 != nil && o.AddressIPv6 != nil) &&\n\t\t(c.AddressIPv6.Contains(o.AddressIPv6.IP) || o.AddressIPv6.Contains(c.AddressIPv6.IP)) {\n\t\treturn errors.New(\"networks have overlapping IPv6\")\n\t}\n\n\treturn nil\n}", "func (t *Table) ResolveConflicts(ctx context.Context, pkTuples []types.Value) (invalid, notFound []types.Value, tbl *Table, err error) {\n\tremoved := 0\n\tconflictSchema, confIdx, err := t.GetConflicts(ctx)\n\tif err != nil {\n\t\treturn nil, nil, nil, err\n\t}\n\n\tif confIdx.Format() == types.Format_DOLT {\n\t\tpanic(\"resolve conflicts not implemented for new storage format\")\n\t}\n\n\tconfData := durable.NomsMapFromConflictIndex(confIdx)\n\n\tconfEdit := confData.Edit()\n\tfor _, pkTupleVal := range pkTuples {\n\t\tif has, err := confData.Has(ctx, pkTupleVal); err != nil {\n\t\t\treturn nil, nil, nil, err\n\t\t} else if has {\n\t\t\tremoved++\n\t\t\tconfEdit.Remove(pkTupleVal)\n\t\t} else {\n\t\t\tnotFound = append(notFound, pkTupleVal)\n\t\t}\n\t}\n\n\tif removed == 0 {\n\t\treturn invalid, notFound, tbl, ErrNoConflictsResolved\n\t}\n\n\tconflicts, err := confEdit.Map(ctx)\n\tif err != nil {\n\t\treturn nil, nil, nil, err\n\t}\n\n\tif conflicts.Len() == 0 {\n\t\ttable, err := t.table.ClearConflicts(ctx)\n\t\tif err != nil {\n\t\t\treturn nil, nil, nil, err\n\t\t}\n\t\treturn invalid, notFound, &Table{table: table}, nil\n\t}\n\n\ttable, err := t.table.SetConflicts(ctx, conflictSchema, durable.ConflictIndexFromNomsMap(conflicts, t.ValueReadWriter()))\n\tif err != nil {\n\t\treturn nil, nil, nil, err\n\t}\n\n\treturn invalid, notFound, &Table{table: table}, nil\n}", "func TestWidenChainAncestor(t *testing.T) 
{\n\ttf.UnitTest(t)\n\tctx := context.Background()\n\tbuilder, store, syncer := setup(ctx, t)\n\tgenesis := builder.RequireTipSet(store.GetHead())\n\n\tlink1 := builder.AppendOn(genesis, 2)\n\tlink2 := builder.AppendOn(link1, 3)\n\tlink3 := builder.AppendOn(link2, 1)\n\tlink4 := builder.AppendOn(link3, 2)\n\n\t// Build another block with parents link1, but not included in link2.\n\tlink2Alt := builder.AppendOn(link1, 1)\n\t// Build a tipset containing one block from link2, plus this new sibling.\n\tlink2UnionSubset := types.RequireNewTipSet(t, link2.At(0), link2Alt.At(0))\n\n\t// Sync the subset of link2 first\n\tassert.NoError(t, syncer.HandleNewTipSet(ctx, types.NewChainInfo(peer.ID(\"\"), link2UnionSubset.Key(), heightFromTip(t, link2UnionSubset)), true))\n\tverifyTip(t, store, link2UnionSubset, builder.StateForKey(link2UnionSubset.Key()))\n\tverifyHead(t, store, link2UnionSubset)\n\n\t// Sync chain with head at link4\n\trequire.NoError(t, syncer.HandleNewTipSet(ctx, types.NewChainInfo(peer.ID(\"\"), link4.Key(), heightFromTip(t, link4)), true))\n\tverifyTip(t, store, link4, builder.StateForKey(link4.Key()))\n\tverifyHead(t, store, link4)\n\n\t// Check that the widened tipset (link2UnionSubset U link2) is tracked\n\tlink2Union := types.RequireNewTipSet(t, link2.At(0), link2.At(1), link2.At(2), link2Alt.At(0))\n\tverifyTip(t, store, link2Union, builder.StateForKey(link2Union.Key()))\n}", "func lowestCommonAncestor1(root, p, q *TreeNode) *TreeNode {\n\tif root == nil {\n\t\treturn nil\n\t}\n\n\tvar stk []*TreeNode\n\tvar parent = make(map[*TreeNode]*TreeNode)\n\n\tstk = append(stk, root)\n\n\tvar gotCnt int\n\tfor gotCnt < 2 && len(stk) != 0 {\n\t\tnode := stk[len(stk)-1]\n\t\tstk = stk[:len(stk)-1]\n\n\t\tif node.Left != nil {\n\t\t\tif node.Left == p || node.Left == q {\n\t\t\t\tgotCnt++\n\t\t\t}\n\t\t\tparent[node.Left] = node\n\t\t\tstk = append(stk, node.Left)\n\t\t}\n\t\tif node.Right != nil {\n\t\t\tif node.Right == p || node.Right == q {\n\t\t\t\tgotCnt++\n\t\t\t}\n\t\t\tparent[node.Right] = node\n\t\t\tstk = append(stk, node.Right)\n\t\t}\n\t}\n\n\t// backtracking process.\n\tvar ancestors = make(map[*TreeNode]bool)\n\tfor p != nil {\n\t\tancestors[p] = true\n\t\tp = parent[p]\n\t}\n\tfor !ancestors[q] {\n\t\tq = parent[q]\n\t}\n\n\treturn q\n}", "func findConflicts(leaves []*Transaction) (map[string]*SyncBool, error) {\n\tvar conflicts = make(map[string]*SyncBool)\n\tfor i := 1; i < len(leaves); i++ {\n\t\tx, y := leaves[i-1].ID, leaves[i].ID\n\t\tk := len(x)\n\t\t// This was originally len(leaves)...\n\t\tfor idx := 0; idx < len(x); idx++ {\n\t\t\tvar a, b byte = x[idx], y[idx]\n\t\t\tif a != b {\n\t\t\t\tk = idx\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tconflicts[x[0:k]] = &SyncBool{lock: &sync.Mutex{}, visited: false}\n\t}\n\treturn conflicts, nil\n}", "func resolveDeps(m meta.RESTMapper, objects []unstructuredv1.Unstructured, uids []types.UID, depsIsDependencies bool) (NodeMap, error) {\n\tif len(uids) == 0 {\n\t\treturn NodeMap{}, nil\n\t}\n\t// Create global node maps of all objects, one mapped by node UIDs & the other\n\t// mapped by node keys. 
This step also helps deduplicate the list of provided\n\t// objects\n\tglobalMapByUID := map[types.UID]*Node{}\n\tglobalMapByKey := map[ObjectReferenceKey]*Node{}\n\tfor ix, o := range objects {\n\t\tgvk := o.GroupVersionKind()\n\t\tm, err := m.RESTMapping(gvk.GroupKind(), gvk.Version)\n\t\tif err != nil {\n\t\t\tklog.V(4).Infof(\"Failed to map resource \\\"%s\\\" to GVR\", gvk)\n\t\t\treturn nil, err\n\t\t}\n\t\tns := o.GetNamespace()\n\t\tnode := Node{\n\t\t\tUnstructured: &objects[ix],\n\t\t\tUID: o.GetUID(),\n\t\t\tName: o.GetName(),\n\t\t\tNamespace: ns,\n\t\t\tNamespaced: ns != \"\",\n\t\t\tGroup: m.Resource.Group,\n\t\t\tVersion: m.Resource.Version,\n\t\t\tKind: m.GroupVersionKind.Kind,\n\t\t\tResource: m.Resource.Resource,\n\t\t\tOwnerReferences: o.GetOwnerReferences(),\n\t\t\tDependencies: map[types.UID]RelationshipSet{},\n\t\t\tDependents: map[types.UID]RelationshipSet{},\n\t\t}\n\t\tuid, key := node.UID, node.GetObjectReferenceKey()\n\t\tif n, ok := globalMapByUID[uid]; ok {\n\t\t\tklog.V(4).Infof(\"Duplicated %s.%s resource \\\"%s\\\" in namespace \\\"%s\\\"\", n.Kind, n.Group, n.Name, n.Namespace)\n\t\t}\n\t\tglobalMapByUID[uid] = &node\n\t\tglobalMapByKey[key] = &node\n\n\t\tif node.Group == corev1.GroupName && node.Kind == \"Node\" {\n\t\t\t// Node events sent by the Kubelet uses the node's name as the\n\t\t\t// ObjectReference UID, so we include them as keys in our global map to\n\t\t\t// support lookup by nodename\n\t\t\tglobalMapByUID[types.UID(node.Name)] = &node\n\t\t\t// Node events sent by the kube-proxy uses the node's hostname as the\n\t\t\t// ObjectReference UID, so we include them as keys in our global map to\n\t\t\t// support lookup by hostname\n\t\t\tif hostname, ok := o.GetLabels()[corev1.LabelHostname]; ok {\n\t\t\t\tglobalMapByUID[types.UID(hostname)] = &node\n\t\t\t}\n\t\t}\n\t}\n\n\tresolveLabelSelectorToNodes := func(o ObjectLabelSelector) []*Node {\n\t\tvar result []*Node\n\t\tfor _, n := range globalMapByUID {\n\t\t\tif n.Group == o.Group && n.Kind == o.Kind && n.Namespace == o.Namespace {\n\t\t\t\tif ok := o.Selector.Matches(labels.Set(n.GetLabels())); ok {\n\t\t\t\t\tresult = append(result, n)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn result\n\t}\n\tresolveSelectorToNodes := func(o ObjectSelector) []*Node {\n\t\tvar result []*Node\n\t\tfor _, n := range globalMapByUID {\n\t\t\tif n.Group == o.Group && n.Kind == o.Kind {\n\t\t\t\tif len(o.Namespaces) == 0 || o.Namespaces.Has(n.Namespace) {\n\t\t\t\t\tresult = append(result, n)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn result\n\t}\n\tupdateRelationships := func(node *Node, rmap *RelationshipMap) {\n\t\tfor k, rset := range rmap.DependenciesByRef {\n\t\t\tif n, ok := globalMapByKey[k]; ok {\n\t\t\t\tfor r := range rset {\n\t\t\t\t\tnode.AddDependency(n.UID, r)\n\t\t\t\t\tn.AddDependent(node.UID, r)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tfor k, rset := range rmap.DependentsByRef {\n\t\t\tif n, ok := globalMapByKey[k]; ok {\n\t\t\t\tfor r := range rset {\n\t\t\t\t\tn.AddDependency(node.UID, r)\n\t\t\t\t\tnode.AddDependent(n.UID, r)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tfor k, rset := range rmap.DependenciesByLabelSelector {\n\t\t\tif ols, ok := rmap.ObjectLabelSelectors[k]; ok {\n\t\t\t\tfor _, n := range resolveLabelSelectorToNodes(ols) {\n\t\t\t\t\tfor r := range rset {\n\t\t\t\t\t\tnode.AddDependency(n.UID, r)\n\t\t\t\t\t\tn.AddDependent(node.UID, r)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tfor k, rset := range rmap.DependentsByLabelSelector {\n\t\t\tif ols, ok := rmap.ObjectLabelSelectors[k]; ok {\n\t\t\t\tfor _, n := 
range resolveLabelSelectorToNodes(ols) {\n\t\t\t\t\tfor r := range rset {\n\t\t\t\t\t\tn.AddDependency(node.UID, r)\n\t\t\t\t\t\tnode.AddDependent(n.UID, r)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tfor k, rset := range rmap.DependenciesBySelector {\n\t\t\tif os, ok := rmap.ObjectSelectors[k]; ok {\n\t\t\t\tfor _, n := range resolveSelectorToNodes(os) {\n\t\t\t\t\tfor r := range rset {\n\t\t\t\t\t\tnode.AddDependency(n.UID, r)\n\t\t\t\t\t\tn.AddDependent(node.UID, r)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tfor k, rset := range rmap.DependentsBySelector {\n\t\t\tif os, ok := rmap.ObjectSelectors[k]; ok {\n\t\t\t\tfor _, n := range resolveSelectorToNodes(os) {\n\t\t\t\t\tfor r := range rset {\n\t\t\t\t\t\tn.AddDependency(node.UID, r)\n\t\t\t\t\t\tnode.AddDependent(n.UID, r)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tfor uid, rset := range rmap.DependenciesByUID {\n\t\t\tif n, ok := globalMapByUID[uid]; ok {\n\t\t\t\tfor r := range rset {\n\t\t\t\t\tnode.AddDependency(n.UID, r)\n\t\t\t\t\tn.AddDependent(node.UID, r)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tfor uid, rset := range rmap.DependentsByUID {\n\t\t\tif n, ok := globalMapByUID[uid]; ok {\n\t\t\t\tfor r := range rset {\n\t\t\t\t\tn.AddDependency(node.UID, r)\n\t\t\t\t\tnode.AddDependent(n.UID, r)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t// Populate dependencies & dependents based on Owner-Dependent relationships\n\tfor _, node := range globalMapByUID {\n\t\tfor _, ref := range node.OwnerReferences {\n\t\t\tif n, ok := globalMapByUID[ref.UID]; ok {\n\t\t\t\tif ref.Controller != nil && *ref.Controller {\n\t\t\t\t\tnode.AddDependency(n.UID, RelationshipControllerRef)\n\t\t\t\t\tn.AddDependent(node.UID, RelationshipControllerRef)\n\t\t\t\t}\n\t\t\t\tnode.AddDependency(n.UID, RelationshipOwnerRef)\n\t\t\t\tn.AddDependent(node.UID, RelationshipOwnerRef)\n\t\t\t}\n\t\t}\n\t}\n\n\tvar rmap *RelationshipMap\n\tvar err error\n\tfor _, node := range globalMapByUID {\n\t\tswitch {\n\t\t// Populate dependencies & dependents based on PersistentVolume relationships\n\t\tcase node.Group == corev1.GroupName && node.Kind == \"PersistentVolume\":\n\t\t\trmap, err = getPersistentVolumeRelationships(node)\n\t\t\tif err != nil {\n\t\t\t\tklog.V(4).Infof(\"Failed to get relationships for persistentvolume named \\\"%s\\\": %s\", node.Name, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t// Populate dependencies & dependents based on PersistentVolumeClaim relationships\n\t\tcase node.Group == corev1.GroupName && node.Kind == \"PersistentVolumeClaim\":\n\t\t\trmap, err = getPersistentVolumeClaimRelationships(node)\n\t\t\tif err != nil {\n\t\t\t\tklog.V(4).Infof(\"Failed to get relationships for persistentvolumeclaim named \\\"%s\\\" in namespace \\\"%s\\\": %s\", node.Name, node.Namespace, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t// Populate dependencies & dependents based on Pod relationships\n\t\tcase node.Group == corev1.GroupName && node.Kind == \"Pod\":\n\t\t\trmap, err = getPodRelationships(node)\n\t\t\tif err != nil {\n\t\t\t\tklog.V(4).Infof(\"Failed to get relationships for pod named \\\"%s\\\" in namespace \\\"%s\\\": %s\", node.Name, node.Namespace, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t// Populate dependencies & dependents based on Service relationships\n\t\tcase node.Group == corev1.GroupName && node.Kind == \"Service\":\n\t\t\trmap, err = getServiceRelationships(node)\n\t\t\tif err != nil {\n\t\t\t\tklog.V(4).Infof(\"Failed to get relationships for service named \\\"%s\\\" in namespace \\\"%s\\\": %s\", node.Name, node.Namespace, 
err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t// Populate dependencies & dependents based on ServiceAccount relationships\n\t\tcase node.Group == corev1.GroupName && node.Kind == \"ServiceAccount\":\n\t\t\trmap, err = getServiceAccountRelationships(node)\n\t\t\tif err != nil {\n\t\t\t\tklog.V(4).Infof(\"Failed to get relationships for serviceaccount named \\\"%s\\\" in namespace \\\"%s\\\": %s\", node.Name, node.Namespace, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t// Populate dependencies & dependents based on PodSecurityPolicy relationships\n\t\tcase node.Group == policyv1beta1.GroupName && node.Kind == \"PodSecurityPolicy\":\n\t\t\trmap, err = getPodSecurityPolicyRelationships(node)\n\t\t\tif err != nil {\n\t\t\t\tklog.V(4).Infof(\"Failed to get relationships for podsecuritypolicy named \\\"%s\\\": %s\", node.Name, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t// Populate dependencies & dependents based on PodDisruptionBudget relationships\n\t\tcase node.Group == policyv1.GroupName && node.Kind == \"PodDisruptionBudget\":\n\t\t\trmap, err = getPodDisruptionBudgetRelationships(node)\n\t\t\tif err != nil {\n\t\t\t\tklog.V(4).Infof(\"Failed to get relationships for poddisruptionbudget named \\\"%s\\\": %s\", node.Name, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t// Populate dependencies & dependents based on MutatingWebhookConfiguration relationships\n\t\tcase node.Group == admissionregistrationv1.GroupName && node.Kind == \"MutatingWebhookConfiguration\":\n\t\t\trmap, err = getMutatingWebhookConfigurationRelationships(node)\n\t\t\tif err != nil {\n\t\t\t\tklog.V(4).Infof(\"Failed to get relationships for mutatingwebhookconfiguration named \\\"%s\\\": %s\", node.Name, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t// Populate dependencies & dependents based on ValidatingWebhookConfiguration relationships\n\t\tcase node.Group == admissionregistrationv1.GroupName && node.Kind == \"ValidatingWebhookConfiguration\":\n\t\t\trmap, err = getValidatingWebhookConfigurationRelationships(node)\n\t\t\tif err != nil {\n\t\t\t\tklog.V(4).Infof(\"Failed to get relationships for validatingwebhookconfiguration named \\\"%s\\\": %s\", node.Name, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t// Populate dependencies & dependents based on APIService relationships\n\t\tcase node.Group == apiregistrationv1.GroupName && node.Kind == \"APIService\":\n\t\t\trmap, err = getAPIServiceRelationships(node)\n\t\t\tif err != nil {\n\t\t\t\tklog.V(4).Infof(\"Failed to get relationships for apiservice named \\\"%s\\\": %s\", node.Name, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t// Populate dependencies & dependents based on Event relationships\n\t\tcase (node.Group == eventsv1.GroupName || node.Group == corev1.GroupName) && node.Kind == \"Event\":\n\t\t\trmap, err = getEventRelationships(node)\n\t\t\tif err != nil {\n\t\t\t\tklog.V(4).Infof(\"Failed to get relationships for event named \\\"%s\\\" in namespace \\\"%s\\\": %s\", node.Name, node.Namespace, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t// Populate dependencies & dependents based on Ingress relationships\n\t\tcase (node.Group == networkingv1.GroupName || node.Group == extensionsv1beta1.GroupName) && node.Kind == \"Ingress\":\n\t\t\trmap, err = getIngressRelationships(node)\n\t\t\tif err != nil {\n\t\t\t\tklog.V(4).Infof(\"Failed to get relationships for ingress named \\\"%s\\\" in namespace \\\"%s\\\": %s\", node.Name, node.Namespace, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t// Populate dependencies & dependents based on IngressClass relationships\n\t\tcase node.Group == networkingv1.GroupName && node.Kind == 
\"IngressClass\":\n\t\t\trmap, err = getIngressClassRelationships(node)\n\t\t\tif err != nil {\n\t\t\t\tklog.V(4).Infof(\"Failed to get relationships for ingressclass named \\\"%s\\\": %s\", node.Name, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t// Populate dependencies & dependents based on NetworkPolicy relationships\n\t\tcase node.Group == networkingv1.GroupName && node.Kind == \"NetworkPolicy\":\n\t\t\trmap, err = getNetworkPolicyRelationships(node)\n\t\t\tif err != nil {\n\t\t\t\tklog.V(4).Infof(\"Failed to get relationships for networkpolicy named \\\"%s\\\": %s\", node.Name, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t// Populate dependencies & dependents based on RuntimeClass relationships\n\t\tcase node.Group == nodev1.GroupName && node.Kind == \"RuntimeClass\":\n\t\t\trmap, err = getRuntimeClassRelationships(node)\n\t\t\tif err != nil {\n\t\t\t\tklog.V(4).Infof(\"Failed to get relationships for runtimeclass named \\\"%s\\\": %s\", node.Name, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t// Populate dependencies & dependents based on ClusterRole relationships\n\t\tcase node.Group == rbacv1.GroupName && node.Kind == \"ClusterRole\":\n\t\t\trmap, err = getClusterRoleRelationships(node)\n\t\t\tif err != nil {\n\t\t\t\tklog.V(4).Infof(\"Failed to get relationships for clusterrole named \\\"%s\\\": %s\", node.Name, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t// Populate dependencies & dependents based on ClusterRoleBinding relationships\n\t\tcase node.Group == rbacv1.GroupName && node.Kind == \"ClusterRoleBinding\":\n\t\t\trmap, err = getClusterRoleBindingRelationships(node)\n\t\t\tif err != nil {\n\t\t\t\tklog.V(4).Infof(\"Failed to get relationships for clusterrolebinding named \\\"%s\\\": %s\", node.Name, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t// Populate dependencies & dependents based on Role relationships\n\t\tcase node.Group == rbacv1.GroupName && node.Kind == \"Role\":\n\t\t\trmap, err = getRoleRelationships(node)\n\t\t\tif err != nil {\n\t\t\t\tklog.V(4).Infof(\"Failed to get relationships for role named \\\"%s\\\" in namespace \\\"%s\\\": %s: %s\", node.Name, node.Namespace, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t// Populate dependencies & dependents based on RoleBinding relationships\n\t\tcase node.Group == rbacv1.GroupName && node.Kind == \"RoleBinding\":\n\t\t\trmap, err = getRoleBindingRelationships(node)\n\t\t\tif err != nil {\n\t\t\t\tklog.V(4).Infof(\"Failed to get relationships for rolebinding named \\\"%s\\\" in namespace \\\"%s\\\": %s: %s\", node.Name, node.Namespace, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t// Populate dependencies & dependents based on CSIStorageCapacity relationships\n\t\tcase node.Group == storagev1beta1.GroupName && node.Kind == \"CSIStorageCapacity\":\n\t\t\trmap, err = getCSIStorageCapacityRelationships(node)\n\t\t\tif err != nil {\n\t\t\t\tklog.V(4).Infof(\"Failed to get relationships for csistoragecapacity named \\\"%s\\\": %s: %s\", node.Name, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t// Populate dependencies & dependents based on CSINode relationships\n\t\tcase node.Group == storagev1.GroupName && node.Kind == \"CSINode\":\n\t\t\trmap, err = getCSINodeRelationships(node)\n\t\t\tif err != nil {\n\t\t\t\tklog.V(4).Infof(\"Failed to get relationships for csinode named \\\"%s\\\": %s: %s\", node.Name, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t// Populate dependencies & dependents based on StorageClass relationships\n\t\tcase node.Group == storagev1.GroupName && node.Kind == \"StorageClass\":\n\t\t\trmap, err = getStorageClassRelationships(node)\n\t\t\tif err != nil 
{\n\t\t\t\tklog.V(4).Infof(\"Failed to get relationships for storageclass named \\\"%s\\\": %s\", node.Name, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t// Populate dependencies & dependents based on VolumeAttachment relationships\n\t\tcase node.Group == storagev1.GroupName && node.Kind == \"VolumeAttachment\":\n\t\t\trmap, err = getVolumeAttachmentRelationships(node)\n\t\t\tif err != nil {\n\t\t\t\tklog.V(4).Infof(\"Failed to get relationships for volumeattachment named \\\"%s\\\": %s\", node.Name, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\tdefault:\n\t\t\tcontinue\n\t\t}\n\t\tupdateRelationships(node, rmap)\n\t}\n\n\t// Create submap containing the provided objects & either their dependencies\n\t// or dependents from the global map\n\tvar depth uint\n\tnodeMap, uidQueue, uidSet := NodeMap{}, []types.UID{}, map[types.UID]struct{}{}\n\tfor _, uid := range uids {\n\t\tif node := globalMapByUID[uid]; node != nil {\n\t\t\tnodeMap[uid] = node\n\t\t\tuidQueue = append(uidQueue, uid)\n\t\t}\n\t}\n\tdepth, uidQueue = 0, append(uidQueue, \"\")\n\tfor {\n\t\tif len(uidQueue) <= 1 {\n\t\t\tbreak\n\t\t}\n\t\tuid := uidQueue[0]\n\t\tif uid == \"\" {\n\t\t\tdepth, uidQueue = depth+1, append(uidQueue[1:], \"\")\n\t\t\tcontinue\n\t\t}\n\n\t\t// Guard against possible cycles\n\t\tif _, ok := uidSet[uid]; ok {\n\t\t\tuidQueue = uidQueue[1:]\n\t\t\tcontinue\n\t\t} else {\n\t\t\tuidSet[uid] = struct{}{}\n\t\t}\n\n\t\tif node := nodeMap[uid]; node != nil {\n\t\t\t// Allow nodes to keep the smallest depth. For example, if a node has a\n\t\t\t// depth of 1 & 7 in the relationship tree, we keep 1 so that when\n\t\t\t// printing the tree with a depth of 2, the node will still be printed\n\t\t\tif node.Depth == 0 || depth < node.Depth {\n\t\t\t\tnode.Depth = depth\n\t\t\t}\n\t\t\tdeps := node.GetDeps(depsIsDependencies)\n\t\t\tdepUIDs, ix := make([]types.UID, len(deps)), 0\n\t\t\tfor depUID := range deps {\n\t\t\t\tnodeMap[depUID] = globalMapByUID[depUID]\n\t\t\t\tdepUIDs[ix] = depUID\n\t\t\t\tix++\n\t\t\t}\n\t\t\tuidQueue = append(uidQueue[1:], depUIDs...)\n\t\t}\n\t}\n\n\tklog.V(4).Infof(\"Resolved %d deps for %d objects\", len(nodeMap)-1, len(uids))\n\treturn nodeMap, nil\n}", "func (ct *ClusterTopology) Merge(topology *ClusterTopology) {\n\tlog.Println(\"Merging topologies ...\")\n\tfor _, node := range topology.Nodes {\n\t\tlog.Printf(\"Evaluating node %s ...\\r\\n\", node.Node.Name)\n\t\tct.addNode(node)\n\t}\n\tct.AddNode(currentNode) // restore current node if needed\n\tct.buildHashcode()\n}", "func (d *differ) establishCorrespondence(old *types.Named, new types.Type) bool {\n\toldname := old.Obj()\n\toldc := d.correspondMap[oldname]\n\tif oldc == nil {\n\t\t// For now, assume the types don't correspond unless they are from the old\n\t\t// and new packages, respectively.\n\t\t//\n\t\t// This is too conservative. 
For instance,\n\t\t// [old] type A = q.B; [new] type A q.C\n\t\t// could be OK if in package q, B is an alias for C.\n\t\t// Or, using p as the name of the current old/new packages:\n\t\t// [old] type A = q.B; [new] type A int\n\t\t// could be OK if in q,\n\t\t// [old] type B int; [new] type B = p.A\n\t\t// In this case, p.A and q.B name the same type in both old and new worlds.\n\t\t// Note that this case doesn't imply circular package imports: it's possible\n\t\t// that in the old world, p imports q, but in the new, q imports p.\n\t\t//\n\t\t// However, if we didn't do something here, then we'd incorrectly allow cases\n\t\t// like the first one above in which q.B is not an alias for q.C\n\t\t//\n\t\t// What we should do is check that the old type, in the new world's package\n\t\t// of the same path, doesn't correspond to something other than the new type.\n\t\t// That is a bit hard, because there is no easy way to find a new package\n\t\t// matching an old one.\n\t\tif newn, ok := new.(*types.Named); ok {\n\t\t\tif old.Obj().Pkg() != d.old || newn.Obj().Pkg() != d.new {\n\t\t\t\treturn old.Obj().Id() == newn.Obj().Id()\n\t\t\t}\n\t\t}\n\t\t// If there is no correspondence, create one.\n\t\td.correspondMap[oldname] = new\n\t\t// Check that the corresponding types are compatible.\n\t\td.checkCompatibleDefined(oldname, old, new)\n\t\treturn true\n\t}\n\treturn types.Identical(oldc, new)\n}", "func (c *causality) detectConflict(keys [][]byte) (bool, int) {\n\tif len(keys) == 0 {\n\t\treturn false, 0\n\t}\n\n\tfirstIdx := -1\n\tfor _, key := range keys {\n\t\tif idx, ok := c.relations[string(key)]; ok {\n\t\t\tif firstIdx == -1 {\n\t\t\t\tfirstIdx = idx\n\t\t\t} else if firstIdx != idx {\n\t\t\t\treturn true, -1\n\t\t\t}\n\t\t}\n\t}\n\n\treturn firstIdx != -1, firstIdx\n}", "func TestMerge3_Merge_path(t *testing.T) {\n\t// TODO: make this test pass on windows -- currently failing due to comment whitespace changes\n\ttestutil.SkipWindows(t)\n\n\t_, datadir, _, ok := runtime.Caller(0)\n\tif !assert.True(t, ok) {\n\t\tt.FailNow()\n\t}\n\tdatadir = filepath.Join(filepath.Dir(datadir), \"testdata2\")\n\n\t// setup the local directory\n\tdir := t.TempDir()\n\n\tif !assert.NoError(t, copyutil.CopyDir(\n\t\tfilesys.MakeFsOnDisk(),\n\t\tfilepath.Join(datadir, \"dataset1-localupdates\"),\n\t\tfilepath.Join(dir, \"dataset1\"))) {\n\t\tt.FailNow()\n\t}\n\n\terr := filters.Merge3{\n\t\tOriginalPath: filepath.Join(datadir, \"dataset1\"),\n\t\tUpdatedPath: filepath.Join(datadir, \"dataset1-remoteupdates\"),\n\t\tDestPath: filepath.Join(dir, \"dataset1\"),\n\t}.Merge()\n\tif !assert.NoError(t, err) {\n\t\tt.FailNow()\n\t}\n\n\tdiffs, err := copyutil.Diff(\n\t\tfilepath.Join(dir, \"dataset1\"),\n\t\tfilepath.Join(datadir, \"dataset1-expected\"))\n\tif !assert.NoError(t, err) {\n\t\tt.FailNow()\n\t}\n\tif !assert.Empty(t, diffs.List()) {\n\t\tt.FailNow()\n\t}\n}", "func (c *causality) detectConflict(keys []string) bool {\n\tif len(keys) == 0 {\n\t\treturn false\n\t}\n\n\tvar existedRelation string\n\tfor _, key := range keys {\n\t\tif val, ok := c.relations[key]; ok {\n\t\t\tif existedRelation != \"\" && val != existedRelation {\n\t\t\t\treturn true\n\t\t\t}\n\t\t\texistedRelation = val\n\t\t}\n\t}\n\n\treturn false\n}", "func (tangle *Tangle) propagateBranchPreferredChangesToTangle(cachedBranch *branchmanager.CachedBranch, preferred bool) {\n\tcachedBranch.Consume(func(branch *branchmanager.Branch) {\n\t\tif !branch.IsAggregated() {\n\t\t\ttransactionID, _, err := 
transaction.IDFromBytes(branch.ID().Bytes())\n\t\t\tif err != nil {\n\t\t\t\tpanic(err) // this should never ever happen\n\t\t\t}\n\n\t\t\t_, err = tangle.setTransactionPreferred(transactionID, preferred, EventSourceBranchManager)\n\t\t\tif err != nil {\n\t\t\t\ttangle.Events.Error.Trigger(err)\n\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t})\n}", "func lowestCommonAncestor(root, p, q *TreeNode) *TreeNode {\n\tif root == nil || root == p || root == q {\n\t\treturn root\n\t}\n\tleft, right := lowestCommonAncestor(root.Left, p, q), lowestCommonAncestor(root.Right, p, q)\n\tswitch {\n\tcase left == nil:\n\t\t{\n\t\t\treturn right\n\t\t}\n\tcase right == nil:\n\t\t{\n\t\t\treturn left\n\t\t}\n\tcase left != nil && right != nil:\n\t\t{\n\t\t\treturn root\n\t\t}\n\tdefault:\n\t\treturn root\n\t}\n}", "func TestMergeLengthThree(t *testing.T) {\n\tA := []int{6, 2, 3}\n\tR := []int{2, 3, 6}\n\n\tif r := merge(A, 0, 0, 2); !reflect.DeepEqual(r, R) {\n\t\tt.Error(\"Expected\", R, \"got\", r)\n\t}\n}", "func (b *BlockChain) getCommonAncestor(bestHeader, prevTip *util.Header) (*util.Header, error) {\n\tvar err error\n\trollback := func(parent *util.Header, n int) (*util.Header, error) {\n\t\tfor i := 0; i < n; i++ {\n\t\t\tparent, err = b.db.Headers().GetPrevious(parent)\n\t\t\tif err != nil {\n\t\t\t\treturn parent, err\n\t\t\t}\n\t\t}\n\t\treturn parent, nil\n\t}\n\n\tmajority := bestHeader\n\tminority := prevTip\n\tif bestHeader.Height > prevTip.Height {\n\t\tmajority, err = rollback(majority, int(bestHeader.Height-prevTip.Height))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t} else if prevTip.Height > bestHeader.Height {\n\t\tminority, err = rollback(minority, int(prevTip.Height-bestHeader.Height))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tfor {\n\t\tmajorityHash := majority.Hash()\n\t\tminorityHash := minority.Hash()\n\t\tif majorityHash.IsEqual(minorityHash) {\n\t\t\treturn majority, nil\n\t\t}\n\t\tmajority, err = b.db.Headers().GetPrevious(majority)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tminority, err = b.db.Headers().GetPrevious(minority)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n}", "func topDownV3(l1 *html.Node, l2Types map[string]bool, l3Types map[string]bool) {\n\n\tif l1.Type != html.ElementNode &&\n\t\tl1.Type != html.DocumentNode {\n\t\treturn // cannot assign to - this node type cannot have children\n\t}\n\tif l1.Data == \"span\" || l1.Data == \"a\" {\n\t\treturn // do not want to condense into these elements\n\t}\n\n\t// dig two levels deep\n\n\t// isolate l2,l3\n\tl2s := []*html.Node{}\n\tl3s := map[*html.Node][]*html.Node{}\n\n\tfor l2 := l1.FirstChild; l2 != nil; l2 = l2.NextSibling {\n\n\t\tl2s = append(l2s, l2)\n\t\t// l2s = append([]*html.Node{l2}, l2s...) // order inversion\n\n\t\tfor l3 := l2.FirstChild; l3 != nil; l3 = l3.NextSibling {\n\t\t\tl3s[l2] = append(l3s[l2], l3)\n\t\t\t// l3s[l2] = append(map[*html.Node][]*html.Node{l2: []*html.Node{l3}}, l3s[l2]...) 
// order inversion\n\t\t}\n\t}\n\n\tpostponedRemoval := map[*html.Node]bool{}\n\n\t//\n\t//\n\t// check types for each l2 subtree distinctively\n\tfor _, l2 := range l2s {\n\n\t\tl2Match := l2.Type == html.ElementNode && l2Types[l2.Data] // l2 is a div\n\n\t\tl3Match := true\n\t\tfor _, l3 := range l3s[l2] {\n\t\t\tl3Match = l3Match && (l3.Type == html.ElementNode && l3Types[l3.Data])\n\t\t}\n\n\t\t// act\n\t\tif l2Match && l3Match {\n\n\t\t\t// detach l3 from l2\n\t\t\tfor _, l3 := range l3s[l2] {\n\t\t\t\t// if ml3[l3] > 0 {\n\t\t\t\t// \tfmt.Printf(\"rmd_%v_%v \", ml3[l3], l3.Data)\n\t\t\t\t// }\n\t\t\t\tl2.RemoveChild(l3)\n\t\t\t\t// ml3[l3]++\n\t\t\t}\n\n\t\t\t// Since we still need l2 below\n\t\t\t// We have to postpone detaching l2 from l1\n\t\t\t// to the bottom\n\t\t\t// NOT HERE: l1.RemoveChild(l2)\n\t\t\tpostponedRemoval[l2] = true\n\n\t\t\tfor _, l3 := range l3s[l2] {\n\t\t\t\t// attach l3 to l1\n\n\t\t\t\tif l3.Data != \"a\" && l3.Data != \"span\" {\n\t\t\t\t\tl1.InsertBefore(l3, l2)\n\t\t\t\t} else {\n\t\t\t\t\twrap := new(html.Node)\n\t\t\t\t\twrap.Type = html.ElementNode\n\t\t\t\t\twrap.Data = \"p\"\n\n\t\t\t\t\twrap.Attr = []html.Attribute{{Key: \"cfrm\", Val: \"noth\"}}\n\t\t\t\t\twrap.AppendChild(l3)\n\t\t\t\t\t// NOT wrap.FirstChild = l3\n\t\t\t\t\tl1.InsertBefore(wrap, l2)\n\t\t\t\t}\n\t\t\t}\n\n\t\t}\n\n\t}\n\n\tfor k := range postponedRemoval {\n\t\tl1.RemoveChild(k) // detach l2 from l1\n\t}\n\n}", "func (s *mergeBaseSuite) TestIndependentWithPairOfAncestors(c *C) {\n\trevs := []string{\"C\", \"D\", \"M\", \"N\"}\n\texpectedRevs := []string{\"C\", \"D\"}\n\ts.AssertIndependents(c, revs, expectedRevs)\n}", "func TestSyncOverlapWithFilter(t *testing.T) {\n\tctx := context.Background()\n\tr := fstest.NewRun(t)\n\n\tfi, err := filter.NewFilter(nil)\n\trequire.NoError(t, err)\n\trequire.NoError(t, fi.Add(false, \"/rclone-sync-test/\"))\n\trequire.NoError(t, fi.Add(false, \"*/layer2/\"))\n\tfi.Opt.ExcludeFile = []string{\".ignore\"}\n\tfilterCtx := filter.ReplaceConfig(ctx, fi)\n\n\tsubRemoteName := r.FremoteName + \"/rclone-sync-test\"\n\tFremoteSync, err := fs.NewFs(ctx, subRemoteName)\n\trequire.NoError(t, FremoteSync.Mkdir(ctx, \"\"))\n\trequire.NoError(t, err)\n\n\tsubRemoteName2 := r.FremoteName + \"/rclone-sync-test-include/layer2\"\n\tFremoteSync2, err := fs.NewFs(ctx, subRemoteName2)\n\trequire.NoError(t, FremoteSync2.Mkdir(ctx, \"\"))\n\trequire.NoError(t, err)\n\n\tsubRemoteName3 := r.FremoteName + \"/rclone-sync-test-ignore-file\"\n\tFremoteSync3, err := fs.NewFs(ctx, subRemoteName3)\n\trequire.NoError(t, FremoteSync3.Mkdir(ctx, \"\"))\n\trequire.NoError(t, err)\n\tr.WriteObject(context.Background(), \"rclone-sync-test-ignore-file/.ignore\", \"-\", t1)\n\n\tcheckErr := func(err error) {\n\t\trequire.Error(t, err)\n\t\tassert.True(t, fserrors.IsFatalError(err))\n\t\tassert.Equal(t, fs.ErrorOverlapping.Error(), err.Error())\n\t\taccounting.GlobalStats().ResetCounters()\n\t}\n\n\tcheckNoErr := func(err error) {\n\t\trequire.NoError(t, err)\n\t}\n\n\taccounting.GlobalStats().ResetCounters()\n\tcheckNoErr(Sync(filterCtx, FremoteSync, r.Fremote, false))\n\tcheckErr(Sync(ctx, FremoteSync, r.Fremote, false))\n\tcheckNoErr(Sync(filterCtx, r.Fremote, FremoteSync, false))\n\tcheckErr(Sync(ctx, r.Fremote, FremoteSync, false))\n\tcheckErr(Sync(filterCtx, r.Fremote, r.Fremote, false))\n\tcheckErr(Sync(ctx, r.Fremote, r.Fremote, false))\n\tcheckErr(Sync(filterCtx, FremoteSync, FremoteSync, false))\n\tcheckErr(Sync(ctx, FremoteSync, FremoteSync, 
false))\n\n\tcheckNoErr(Sync(filterCtx, FremoteSync2, r.Fremote, false))\n\tcheckErr(Sync(ctx, FremoteSync2, r.Fremote, false))\n\tcheckNoErr(Sync(filterCtx, r.Fremote, FremoteSync2, false))\n\tcheckErr(Sync(ctx, r.Fremote, FremoteSync2, false))\n\tcheckErr(Sync(filterCtx, FremoteSync2, FremoteSync2, false))\n\tcheckErr(Sync(ctx, FremoteSync2, FremoteSync2, false))\n\n\tcheckNoErr(Sync(filterCtx, FremoteSync3, r.Fremote, false))\n\tcheckErr(Sync(ctx, FremoteSync3, r.Fremote, false))\n\t// Destination is excluded so this test makes no sense\n\t// checkNoErr(Sync(filterCtx, r.Fremote, FremoteSync3, false))\n\tcheckErr(Sync(ctx, r.Fremote, FremoteSync3, false))\n\tcheckErr(Sync(filterCtx, FremoteSync3, FremoteSync3, false))\n\tcheckErr(Sync(ctx, FremoteSync3, FremoteSync3, false))\n}", "func areReferencesConflictingAcrossBlocks(parentsBlocks ParentBlockIDs) bool {\n\tfor blockID := range parentsBlocks[WeakParentType] {\n\t\tif _, exists := parentsBlocks[StrongParentType][blockID]; exists {\n\t\t\treturn true\n\t\t}\n\n\t\tif _, exists := parentsBlocks[ShallowLikeParentType][blockID]; exists {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}", "func buildIntermediate(nl []*Node, t *MerkleTree) (*Node, error) {\n\tvar nodes []*Node\n\n\tfor i := 0; i < len(nl); i += 2 {\n\t\thashMap := GetHashStrategies()\n\t\th := hashMap[t.HashStrategy]\n\t\tvar left, right int = i, i + 1\n\t\tif i+1 == len(nl) {\n\t\t\tright = i\n\t\t}\n\t\tchash := append(nl[left].Hash, nl[right].Hash...)\n\t\tif _, err := h.Write(chash); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tn := &Node{\n\t\t\tLeft: nl[left],\n\t\t\tRight: nl[right],\n\t\t\tHash: h.Sum(nil),\n\t\t\ttree: t,\n\t\t}\n\t\tnodes = append(nodes, n)\n\t\tnl[left].parent = n\n\t\tnl[right].parent = n\n\t\tif len(nl) == 2 {\n\t\t\treturn n, nil\n\t\t}\n\t}\n\treturn buildIntermediate(nodes, t)\n}", "func TestSwapNodesTwiceDepth1(t *testing.T) {\n\n\tindexes := [][]int32{\n\t\t{2, 3},\n\t\t{-1, -1},\n\t\t{-1, -1},\n\t}\n\n\tqueries := []int32{\n\t\t1,\n\t\t1,\n\t}\n\n\texpected := [][]int32{\n\t\t{3, 1, 2},\n\t\t{2, 1, 3},\n\t}\n\n\tactual := swapNodes(indexes, queries)\n\tassert.Equal(t, expected, actual)\n}", "func ancestorWith(node *Node, isBuffered bool, udata userdata, push func(*Node, uint32),\n\tpushBuf func(*Node, interface{}, uint32)) error {\n\t//\n\tif node == nil {\n\t\treturn nil\n\t}\n\tpredicate := udata.filterdata.(Predicate)\n\tanc := node.Parent()\n\tserial := udata.serial\n\tfor anc != nil {\n\t\tmatchedNode, err := predicate(anc, node)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif matchedNode != nil {\n\t\t\tpush(matchedNode, serial) // put ancestor on output channel for next pipeline stage\n\t\t\treturn nil\n\t\t}\n\t\tanc = anc.Parent()\n\t}\n\treturn nil // no matching ancestor found, not an error\n}", "func (m *Manifest) Merge(pn string, parents []string) {\n\tif strSliceHas(parents, pn) {\n\t\treturn\n\t}\n\tp := m.Latest(pn)\n\tm.OutPackages[pn] = *p\n\tif wants, ok := (*p)[\"wants\"]; ok {\n\t\twants := wants.(map[string]interface{})\n\t\tfor dn := range wants {\n\t\t\tm.Merge(dn, append(parents, pn))\n\t\t}\n\t}\n}", "func (n *internalNode) childMergeAppendTo(newNode *internalNode, childResult *deleteResult, pos int, exist bool) {\n\t// copy and append children, skip two children; insert the modified one\n\tskipChildPosStart := -1\n\tif childResult.rtype == dRTypeMergeWithLeft {\n\t\tskipChildPosStart = pos - 1\n\t} else if childResult.rtype == dRTypeMergeWithRight {\n\t\tskipChildPosStart = pos\n\t} else 
{\n\t\tpanic(\"unexpect merge type\")\n\t}\n\tfor i := 0; i < skipChildPosStart; i++ {\n\t\tnewNode.children = append(newNode.children, n.children[i])\n\t}\n\tnewNode.children = append(newNode.children, childResult.modified)\n\tfor i := skipChildPosStart + 2; i < len(n.children); i++ {\n\t\tnewNode.children = append(newNode.children, n.children[i])\n\t}\n\n\t// copy and append keys, skip the merged key; update key before skip pos if necessary\n\tskipKeyPos := -1\n\tif childResult.rtype == dRTypeMergeWithLeft {\n\t\tskipKeyPos = pos - 1\n\t} else if childResult.rtype == dRTypeMergeWithRight {\n\t\tskipKeyPos = pos\n\t} else {\n\t\tpanic(\"unexpected merge type\")\n\t}\n\tfor i := 0; i < skipKeyPos; i++ {\n\t\tnewNode.keys = append(newNode.keys, n.keys[i])\n\t}\n\tif exist && childResult.rtype == dRTypeMergeWithRight {\n\t\tnewNode.keys[len(newNode.keys)-1] = childResult.modified.leftMostKey()\n\t}\n\tfor i := skipKeyPos + 1; i < len(n.keys); i++ {\n\t\tnewNode.keys = append(newNode.keys, n.keys[i])\n\t}\n}", "func (k *Key) HasAncestor(other *Key) bool {\n\tif !k.kc.Matches(other.kc) {\n\t\treturn false\n\t}\n\tif len(k.toks) < len(other.toks) {\n\t\treturn false\n\t}\n\tfor i, tok := range other.toks {\n\t\tif tok != k.toks[i] {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func (cv checkValidator) validateDiff(ctx *sql.Context, diff tree.ThreeWayDiff) (int, error) {\n\tconflictCount := 0\n\n\tvar valueTuple val.Tuple\n\tvar valueDesc val.TupleDesc\n\tswitch diff.Op {\n\tcase tree.DiffOpLeftDelete, tree.DiffOpRightDelete, tree.DiffOpConvergentDelete:\n\t\t// no need to validate check constraints for deletes\n\t\treturn 0, nil\n\tcase tree.DiffOpDivergentDeleteConflict, tree.DiffOpDivergentModifyConflict:\n\t\t// Don't bother validating divergent conflicts, just let them get reported as conflicts\n\t\treturn 0, nil\n\tcase tree.DiffOpLeftAdd, tree.DiffOpLeftModify:\n\t\tvalueTuple = diff.Left\n\t\tvalueDesc = cv.tableMerger.leftSch.GetValueDescriptor()\n\tcase tree.DiffOpRightAdd, tree.DiffOpRightModify:\n\t\tvalueTuple = diff.Right\n\t\tvalueDesc = cv.tableMerger.rightSch.GetValueDescriptor()\n\tcase tree.DiffOpConvergentAdd, tree.DiffOpConvergentModify:\n\t\t// both sides made the same change, just take the left\n\t\tvalueTuple = diff.Left\n\t\tvalueDesc = cv.tableMerger.leftSch.GetValueDescriptor()\n\tcase tree.DiffOpDivergentModifyResolved:\n\t\tvalueTuple = diff.Merged\n\t\tvalueDesc = cv.tableMerger.leftSch.GetValueDescriptor()\n\t}\n\n\tfor checkName, checkExpression := range cv.checkExpressions {\n\t\t// If the row came from the right side of the merge, then remap it (if necessary) to the final schema.\n\t\t// This isn't necessary for left-side changes, because we already migrated the primary index data to\n\t\t// the merged schema, and we skip keyless tables, since their value tuples require different mapping\n\t\t// logic and we don't currently support merges to keyless tables that contain schema changes anyway.\n\t\tnewTuple := valueTuple\n\t\tif !cv.valueMerger.keyless && (diff.Op == tree.DiffOpRightAdd || diff.Op == tree.DiffOpRightModify) {\n\t\t\tnewTupleBytes := remapTuple(valueTuple, valueDesc, cv.valueMerger.rightMapping)\n\t\t\tnewTuple = val.NewTuple(cv.valueMerger.syncPool, newTupleBytes...)\n\t\t}\n\n\t\trow, err := buildRow(ctx, diff.Key, newTuple, cv.sch, cv.tableMerger)\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\n\t\tresult, err := checkExpression.Eval(ctx, row)\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\n\t\t// MySQL treats NULL as TRUE for a 
check constraint\n\t\tif result == nil {\n\t\t\tresult = true\n\t\t}\n\n\t\t// Coerce into a boolean; technically, this shouldn't be\n\t\t// necessary, since check constraint expressions should always\n\t\t// be of a boolean type, but Dolt has allowed this previously.\n\t\t// https://github.com/dolthub/dolt/issues/6411\n\t\tbooleanResult, err := types.ConvertToBool(result)\n\t\tif err != nil {\n\t\t\treturn 0, fmt.Errorf(\"unable to convert check constraint expression (%s) into boolean value: %v\", checkName, err.Error())\n\t\t}\n\n\t\tif booleanResult {\n\t\t\t// If a check constraint returns TRUE (or NULL), then the check constraint is fulfilled\n\t\t\t// https://dev.mysql.com/doc/refman/8.0/en/create-table-check-constraints.html\n\t\t\tcontinue\n\t\t} else {\n\t\t\tconflictCount++\n\t\t\tmeta, err := newCheckCVMeta(cv.sch, checkName)\n\t\t\tif err != nil {\n\t\t\t\treturn 0, err\n\t\t\t}\n\t\t\tif err = cv.insertArtifact(ctx, diff.Key, newTuple, meta); err != nil {\n\t\t\t\treturn conflictCount, err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn conflictCount, nil\n}", "func (t *Indexed) merge(l, r *Node) (root *Node) {\n\tif l == nil {\n\t\treturn r\n\t}\n\tif r == nil {\n\t\treturn l\n\t}\n\tif l.key > r.key {\n\t\tl, r = r, l\n\t}\n\tif l.p > r.p {\n\t\tsubR := t.merge(l.right, r)\n\t\tsubR.SetParent(l)\n\t\tl.right = subR\n\t\treturn l\n\t}\n\tsubL := t.merge(l, r.left)\n\tsubL.SetParent(r)\n\tr.left = subL\n\treturn r\n}", "func struct2way(g cfa.Graph, dom cfa.DominatorTree) []*primitive.Primitive {\n\tvar prims []*primitive.Primitive\n\t// unresolved := {}\n\tunresolved := newStack()\n\t// for (all nodes m in descending order)\n\tfor _, m := range descRevPostOrder(NodesOf(g.Nodes())) {\n\t\t// if ((nodeType(m) == 2-way) \\land (inHeadLatch(m) == False))\n\t\tmSuccs := g.From(m.ID())\n\t\tif mSuccs.Len() == 2 && !isLoopHead(m) && !m.IsLoopLatch {\n\t\t\t// if (\\exists n, n = max{i | immedDom(i) = m \\land #inEdges(i) >= 2})\n\t\t\tvar follow *Node\n\t\t\tfor _, i := range dom.DominatedBy(m.ID()) {\n\t\t\t\tii := i.(*Node)\n\t\t\t\tif g.To(ii.ID()).Len() < 2 {\n\t\t\t\t\t// Follow node has at least 2 in-edges.\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tif follow == nil || follow.RevPostNum < ii.RevPostNum {\n\t\t\t\t\tfollow = ii\n\t\t\t\t}\n\t\t\t}\n\t\t\tif follow != nil {\n\t\t\t\t// Create primitive.\n\t\t\t\tprim := &primitive.Primitive{\n\t\t\t\t\tPrim: \"if\",\n\t\t\t\t\tEntry: m.DOTID(),\n\t\t\t\t\tNodes: map[string]string{\n\t\t\t\t\t\t\"cond\": m.DOTID(),\n\t\t\t\t\t\t\"follow\": follow.DOTID(),\n\t\t\t\t\t},\n\t\t\t\t}\n\t\t\t\t// follow(m) = n\n\t\t\t\tm.Follow = follow\n\t\t\t\t// for (all x \\in unresolved)\n\t\t\t\tfor i := 0; !unresolved.empty(); i++ {\n\t\t\t\t\tx := unresolved.pop()\n\t\t\t\t\t// follow(x) = n\n\t\t\t\t\tx.Follow = follow\n\t\t\t\t\t//unresolved = unresolved - {x}\n\n\t\t\t\t\t// Add loop body nodes to primitive.\n\t\t\t\t\tname := fmt.Sprintf(\"body_%d\", i)\n\t\t\t\t\tprim.Nodes[name] = x.DOTID()\n\t\t\t\t}\n\t\t\t\tprims = append(prims, prim)\n\t\t\t} else {\n\t\t\t\t// unresolved = unresolved \\union {m}\n\t\t\t\tunresolved.push(m)\n\t\t\t}\n\t\t}\n\t}\n\treturn prims\n}", "func compat(a, b *Type, seenA, seenB map[*Type]bool) bool { //nolint:gocyclo\n\t// Normalize and break cycles from recursive types.\n\ta, b = a.NonOptional(), b.NonOptional()\n\tif a == b || seenA[a] || seenB[b] {\n\t\treturn true\n\t}\n\tseenA[a], seenB[b] = true, true\n\t// Handle Any\n\tif a.Kind() == Any || b.Kind() == Any {\n\t\treturn true\n\t}\n\t// Handle simple scalars\n\tif ax, bx 
:= a.Kind() == Bool, b.Kind() == Bool; ax || bx {\n\t\treturn ax && bx\n\t}\n\tif ax, bx := ttIsStringEnum(a), ttIsStringEnum(b); ax || bx {\n\t\treturn ax && bx\n\t}\n\tif ax, bx := a.Kind().IsNumber(), b.Kind().IsNumber(); ax || bx {\n\t\treturn ax && bx\n\t}\n\tif ax, bx := a.Kind() == TypeObject, b.Kind() == TypeObject; ax || bx {\n\t\treturn ax && bx\n\t}\n\t// Handle composites\n\tswitch a.Kind() {\n\tcase Array, List:\n\t\tswitch b.Kind() {\n\t\tcase Array, List:\n\t\t\treturn compat(a.Elem(), b.Elem(), seenA, seenB)\n\t\t}\n\t\treturn false\n\tcase Set:\n\t\tif b.Kind() == Set {\n\t\t\treturn compat(a.Key(), b.Key(), seenA, seenB)\n\t\t}\n\t\treturn false\n\tcase Map:\n\t\tif b.Kind() == Map {\n\t\t\treturn compat(a.Key(), b.Key(), seenA, seenB) && compat(a.Elem(), b.Elem(), seenA, seenB)\n\t\t}\n\t\treturn false\n\tcase Struct:\n\t\tif b.Kind() == Struct {\n\t\t\tif ttIsEmptyStruct(a) || ttIsEmptyStruct(b) {\n\t\t\t\treturn true // empty struct is compatible with all other structs\n\t\t\t}\n\t\t\treturn compatFields(a, b, seenA, seenB)\n\t\t}\n\t\treturn false\n\tcase Union:\n\t\tif b.Kind() == Union {\n\t\t\treturn compatFields(a, b, seenA, seenB)\n\t\t}\n\t\treturn false\n\tdefault:\n\t\tpanic(fmt.Errorf(\"vdl: Compatible unhandled types %q %q\", a, b))\n\t}\n}", "func Intersection(candidates []*Tree) (res *Tree) {\n\tn := len(candidates)\n\tif n == 0 {\n\t\treturn nil\n\t}\n\n\tif n == 1 {\n\t\treturn candidates[0]\n\t}\n\n\tchA := make([]chan *Tree, n/2)\n\tchB := make([]chan *Tree, n/2)\n\tchRet := make([]chan *Tree, n/2)\n\n\t// Start a goroutine that builds the intersection of each pair of candidates\n\tfor i := 0; i < n/2; i++ {\n\t\tchA[i] = make(chan *Tree)\n\t\tchB[i] = make(chan *Tree)\n\t\tchRet[i] = make(chan *Tree)\n\t\tgo func(chA chan *Tree, chB chan *Tree, chRes chan *Tree) {\n\t\t\ta := <-chA\n\t\t\tb := <-chB\n\t\t\tif a == nil || b == nil {\n\t\t\t\tchRes <- nil\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tlog.Infof(\"finding common elements in %d and %d elements\", a.Count, b.Count)\n\t\t\tchRes <- a.Intersection(b)\n\t\t}(chA[i], chB[i], chRet[i])\n\t\tchA[i] <- candidates[i*2]\n\t\tchB[i] <- candidates[i*2+1]\n\t}\n\n\tresults := make([]*Tree, 0)\n\n\t// If the number of candidate trees is odd we have to add the last tree to the results\n\tif n%2 == 1 {\n\t\tresults = append(results, candidates[n-1])\n\t}\n\n\t// Fetch results\n\tfor i := 0; i < n/2; i++ {\n\t\tresults = append(results, <-chRet[i])\n\t}\n\n\t// If we only have one tree left over, we're done\n\tif len(results) != 1 {\n\t\treturn Intersection(results)\n\t}\n\n\treturn results[0]\n}", "func (keys MapState) denyPreferredInsertWithChanges(newKey Key, newEntry MapStateEntry, adds, deletes MapState) {\n\tallCpy := allKey\n\tallCpy.TrafficDirection = newKey.TrafficDirection\n\t// If we have a deny \"all\" we don't accept any kind of map entry\n\tif v, ok := keys[allCpy]; ok && v.IsDeny {\n\t\treturn\n\t}\n\n\tif newEntry.IsDeny {\n\t\t// case for an existing allow L4-only and we are inserting deny L3-only\n\t\tswitch {\n\t\tcase newKey.DestPort == 0 && newKey.Nexthdr == 0 && newKey.Identity != 0:\n\t\t\tl4OnlyAllows := MapState{}\n\t\t\tfor k, v := range keys {\n\t\t\t\tif newKey.TrafficDirection == k.TrafficDirection &&\n\t\t\t\t\t!v.IsDeny &&\n\t\t\t\t\tk.Identity == 0 {\n\t\t\t\t\t// create a deny L3-L4 with the same allowed L4 port and proto\n\t\t\t\t\tnewKeyCpy := newKey\n\t\t\t\t\tnewKeyCpy.DestPort = k.DestPort\n\t\t\t\t\tnewKeyCpy.Nexthdr = k.Nexthdr\n\t\t\t\t\tkeys.addKeyWithChanges(newKeyCpy, newEntry, adds, 
deletes)\n\n\t\t\t\t\tl4OnlyAllows[k] = v\n\t\t\t\t}\n\t\t\t}\n\t\t\t// Delete all L3-L4 if we are inserting a deny L3-only and\n\t\t\t// there aren't any allow L4-only entries for the existing deny L3-L4\n\t\t\tfor k := range keys {\n\t\t\t\tif k.TrafficDirection == newKey.TrafficDirection &&\n\t\t\t\t\tk.DestPort != 0 && k.Nexthdr != 0 &&\n\t\t\t\t\tk.Identity == newKey.Identity {\n\n\t\t\t\t\tkCpy := k\n\t\t\t\t\tkCpy.Identity = 0\n\t\t\t\t\tif _, ok := l4OnlyAllows[kCpy]; !ok {\n\t\t\t\t\t\tkeys.deleteKeyWithChanges(k, nil, adds, deletes)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\tcase allCpy == newKey:\n\t\t\t// If we are adding a deny \"all\" entry, then we will remove all entries\n\t\t\t// from the map state for that direction.\n\t\t\tfor k := range keys {\n\t\t\t\tif k.TrafficDirection == allCpy.TrafficDirection {\n\t\t\t\t\tkeys.deleteKeyWithChanges(k, nil, adds, deletes)\n\t\t\t\t}\n\t\t\t}\n\t\tdefault:\n\t\t\t// Do not insert 'newKey' if the map state already denies traffic\n\t\t\t// which is a superset of (or equal to) 'newKey'\n\t\t\tnewKeyCpy := newKey\n\t\t\tnewKeyCpy.DestPort = 0\n\t\t\tnewKeyCpy.Nexthdr = 0\n\t\t\tv, ok := keys[newKeyCpy]\n\t\t\tif ok && v.IsDeny {\n\t\t\t\t// Found an L3-only Deny so we won't accept any L3-L4 policies\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\tkeys.addKeyWithChanges(newKey, newEntry, adds, deletes)\n\t\treturn\n\t} else if newKey.Identity == 0 && newKey.DestPort != 0 {\n\t\t// case for an existing deny L3-only and we are inserting allow L4\n\t\tfor k, v := range keys {\n\t\t\tif newKey.TrafficDirection == k.TrafficDirection {\n\t\t\t\tif v.IsDeny && k.Identity != 0 && k.DestPort == 0 && k.Nexthdr == 0 {\n\t\t\t\t\t// create a deny L3-L4 with the same deny L3\n\t\t\t\t\tnewKeyCpy := newKey\n\t\t\t\t\tnewKeyCpy.Identity = k.Identity\n\t\t\t\t\tkeys.addKeyWithChanges(newKeyCpy, v, adds, deletes)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tkeys.addKeyWithChanges(newKey, newEntry, adds, deletes)\n\t\treturn\n\t}\n\t// branch for adding a new allow L3-L4\n\n\tnewKeyCpy := newKey\n\tnewKeyCpy.DestPort = 0\n\tnewKeyCpy.Nexthdr = 0\n\tv, ok := keys[newKeyCpy]\n\tif ok && v.IsDeny {\n\t\t// Found an L3-only Deny so we won't accept any L3-L4 allow policies\n\t\treturn\n\t}\n\n\tkeys.RedirectPreferredInsert(newKey, newEntry, adds, deletes)\n}", "func (s *mergeBaseSuite) TestMergeBaseWithAncestor(c *C) {\n\trevs := []string{\"Q\", \"N\"}\n\texpectedRevs := []string{\"N\"}\n\ts.AssertMergeBase(c, revs, expectedRevs)\n}", "func (s *DisjointSet) Merge(x, y int) {\n\tpx := s.Find(x)\n\tpy := s.Find(y)\n\n\tif px < py {\n\t\ts.parent[py] = px\n\t} else {\n\t\ts.parent[px] = py\n\t}\n}", "func mergeTrafficMaps(trafficMap graph.TrafficMap, ns string, nsTrafficMap graph.TrafficMap) {\n\tfor nsId, nsNode := range nsTrafficMap {\n\t\tif node, isDup := trafficMap[nsId]; isDup {\n\t\t\tif nsNode.Namespace == ns {\n\t\t\t\t// prefer nsNode (see above comment), so do a swap\n\t\t\t\ttrafficMap[nsId] = nsNode\n\t\t\t\ttemp := node\n\t\t\t\tnode = nsNode\n\t\t\t\tnsNode = temp\n\t\t\t}\n\t\t\tfor _, nsEdge := range nsNode.Edges {\n\t\t\t\tisDupEdge := false\n\t\t\t\tfor _, e := range node.Edges {\n\t\t\t\t\tif nsEdge.Dest.ID == e.Dest.ID && nsEdge.Metadata[graph.ProtocolKey] == e.Metadata[graph.ProtocolKey] {\n\t\t\t\t\t\tisDupEdge = true\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif !isDupEdge {\n\t\t\t\t\tnode.Edges = append(node.Edges, nsEdge)\n\t\t\t\t\t// add traffic for the new edge\n\t\t\t\t\tgraph.AddOutgoingEdgeToMetadata(node.Metadata, nsEdge.Metadata)\n\t\t\t\t}\n\t\t\t}\n\t\t} else 
{\n\t\t\ttrafficMap[nsId] = nsNode\n\t\t}\n\t}\n}", "func TestMergeStructPointers(t *testing.T) {\n\ttype test struct {\n\t\tin1 v3_2.Config\n\t\tin2 v3_2.Config\n\t\tout v3_2.Config\n\t\ttranscript Transcript\n\t}\n\n\ttests := []test{\n\t\t{\n\t\t\tin1: v3_2.Config{\n\t\t\t\tStorage: v3_2.Storage{\n\t\t\t\t\tLuks: []v3_2.Luks{\n\t\t\t\t\t\t// nested struct pointers, one override\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tClevis: &v3_2.Clevis{\n\t\t\t\t\t\t\t\tCustom: &v3_2.Custom{\n\t\t\t\t\t\t\t\t\tConfig: \"cfg\",\n\t\t\t\t\t\t\t\t\tPin: \"pin\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\tThreshold: util.IntToPtr(1),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tDevice: util.StrToPtr(\"/dev/foo\"),\n\t\t\t\t\t\t\tName: \"bar\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tin2: v3_2.Config{\n\t\t\t\tStorage: v3_2.Storage{\n\t\t\t\t\tLuks: []v3_2.Luks{\n\t\t\t\t\t\t// nested struct pointers\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tClevis: &v3_2.Clevis{\n\t\t\t\t\t\t\t\tThreshold: util.IntToPtr(2),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tName: \"bar\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\t// struct pointer containing nil struct pointer\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tClevis: &v3_2.Clevis{\n\t\t\t\t\t\t\t\tTpm2: util.BoolToPtr(true),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tDevice: util.StrToPtr(\"/dev/baz\"),\n\t\t\t\t\t\t\tName: \"bleh\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tout: v3_2.Config{\n\t\t\t\tStorage: v3_2.Storage{\n\t\t\t\t\tLuks: []v3_2.Luks{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tClevis: &v3_2.Clevis{\n\t\t\t\t\t\t\t\tCustom: &v3_2.Custom{\n\t\t\t\t\t\t\t\t\tConfig: \"cfg\",\n\t\t\t\t\t\t\t\t\tPin: \"pin\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\tThreshold: util.IntToPtr(2),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tDevice: util.StrToPtr(\"/dev/foo\"),\n\t\t\t\t\t\t\tName: \"bar\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tClevis: &v3_2.Clevis{\n\t\t\t\t\t\t\t\tTpm2: util.BoolToPtr(true),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tDevice: util.StrToPtr(\"/dev/baz\"),\n\t\t\t\t\t\t\tName: \"bleh\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\ttranscript: Transcript{[]Mapping{\n\t\t\t\t{path.New(TAG_PARENT, \"storage\", \"luks\", 0, \"clevis\", \"custom\", \"config\"), path.New(TAG_RESULT, \"storage\", \"luks\", 0, \"clevis\", \"custom\", \"config\")},\n\t\t\t\t{path.New(TAG_PARENT, \"storage\", \"luks\", 0, \"clevis\", \"custom\", \"pin\"), path.New(TAG_RESULT, \"storage\", \"luks\", 0, \"clevis\", \"custom\", \"pin\")},\n\t\t\t\t{path.New(TAG_PARENT, \"storage\", \"luks\", 0, \"clevis\", \"custom\"), path.New(TAG_RESULT, \"storage\", \"luks\", 0, \"clevis\", \"custom\")},\n\t\t\t\t{path.New(TAG_CHILD, \"storage\", \"luks\", 0, \"clevis\", \"threshold\"), path.New(TAG_RESULT, \"storage\", \"luks\", 0, \"clevis\", \"threshold\")},\n\t\t\t\t{path.New(TAG_PARENT, \"storage\", \"luks\", 0, \"clevis\"), path.New(TAG_RESULT, \"storage\", \"luks\", 0, \"clevis\")},\n\t\t\t\t{path.New(TAG_CHILD, \"storage\", \"luks\", 0, \"clevis\"), path.New(TAG_RESULT, \"storage\", \"luks\", 0, \"clevis\")},\n\t\t\t\t{path.New(TAG_PARENT, \"storage\", \"luks\", 0, \"device\"), path.New(TAG_RESULT, \"storage\", \"luks\", 0, \"device\")},\n\t\t\t\t{path.New(TAG_CHILD, \"storage\", \"luks\", 0, \"name\"), path.New(TAG_RESULT, \"storage\", \"luks\", 0, \"name\")},\n\t\t\t\t{path.New(TAG_PARENT, \"storage\", \"luks\", 0), path.New(TAG_RESULT, \"storage\", \"luks\", 0)},\n\t\t\t\t{path.New(TAG_CHILD, \"storage\", \"luks\", 0), path.New(TAG_RESULT, \"storage\", \"luks\", 0)},\n\t\t\t\t{path.New(TAG_CHILD, \"storage\", \"luks\", 1, \"clevis\", 
\"tpm2\"), path.New(TAG_RESULT, \"storage\", \"luks\", 1, \"clevis\", \"tpm2\")},\n\t\t\t\t{path.New(TAG_CHILD, \"storage\", \"luks\", 1, \"clevis\"), path.New(TAG_RESULT, \"storage\", \"luks\", 1, \"clevis\")},\n\t\t\t\t{path.New(TAG_CHILD, \"storage\", \"luks\", 1, \"device\"), path.New(TAG_RESULT, \"storage\", \"luks\", 1, \"device\")},\n\t\t\t\t{path.New(TAG_CHILD, \"storage\", \"luks\", 1, \"name\"), path.New(TAG_RESULT, \"storage\", \"luks\", 1, \"name\")},\n\t\t\t\t{path.New(TAG_CHILD, \"storage\", \"luks\", 1), path.New(TAG_RESULT, \"storage\", \"luks\", 1)},\n\t\t\t\t{path.New(TAG_PARENT, \"storage\", \"luks\"), path.New(TAG_RESULT, \"storage\", \"luks\")},\n\t\t\t\t{path.New(TAG_CHILD, \"storage\", \"luks\"), path.New(TAG_RESULT, \"storage\", \"luks\")},\n\t\t\t\t{path.New(TAG_PARENT, \"storage\"), path.New(TAG_RESULT, \"storage\")},\n\t\t\t\t{path.New(TAG_CHILD, \"storage\"), path.New(TAG_RESULT, \"storage\")},\n\t\t\t}},\n\t\t},\n\t}\n\n\tfor i, test := range tests {\n\t\touti, transcript := MergeStructTranscribe(test.in1, test.in2)\n\t\tout := outi.(v3_2.Config)\n\n\t\tassert.Equal(t, test.out, out, \"#%d bad merge\", i)\n\t\tassert.Equal(t, test.transcript, transcript, \"#%d bad transcript\", i)\n\t}\n}", "func fixup_m(n *Node) {\n\tm := M(n)\n\tm_new := Max(M(n.left), M(n.right))\n\n\t// if current 'm' is not decided by n, just return.\n\tif m == m_new {\n\t\treturn\n\t}\n\n\tfor n.parent != nil {\n\t\tn.parent.m = Max(n.parent.high, Max(m_new, M(sibling(n))))\n\n\t\tif M(n.parent) > m {\n\t\t\tbreak // since node n does not affect\n\t\t\t// the result anymore, we break.\n\t\t}\n\t\tn = n.parent\n\t}\n}", "func TestCompareTreeJson(t *testing.T) {\n\tgoURL := \"http://localhost:8000/tree?&path=/lustre/scratch118/compgen&depth=3\"\n\tcppURL := \"http://localhost:9999/api/v2?&path=/lustre/scratch118/compgen&depth=2\"\n\n\ttolerance := .01 // using relDif to check floating points near enough equal\n\tcountSame := 0\n\n\tres, err := http.Get(goURL)\n\tif err != nil {\n\t\tt.Errorf(\"Server not running for go version: %s\", err.Error())\n\t\treturn\n\t}\n\n\tjNew, err := ioutil.ReadAll(res.Body)\n\tif err != nil {\n\t\tt.Errorf(err.Error())\n\t}\n\tres.Body.Close()\n\n\tres, err = http.Get(cppURL)\n\tif err != nil {\n\t\tt.Errorf(\"Server not running for C++ version: %s\", err.Error())\n\t\treturn\n\t}\n\tjOld, err := ioutil.ReadAll(res.Body)\n\tif err != nil {\n\t\tt.Errorf(err.Error())\n\t}\n\tres.Body.Close()\n\n\tfmt.Printf(\"C++ json length %d, Go version length %d ... 
should be similar though number format makes a difference \\n\\n\", len(jOld), len(jNew))\n\tnodesOld := make(map[string]string)\n\tnodesNew := make(map[string]string)\n\terr = nodeJSON(jOld, nodesOld, true)\n\tif err != nil {\n\t\tt.Errorf(err.Error())\n\t}\n\terr = nodeJSON(jNew, nodesNew, true)\n\tif err != nil {\n\t\tt.Errorf(err.Error())\n\t}\n\n\tfmt.Printf(\"C++ number of nodes %d, Go %d \\n\\n\", len(nodesOld), len(nodesNew))\n\n\t//fmt.Println(nodesOld, nodesNew)\n\n\tcontentOld := make(map[string]string)\n\tfor k, v := range nodesOld {\n\t\tpathOld := k\n\t\t_, ok := nodesNew[k]\n\t\tif !ok {\n\t\t\tfmt.Printf(\"C++ has node %s, missing in Go \\n\", pathOld)\n\t\t\tcontinue\n\t\t}\n\t\tvar data interface{}\n\t\terr = json.Unmarshal([]byte(v), &data)\n\t\tif err != nil {\n\t\t\tt.Errorf(err.Error())\n\t\t}\n\t\tm0, ok := data.(map[string]interface{}) // from here, break down of data for the node\n\t\tif ok {\n\n\t\t\tfor k1, v1 := range m0 { // type (eg ctime)\n\t\t\t\tm2 := v1.(map[string]interface{})\n\t\t\t\tfor k2, v2 := range m2 { // group\n\t\t\t\t\tm3 := v2.(map[string]interface{})\n\t\t\t\t\tfor k3, v3 := range m3 { // user\n\t\t\t\t\t\tm4 := v3.(map[string]interface{})\n\t\t\t\t\t\tfor k, v := range m4 { // tag\n\n\t\t\t\t\t\t\t//outputOld = append(outputOld, fmt.Sprintf(\"C++ has: %s, %s, %s, %s, %s, %s, %s,%s \\n\", kOuter, path, k0, k1, k2, k3, k, v))\n\t\t\t\t\t\t\tkey := fmt.Sprintf(\"%s,%s,%s,%s,%s\", pathOld, k1, k2, k3, k)\n\t\t\t\t\t\t\tcontentOld[key] = v.(string)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t//sort.Strings(outputOld)\n\t//fmt.Println(outputOld)\n\n\t//outputNew := []string{}\n\tcontentNew := make(map[string]string)\n\n\tfor k, v := range nodesNew {\n\t\tpathNew := k\n\t\t_, ok := nodesOld[k]\n\t\tif !ok {\n\t\t\tfmt.Printf(\"Go has node %s, missing in C++ \\n\", pathNew)\n\t\t\tcontinue\n\t\t}\n\t\tvar data interface{}\n\t\terr = json.Unmarshal([]byte(v), &data)\n\t\tif err != nil {\n\t\t\tt.Errorf(err.Error())\n\t\t}\n\t\tm0, ok := data.(map[string]interface{}) // from here, break down of data for the node\n\t\tif ok {\n\t\t\tfor k1, v1 := range m0 {\n\t\t\t\tm2 := v1.(map[string]interface{})\n\t\t\t\tfor k2, v2 := range m2 {\n\t\t\t\t\tm3 := v2.(map[string]interface{})\n\t\t\t\t\tfor k3, v3 := range m3 {\n\t\t\t\t\t\tm4 := v3.(map[string]interface{})\n\t\t\t\t\t\tfor k, v := range m4 {\n\n\t\t\t\t\t\t\t//outputNew = append(outputNew, fmt.Sprintf(\"Go has: %s, %s, %s, %s, %s, %s, %s,%s \\n\", kOuter, pathNew, k0, k1, k2, k3, k, v))\n\t\t\t\t\t\t\tkey := fmt.Sprintf(\"%s,%s,%s,%s,%s\", pathNew, k1, k2, k3, k)\n\n\t\t\t\t\t\t\t// keep different ones and count same ones\n\t\t\t\t\t\t\texists, ok := contentOld[key]\n\t\t\t\t\t\t\t// are they numbers and near enough with rounding?\n\t\t\t\t\t\t\tvar s1, s2 float64\n\t\t\t\t\t\t\tif ok {\n\n\t\t\t\t\t\t\t\tif sOld, err := strconv.ParseFloat(exists, 64); err == nil {\n\t\t\t\t\t\t\t\t\t//fmt.Printf(\"Old %T, %v\\n\", sOld, sOld)\n\t\t\t\t\t\t\t\t\ts1 = sOld\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\tif sNew, err := strconv.ParseFloat(v.(string), 64); err == nil {\n\t\t\t\t\t\t\t\t\t//fmt.Printf(\"New %T, %v\\n\", sNew, sNew)\n\t\t\t\t\t\t\t\t\ts2 = sNew\n\t\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t\t//fmt.Println(relDif(s1, s2))\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tif !ok || relDif(s1, s2) > tolerance { // not found, or too different\n\t\t\t\t\t\t\t\tcontentNew[key] = v.(string)\n\t\t\t\t\t\t\t} else { // in both, acceptable difference, remove from both\n\t\t\t\t\t\t\t\tdelete(contentOld, 
key)\n\t\t\t\t\t\t\t\tdelete(contentNew, key)\n\t\t\t\t\t\t\t\tcountSame++\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t//sort.Strings(outputNew)\n\t//fmt.Println(outputNew)\n\n\tfmt.Println(fmt.Sprintf(\"\\n Within tolerance matches : %d out of %d\\n\", countSame, countSame+len(contentOld)))\n\n\tmissingFromGo := []string{}\n\tmissingFromCpp := []string{}\n\tnonMatch := []string{}\n\n\tfor k, v := range contentNew {\n\t\texisting, ok := contentOld[k]\n\t\tif !ok {\n\t\t\tmissingFromCpp = append(missingFromCpp, fmt.Sprintf(\"missing from C++ at %s, Go has %s\", k, v))\n\t\t} else {\n\t\t\tnonMatch = append(nonMatch, fmt.Sprintf(\"different at %s, C++ %s, Go %s\", k, existing, v))\n\t\t}\n\t}\n\tfor k, v := range contentOld {\n\t\t_, ok := contentNew[k]\n\t\tif !ok {\n\t\t\tmissingFromGo = append(missingFromGo, fmt.Sprintf(\"missing from Go at %s, C++ has %s\", k, v))\n\t\t}\n\n\t}\n\n\tfmt.Println(\"Missing from Go version\")\n\tsort.StringSlice(missingFromCpp).Sort()\n\tsort.StringSlice(missingFromGo).Sort()\n\tsort.StringSlice(nonMatch).Sort()\n\n\tfor i := range missingFromGo {\n\t\tfmt.Println(missingFromGo[i])\n\t}\n\tfmt.Println(\"_________________\")\n\tfmt.Println(\"Missing from C++ version\")\n\tif len(missingFromCpp) == 0 {\n\t\tfmt.Println(\"None\")\n\t} else {\n\t\tfor i := range missingFromCpp {\n\t\t\tfmt.Println(missingFromCpp[i])\n\t\t}\n\t}\n\tfmt.Println(\"_________________\")\n\tfmt.Println(\"Non Matches\")\n\tfor i := range nonMatch {\n\t\tfmt.Println(nonMatch[i])\n\t}\n\n}", "func (suite *PouchDaemonSuite) TestDaemonNestObjectConflict(c *check.C) {\n\tdcfg, err := StartDefaultDaemon(map[string]interface{}{\n\t\t\"TLS\": map[string]string{\n\t\t\t\"tlscacert\": \"ca\",\n\t\t\t\"tlscert\": \"cert\",\n\t\t\t\"tlskey\": \"key\",\n\t\t},\n\t}, \"--tlscacert\", \"ca\")\n\tdcfg.KillDaemon()\n\tc.Assert(err, check.NotNil)\n}", "func (s *mergeBaseSuite) TestDoubleCommonInSubFeatureBranches(c *C) {\n\trevs := []string{\"G\", \"Q\"}\n\texpectedRevs := []string{\"GQ1\", \"GQ2\"}\n\ts.AssertMergeBase(c, revs, expectedRevs)\n}", "func TestMerge(t *testing.T) {\n\tpncounter1 := PNCounter{\n\t\tAdd: gcounter.GCounter{map[string]int{\"node1\": 3, \"node2\": 5, \"node3\": 7}},\n\t\tDelete: gcounter.GCounter{map[string]int{\"node1\": 3, \"node2\": 5, \"node3\": 7}},\n\t}\n\tpncounter2 := PNCounter{\n\t\tAdd: gcounter.GCounter{map[string]int{\"node1\": 4, \"node2\": 6, \"node3\": 8}},\n\t\tDelete: gcounter.GCounter{map[string]int{\"node1\": 3, \"node2\": 5, \"node3\": 7}},\n\t}\n\tpncounter3 := PNCounter{\n\t\tAdd: gcounter.GCounter{map[string]int{\"node1\": 2, \"node2\": 4, \"node3\": 9}},\n\t\tDelete: gcounter.GCounter{map[string]int{\"node1\": 2, \"node2\": 4, \"node3\": 9}},\n\t}\n\n\tpncounterExpected := PNCounter{\n\t\tAdd: gcounter.GCounter{map[string]int{\"node1\": 4, \"node2\": 6, \"node3\": 9}},\n\t\tDelete: gcounter.GCounter{map[string]int{\"node1\": 3, \"node2\": 5, \"node3\": 9}},\n\t}\n\n\tpncounterActual := Merge(pncounter1, pncounter2, pncounter3)\n\n\tcountExpected := 2\n\tcountActual := pncounterActual.GetTotal()\n\n\tassert.Equal(t, pncounterExpected, pncounterActual)\n\tassert.Equal(t, countExpected, countActual)\n\n\tpncounter = pncounter.Clear(testNode)\n}", "func (self *PhysicsP2) CreateLockConstraint3O(bodyA interface{}, bodyB interface{}, offset []interface{}, angle int, maxForce int) *PhysicsP2LockConstraint{\n return &PhysicsP2LockConstraint{self.Object.Call(\"createLockConstraint\", bodyA, bodyB, offset, angle, 
maxForce)}\n}", "func moveModifiedTables(ctx context.Context, oldRoot, newRoot, changedRoot *doltdb.RootValue, conflicts *set.StrSet, force bool) (map[string]hash.Hash, error) {\n\tresultMap := make(map[string]hash.Hash)\n\ttblNames, err := newRoot.GetTableNames(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, tblName := range tblNames {\n\t\toldHash, _, err := oldRoot.GetTableHash(ctx, tblName)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tnewHash, _, err := newRoot.GetTableHash(ctx, tblName)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tchangedHash, _, err := changedRoot.GetTableHash(ctx, tblName)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif oldHash == changedHash {\n\t\t\tresultMap[tblName] = newHash\n\t\t} else if oldHash == newHash {\n\t\t\tresultMap[tblName] = changedHash\n\t\t} else if force {\n\t\t\tresultMap[tblName] = newHash\n\t\t} else {\n\t\t\tconflicts.Add(tblName)\n\t\t}\n\t}\n\n\ttblNames, err = changedRoot.GetTableNames(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, tblName := range tblNames {\n\t\tif _, exists := resultMap[tblName]; !exists {\n\t\t\toldHash, _, err := oldRoot.GetTableHash(ctx, tblName)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tchangedHash, _, err := changedRoot.GetTableHash(ctx, tblName)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tif oldHash == emptyHash {\n\t\t\t\tresultMap[tblName] = changedHash\n\t\t\t} else if force {\n\t\t\t\tresultMap[tblName] = oldHash\n\t\t\t} else if oldHash != changedHash {\n\t\t\t\tconflicts.Add(tblName)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn resultMap, nil\n}", "func TestMapNestBug(t *testing.T) {\n\tt.Parallel()\n\n\t// Original data:\n\t// (aa: {(a: 11), (a: 10)}):{(c: 3)}\n\t// (aa: {(a: 11), (a: 10)}):{(c: 1)}\n\ta := KV(\n\t\tNewMap(\n\t\t\tKV(\"aa\", NewSet(\n\t\t\t\tNewMap(KV(\"a\", 11)),\n\t\t\t\tNewMap(KV(\"a\", 10)),\n\t\t\t)),\n\t\t),\n\t\tNewSet(NewMap(KV(\"c\", 3))),\n\t)\n\trequire.Contains(t,\n\t\t[]string{\n\t\t\t\"(aa: {(a: 10), (a: 11)}):{(c: 3)}\",\n\t\t\t\"(aa: {(a: 11), (a: 10)}):{(c: 3)}\",\n\t\t}, a.String())\n\tb := KV(\n\t\tNewMap(\n\t\t\tKV(\"aa\", NewSet(\n\t\t\t\tNewMap(KV(\"a\", 11)),\n\t\t\t\tNewMap(KV(\"a\", 10)),\n\t\t\t)),\n\t\t),\n\t\tNewSet(NewMap(KV(\"c\", 1))),\n\t)\n\trequire.Contains(t,\n\t\t[]string{\n\t\t\t\"(aa: {(a: 10), (a: 11)}):{(c: 1)}\",\n\t\t\t\"(aa: {(a: 11), (a: 10)}):{(c: 1)}\",\n\t\t}, b.String())\n\tassert.Equal(t, a.Hash(0) == b.Hash(0), a.Equal(b))\n\n\t// The bug actually caused an endless loop, but there's not way to assert\n\t// for that\n\tNewMap(a).Update(NewMap(b))\n}", "func TestHeaviestIsWidenedAncestor(t *testing.T) {\n\ttf.UnitTest(t)\n\tctx := context.Background()\n\tbuilder, store, syncer := setup(ctx, t)\n\tgenesis := builder.RequireTipSet(store.GetHead())\n\n\tlink1 := builder.AppendOn(genesis, 2)\n\tlink2 := builder.AppendOn(link1, 3)\n\tlink3 := builder.AppendOn(link2, 1)\n\tlink4 := builder.AppendOn(link3, 2)\n\n\tforkLink2 := builder.AppendOn(link1, 4)\n\tforkLink3 := builder.AppendOn(forkLink2, 1)\n\n\t// Sync main chain\n\tassert.NoError(t, syncer.HandleNewTipSet(ctx, types.NewChainInfo(peer.ID(\"\"), link4.Key(), heightFromTip(t, link4)), true))\n\n\t// Sync fork chain\n\tassert.NoError(t, syncer.HandleNewTipSet(ctx, types.NewChainInfo(peer.ID(\"\"), forkLink3.Key(), heightFromTip(t, forkLink3)), true))\n\n\t// Assert that widened chain is the new head\n\twideBlocks := link2.ToSlice()\n\twideBlocks = append(wideBlocks, 
forkLink2.ToSlice()...)\n\twideTs := types.RequireNewTipSet(t, wideBlocks...)\n\n\tverifyTip(t, store, wideTs, builder.ComputeState(wideTs))\n\tverifyHead(t, store, wideTs)\n}", "func merge(existing, kind *Config) error {\n\t// verify assumptions about kubeadm / kind kubeconfigs\n\tif err := checkKubeadmExpectations(kind); err != nil {\n\t\treturn err\n\t}\n\n\t// insert or append cluster entry\n\tshouldAppend := true\n\tfor i := range existing.Clusters {\n\t\tif existing.Clusters[i].Name == kind.Clusters[0].Name {\n\t\t\texisting.Clusters[i] = kind.Clusters[0]\n\t\t\tshouldAppend = false\n\t\t}\n\t}\n\tif shouldAppend {\n\t\texisting.Clusters = append(existing.Clusters, kind.Clusters[0])\n\t}\n\n\t// insert or append user entry\n\tshouldAppend = true\n\tfor i := range existing.Users {\n\t\tif existing.Users[i].Name == kind.Users[0].Name {\n\t\t\texisting.Users[i] = kind.Users[0]\n\t\t\tshouldAppend = false\n\t\t}\n\t}\n\tif shouldAppend {\n\t\texisting.Users = append(existing.Users, kind.Users[0])\n\t}\n\n\t// insert or append context entry\n\tshouldAppend = true\n\tfor i := range existing.Contexts {\n\t\tif existing.Contexts[i].Name == kind.Contexts[0].Name {\n\t\t\texisting.Contexts[i] = kind.Contexts[0]\n\t\t\tshouldAppend = false\n\t\t}\n\t}\n\tif shouldAppend {\n\t\texisting.Contexts = append(existing.Contexts, kind.Contexts[0])\n\t}\n\n\t// set the current context\n\texisting.CurrentContext = kind.CurrentContext\n\n\t// TODO: We should not need this, but it allows broken clients that depend\n\t// on apiVersion and kind to work. Notably the upstream javascript client.\n\t// See: https://github.com/kubernetes-sigs/kind/issues/1242\n\tif len(existing.OtherFields) == 0 {\n\t\t// TODO: Should we be deep-copying? for now we don't need to\n\t\t// and doing so would be a pain (re and de-serialize maybe?) 
:shrug:\n\t\texisting.OtherFields = kind.OtherFields\n\t}\n\n\treturn nil\n}", "func (p *NakedTriple) findThreeMatchingLookups(lookups []*map[int]bool) ([]*map[int]bool, []int) {\n\tlength := len(lookups)\n\n\tfor a := 0; a < length; a++ {\n\t\tfor b := a + 1; b < length; b++ {\n\t\t\tfor c := b + 1; c < length; c++ {\n\t\t\t\tif values := findNakedValues(3, lookups[a], lookups[b], lookups[c]); values != nil {\n\t\t\t\t\treturn []*map[int]bool{\n\t\t\t\t\t\tlookups[a],\n\t\t\t\t\t\tlookups[b],\n\t\t\t\t\t\tlookups[c],\n\t\t\t\t\t}, values\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil, nil\n}", "func Thread(articles []ParsedArticle) map[*Container]bool {\n\tid_table = make(idTable)\n\n\t// 1\n\tfor _, message := range articles {\n\t\t// 1A\n\t\tcontainer := containerById(message.Id)\n\n\t\tif container.Article == nil {\n\t\t\tcontainer.Article = new(ParsedArticle)\n\t\t\t*container.Article = message\n\t\t}\n\n\t\t// 1B\n\t\tfor i := 0; i < len(message.References)-1; i++ {\n\t\t\tcontainer1 := containerById(message.References[i])\n\t\t\tcontainer2 := containerById(message.References[i+1])\n\n\t\t\tif container1.Parent == nil && container2.Parent == nil &&\n\t\t\t\tmayLink(container1, container2) {\n\t\t\t\tcontainer2.Parent = container1\n\t\t\t}\n\t\t}\n\n\t\t// 1C\n\t\tif l := len(message.References); l > 0 {\n\t\t\tif last := containerById(message.References[l-1]); mayLink(container, last) {\n\t\t\t\tcontainer.Parent = last\n\t\t\t}\n\t\t} else {\n\t\t\tcontainer.Parent = nil\n\t\t}\n\t}\n\n\t// we „forgot“ to set Child and Next links\n\n\t// Child links\n\tfor _, container := range id_table {\n\t\tif parent := container.Parent; parent != nil && parent.Child == nil {\n\t\t\tparent.Child = container\n\t\t}\n\t}\n\n\t// Next links\n\tfor _, container := range id_table {\n\t\tif parent := container.Parent; parent != nil && parent.Child != container {\n\t\t\totherChild := parent.Child\n\t\t\tfor otherChild.Next != nil && otherChild != container {\n\t\t\t\totherChild = otherChild.Next\n\t\t\t}\n\n\t\t\tif otherChild != container {\n\t\t\t\totherChild.Next = container\n\t\t\t}\n\t\t}\n\t}\n\n\t// 2\n\trootSet := make(map[*Container]bool)\n\n\tfor _, message := range articles {\n\t\tcontainer := containerById(message.Id)\n\n\t\tfor container.Parent != nil {\n\t\t\tcontainer = container.Parent\n\t\t}\n\n\t\trootSet[container] = true\n\t}\n\n\t// 3\n\tid_table = nil\n\n\t// 4\n\t//\n\t// we use WalkContainers as replacement for recursion\n\n\trepeat := true\n\n\t// for whatever reason, doing this once isn't sufficient\n\tfor repeat {\n\t\trepeat = false\n\t\tch := make(chan *DepthContainer)\n\t\tgo WalkContainers(rootSet, ch)\n\n\t\tfor d := range ch {\n\t\t\tcontainer := d.Cont\n\t\t\t// 4A\n\t\t\tif container.Article == nil {\n\t\t\t\tif container.Child == nil && container.Next == nil {\n\t\t\t\t\tdelete(rootSet, container)\n\t\t\t\t\trepeat = true\n\t\t\t\t\t// delete from parent's child list, if existing\n\t\t\t\t\tdeleteFromParentsList(container)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// 4B\n\t\t\tif container.Article == nil && container.Child != nil {\n\t\t\t\t// remove this container\n\t\t\t\trepeat = true\n\t\t\t\tdelete(rootSet, container)\n\n\t\t\t\t// promote single child to root set\n\t\t\t\tif container.Child.Next == nil {\n\t\t\t\t\trootSet[container.Child] = true\n\t\t\t\t} else if container.Parent != nil {\n\t\t\t\t\t// promote non-single child to non-root\n\t\t\t\t\tparent := container.Parent\n\t\t\t\t\tlast := parent.Child\n\t\t\t\t\tfor last.Next != nil {\n\t\t\t\t\t\tlast = 
last.Next\n\t\t\t\t\t}\n\n\t\t\t\t\tlast.Next = container.Child\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t// 5\n\n\t// A\n\tsubject_table := make(subjectTable)\n\n\t// B\n\tfor this := range rootSet {\n\t\tsubject := findSubject(this)\n\t\tif subject != \"\" {\n\t\t\told, ok := subject_table[subject]\n\t\t\tif !ok ||\n\t\t\t\t(this.Article == nil || old.Article != nil) ||\n\t\t\t\tisFollowup(old.Article.Subject) && !isFollowup(this.Article.Subject) {\n\t\t\t\tsubject_table[subject] = this\n\t\t\t}\n\t\t}\n\t}\n\n\t// C\n\t////for this := range rootSet {\n\t////\tsubject := findSubject(this)\n\t////\tthat, ok := subject_table[subject]\n\t////\tif !ok || this == that {\n\t////\t\tcontinue\n\t////\t}\n\n\t////\t// (a)\n\t////\t// both are dummies\n\t////\tif this.Article == nil && that.Article == nil {\n\t////\t\t// append this' children to that's children\n\t////\t\tlast := that.Child\n\t////\t\tfor last.Next != nil {\n\t////\t\t\tlast = last.Next\n\t////\t\t}\n\n\t////\t\tlast.Next = this.Child\n\n\t////\t\t// and delete this\n\t////\t\tdelete(rootSet, this)\n\t////\t\tsubject_table[subject] = that\n\t////\t} else if ((this.Article == nil) && (that.Article != nil)) ||\n\t////\t\t((this.Article != nil) && (that.Article == nil)) {\n\t////\t\t// (b)\n\t////\t\t// one is empty, another one is not\n\t////\t\tif this.Article == nil {\n\t////\t\t\tthis, that = that, this\n\t////\t\t}\n\n\t////\t\t// that is empty, this isn't\n\n\t////\t\tsubject_table[subject] = that\n\t////\t\tmakeToChildOf(this, that)\n\n\t////\t} else if that.Article != nil && !isFollowup(that.Article.Subject) &&\n\t////\t\tthis.Article != nil && isFollowup(this.Article.Subject) {\n\t////\t\t// (c)\n\t////\t\t// that is a follow-up, this isn't\n\t////\t\tmakeToChildOf(this, that)\n\t////\t\tsubject_table[subject] = that\n\t////\t} else if that.Article != nil && isFollowup(that.Article.Subject) &&\n\t////\t\tthis.Article != nil && !isFollowup(this.Article.Subject) {\n\t////\t\t// (d)\n\t////\t\t// misordered\n\t////\t\tmakeToChildOf(that, this)\n\t////\t} else {\n\t////\t\t// (e)\n\t////\t\t// otherwise\n\t////\t\tnewId := fmt.Sprintf(\"id%s@random.id\", rand.Int())\n\n\t////\t\tcontainer := &Container{\n\t////\t\t\tId: MessageId(newId),\n\t////\t\t}\n\n\t////\t\t// container\n\t////\t\t// ↓\n\t////\t\t// this→⋯→last→that\n\n\t////\t\tthis.Parent = container\n\t////\t\tthat.Parent = container\n\n\t////\t\tcontainer.Child = this\n\t////\t\tlast := this\n\t////\t\tfor last.Next != nil {\n\t////\t\t\tlast = last.Next\n\t////\t\t}\n\n\t////\t\tlast.Next = that\n\t////\t}\n\t////}\n\n\t// 6 (nothing)\n\n\t// 7\n\n\tch := make(chan *DepthContainer)\n\tgo WalkContainers(rootSet, ch)\n\n\tfor container := range ch {\n\t\tsortSiblings(container.Cont)\n\t}\n\n\t// the algorithm ends here; we need additional work\n\n\t// add Secondary links according to depth-first traversal\n\tch = make(chan *DepthContainer)\n\tgo WalkContainers(rootSet, ch)\n\n\tvar first *DepthContainer\n\tfor first = range ch {\n\t\tif first != nil && first.Cont != nil {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tfor second := range ch {\n\t\tif second == nil || second.Cont == nil || second.Cont.Article != nil {\n\t\t\tfirst.Cont.Secondary = second.Cont\n\t\t\tfirst = second\n\t\t}\n\t}\n\n\treturn rootSet\n}", "func mergeOverlappingHunks(oldHunks, newHunks []*diff.Hunk) (*diff.Hunk, error) {\n\tresultHunk, currentOrgI, err := configureResultHunk(oldHunks, newHunks)\n\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"configuring result hunk: %w\", err)\n\t}\n\n\t// Indexes of 
hunks\n\tcurrentOldHunkI, currentNewHunkJ := 0, 0\n\t// Indexes of lines in body hunks\n\t// if indexes == -1 -- we don't have relevant hunk, which contains changes nearby currentOrgI\n\ti, j := -1, -1\n\n\t// Body of hunks\n\tvar newBody []string\n\tvar oldHunkBody, newHunkBody []string\n\n\t// Iterating through the hunks in the order they're appearing in origin file.\n\t// Using number of line in origin (currentOrgI) as an anchor to process line by line.\n\t// By using currentOrgI as anchor it is easier to see how changes have been applied step by step.\n\n\t// Merge, while there are hunks to process\n\tfor currentOldHunkI < len(oldHunks) || currentNewHunkJ < len(newHunks) {\n\n\t\t// Entering next hunk in oldHunks\n\t\tif currentOldHunkI < len(oldHunks) && i == -1 && currentOrgI == oldHunks[currentOldHunkI].OrigStartLine {\n\t\t\ti = 0\n\t\t\toldHunkBody = strings.Split(strings.TrimSuffix(string(oldHunks[currentOldHunkI].Body), \"\\n\"), \"\\n\")\n\t\t}\n\n\t\t// Entering next hunk in newHunks\n\t\tif currentNewHunkJ < len(newHunks) && j == -1 && currentOrgI == newHunks[currentNewHunkJ].OrigStartLine {\n\t\t\tj = 0\n\t\t\tnewHunkBody = strings.Split(strings.TrimSuffix(string(newHunks[currentNewHunkJ].Body), \"\\n\"), \"\\n\")\n\t\t}\n\n\t\tswitch {\n\t\tcase i == -1 && j == -1:\n\t\tcase i >= 0 && j == -1:\n\t\t\t// Changes are only in oldHunk\n\t\t\tnewBody = append(newBody, revertedLine(oldHunkBody[i]))\n\t\t\t// In case current line haven't been added, we have processed anchor line.\n\t\t\tif !strings.HasPrefix(oldHunkBody[i], \"+\") {\n\t\t\t\t// Updating index of anchor line.\n\t\t\t\tcurrentOrgI++\n\t\t\t}\n\t\t\ti++\n\n\t\tcase i == -1 && j >= 0:\n\t\t\t// Changes are only in newHunk\n\t\t\tnewBody = append(newBody, newHunkBody[j])\n\t\t\t// In case current line haven't been added, we have processed anchor line.\n\t\t\tif !strings.HasPrefix(newHunkBody[j], \"+\") {\n\t\t\t\t// Updating index of anchor line.\n\t\t\t\tcurrentOrgI++\n\t\t\t}\n\t\t\tj++\n\n\t\tdefault:\n\t\t\t// Changes are in old and new hunks.\n\t\t\tswitch {\n\t\t\t// Firstly proceeding added lines,\n\t\t\t// because added lines are between previous currentOrgI and currentOrgI.\n\t\t\tcase strings.HasPrefix(oldHunkBody[i], \"+\") || strings.HasPrefix(newHunkBody[j], \"+\"):\n\t\t\t\tnewBody = append(newBody, interAddedLines(&i, &j, &oldHunkBody, &newHunkBody)...)\n\t\t\tdefault:\n\t\t\t\t// Checking if original content is the same\n\t\t\t\tif oldHunkBody[i][1:] != newHunkBody[j][1:] {\n\t\t\t\t\treturn nil, fmt.Errorf(\n\t\t\t\t\t\t\"line in original %d in oldDiff (%q) and newDiff (%q): %w\",\n\t\t\t\t\t\tcurrentOrgI, oldHunkBody[i][1:], newHunkBody[j][1:], ErrContentMismatch)\n\t\t\t\t}\n\t\t\t\tswitch {\n\t\t\t\tcase strings.HasPrefix(oldHunkBody[i], \" \") && strings.HasPrefix(newHunkBody[j], \" \"):\n\t\t\t\t\tnewBody = append(newBody, oldHunkBody[i])\n\t\t\t\tcase strings.HasPrefix(oldHunkBody[i], \"-\") && strings.HasPrefix(newHunkBody[j], \" \"):\n\t\t\t\t\tnewBody = append(newBody, revertedLine(oldHunkBody[i]))\n\t\t\t\tcase strings.HasPrefix(oldHunkBody[i], \" \") && strings.HasPrefix(newHunkBody[j], \"-\"):\n\t\t\t\t\tnewBody = append(newBody, newHunkBody[j])\n\t\t\t\t\t// If both have deleted same line, no need to append it to newBody\n\t\t\t\t}\n\n\t\t\t\t// Updating currentOrgI since we have processed anchor line.\n\t\t\t\tcurrentOrgI++\n\t\t\t\ti++\n\t\t\t\tj++\n\t\t\t}\n\t\t}\n\n\t\tif i >= len(oldHunkBody) {\n\t\t\t// Proceed whole oldHunkBody\n\t\t\ti = -1\n\t\t\tcurrentOldHunkI++\n\t\t}\n\n\t\tif 
j >= len(newHunkBody) {\n\t\t\t// Proceed whole newHunkBody\n\t\t\tj = -1\n\t\t\tcurrentNewHunkJ++\n\t\t}\n\t}\n\n\tresultHunk.Body = []byte(strings.Join(newBody, \"\\n\") + \"\\n\")\n\n\tfor _, line := range newBody {\n\t\tif !strings.HasPrefix(line, \" \") {\n\t\t\t// resultHunkBody contains some changes\n\t\t\treturn resultHunk, nil\n\t\t}\n\t}\n\n\treturn nil, nil\n}", "func TestThreePeerNetworkUpdatesTopology(t *testing.T) {\n\ttn := newTestNetwork(t) //.withOTLP(OTLPAddress, \"TestThreePeerNetworkUpdatesTopology\")\n\tdefer tn.stop()\n\n\ttn.startLPServer(8081).\n\t\tstartLPServer(8082).\n\t\tconnect(8082, 8081).\n\t\tstartLPServer(8083).\n\t\tconnect(8083, 8082).\n\t\tassertNetworkTopology(8081, 8082, 8083)\n}", "func (t *DIC3) InsertOrUpdate(triplet *Request, reply *Response) error {\n\tfmt.Println(\"in InsertOrUpdate : \", triplet.KeyRel.KeyA, triplet.KeyRel.RelA, triplet.Val)\n\n\thashid := getKeyRelHash(triplet.KeyRel.KeyA, triplet.KeyRel.RelA)\n\tif belongsto(hashid, predecessor.Chordid, chordid) == true {\n\t\tkeyRel := DKey{triplet.KeyRel.KeyA, triplet.KeyRel.RelA}\n\t\tv, ok := dict3[keyRel]\n\t\tif !ok {\n\t\t\t//Insert\n\t\t\tfmt.Println(\"Inserting.....\")\n\t\t\t//dict3[keyRel] = triplet.Val\n\t\t\tinsertTripletToDict3(triplet.KeyRel, triplet.Val, triplet.Permission)\n\t\t} else {\n\t\t\t//Update\n\t\t\tfmt.Println(\"Updating.....\")\n\t\t\taccess := v[\"permission\"].(string)\n\t\t\tif strings.EqualFold(\"RW\", access) {\n\t\t\t\tv[\"content\"] = triplet.Val\n\t\t\t\tv[\"size\"] = reflect.TypeOf(triplet.Val).Size()\n\t\t\t\tv[\"modified\"] = time.Now().Format(time.RFC3339)\n\t\t\t\t_, ok = dict3[DKey{triplet.KeyRel.KeyA, triplet.KeyRel.RelA}]\n\t\t\t} else {\n\t\t\t\tfmt.Println(\"No RW access\")\n\t\t\t}\n\t\t}\n\n\t\treply.Done = ok\n\t\treply.Err = nil\n\t\treturn nil\n\t}\n\tnp := nearestPredecessor(hashid)\n\tclient, err := jsonrpc.Dial(protocol, np.Address)\n\tif err != nil {\n\t\tlog.Fatal(\"dialing:\", err)\n\t}\n\tvar reply2 Response\n\tRpcCall := client.Go(\"DIC3.InsertOrUpdate\", triplet, &reply2, nil)\n\treplyCall := <-RpcCall.Done\n\tif replyCall != nil {\n\t}\n\n\treply.Done = reply2.Done\n\treply.Err = reply2.Err\n\tfmt.Println(reply)\n\tclient.Close()\n\treturn nil\n\n}", "func updateForest(nnms []string) error {\n\tpnm := \"\" // The root has no parent\n\tfor _, nnm := range nnms {\n\t\t// Update our forest with the inferred hierarchy\n\t\tif ns, exists := forest[nnm]; exists {\n\t\t\tif ns.parent != pnm {\n\t\t\t\treturn fmt.Errorf(\"namespace %q has conflicting parents: %q and %q\\n\", nnm, ns.parent, pnm)\n\t\t\t}\n\t\t} else {\n\t\t\tns = &nsInfo{}\n\t\t\tns.parent = pnm\n\t\t\tforest[nnm] = ns\n\t\t}\n\n\t\t// Update the parent for the next iteration\n\t\tpnm = nnm\n\t}\n\treturn nil\n}", "func (s *mergeBaseSuite) TestIndependentWithRepeatedAncestors(c *C) {\n\trevs := []string{\"A\", \"A\", \"M\", \"M\", \"N\"}\n\texpectedRevs := []string{\"A\", \"N\"}\n\ts.AssertIndependents(c, revs, expectedRevs)\n}", "func TestMultipleRefTypeIssues(t *testing.T) {\n\tclassName := func(suffix string) string {\n\t\treturn \"MultiRefTypeBug\" + suffix\n\t}\n\tdefer deleteObjectClass(t, className(\"TargetOne\"))\n\tdefer deleteObjectClass(t, className(\"TargetTwo\"))\n\tdefer deleteObjectClass(t, className(\"Source\"))\n\n\tconst (\n\t\ttargetOneID strfmt.UUID = \"155c5914-6594-4cde-b3ab-f8570b561965\"\n\t\ttargetTwoID strfmt.UUID = \"ebf85a07-6b34-4e3b-b7c5-077f904fc955\"\n\t)\n\n\tt.Run(\"import schema\", func(t *testing.T) {\n\t\tcreateObjectClass(t, 
&models.Class{\n\t\t\tClass: className(\"TargetOne\"),\n\t\t\tProperties: []*models.Property{\n\t\t\t\t{\n\t\t\t\t\tName: \"name\",\n\t\t\t\t\tDataType: []string{\"text\"},\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\n\t\tcreateObjectClass(t, &models.Class{\n\t\t\tClass: className(\"TargetTwo\"),\n\t\t\tProperties: []*models.Property{\n\t\t\t\t{\n\t\t\t\t\tName: \"name\",\n\t\t\t\t\tDataType: []string{\"text\"},\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\n\t\tcreateObjectClass(t, &models.Class{\n\t\t\tClass: className(\"Source\"),\n\t\t\tProperties: []*models.Property{\n\t\t\t\t{\n\t\t\t\t\tName: \"name\",\n\t\t\t\t\tDataType: []string{\"text\"},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName: \"toTargets\",\n\t\t\t\t\tDataType: []string{className(\"TargetOne\"), className(\"TargetTwo\")},\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t})\n\n\tt.Run(\"import data\", func(t *testing.T) {\n\t\tcreateObject(t, &models.Object{\n\t\t\tClass: className(\"TargetOne\"),\n\t\t\tID: targetOneID,\n\t\t\tProperties: map[string]interface{}{\n\t\t\t\t\"name\": \"target a\",\n\t\t\t},\n\t\t})\n\n\t\tcreateObject(t, &models.Object{\n\t\t\tClass: className(\"TargetTwo\"),\n\t\t\tID: targetTwoID,\n\t\t\tProperties: map[string]interface{}{\n\t\t\t\t\"name\": \"target b\",\n\t\t\t},\n\t\t})\n\n\t\tcreateObject(t, &models.Object{\n\t\t\tClass: className(\"Source\"),\n\t\t\tProperties: map[string]interface{}{\n\t\t\t\t\"name\": \"source without refs\",\n\t\t\t},\n\t\t})\n\n\t\tcreateObject(t, &models.Object{\n\t\t\tClass: className(\"Source\"),\n\t\t\tProperties: map[string]interface{}{\n\t\t\t\t\"name\": \"source with ref to One\",\n\t\t\t\t\"toTargets\": []interface{}{\n\t\t\t\t\tmap[string]interface{}{\n\t\t\t\t\t\t\"beacon\": fmt.Sprintf(\"weaviate://localhost/%s\", targetOneID),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\n\t\tcreateObject(t, &models.Object{\n\t\t\tClass: className(\"Source\"),\n\t\t\tProperties: map[string]interface{}{\n\t\t\t\t\"name\": \"source with ref to Two\",\n\t\t\t\t\"toTargets\": []interface{}{\n\t\t\t\t\tmap[string]interface{}{\n\t\t\t\t\t\t\"beacon\": fmt.Sprintf(\"weaviate://localhost/%s\", targetTwoID),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\n\t\tcreateObject(t, &models.Object{\n\t\t\tClass: className(\"Source\"),\n\t\t\tProperties: map[string]interface{}{\n\t\t\t\t\"name\": \"source with ref to both\",\n\t\t\t\t\"toTargets\": []interface{}{\n\t\t\t\t\tmap[string]interface{}{\n\t\t\t\t\t\t\"beacon\": fmt.Sprintf(\"weaviate://localhost/%s\", targetOneID),\n\t\t\t\t\t},\n\t\t\t\t\tmap[string]interface{}{\n\t\t\t\t\t\t\"beacon\": fmt.Sprintf(\"weaviate://localhost/%s\", targetTwoID),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t})\n\n\tt.Run(\"verify different scenarios through GraphQL\", func(t *testing.T) {\n\t\tt.Run(\"requesting no references\", func(t *testing.T) {\n\t\t\tquery := fmt.Sprintf(`\n\t\t{\n\t\t\tGet {\n\t\t\t\t%s {\n\t\t\t\t\tname\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\t`, className(\"Source\"))\n\t\t\tresult := graphqlhelper.AssertGraphQL(t, helper.RootAuth, query)\n\t\t\tactual := result.Get(\"Get\", className(\"Source\")).AsSlice()\n\t\t\texpected := []interface{}{\n\t\t\t\tmap[string]interface{}{\"name\": \"source with ref to One\"},\n\t\t\t\tmap[string]interface{}{\"name\": \"source with ref to Two\"},\n\t\t\t\tmap[string]interface{}{\"name\": \"source with ref to both\"},\n\t\t\t\tmap[string]interface{}{\"name\": \"source without refs\"},\n\t\t\t}\n\n\t\t\tassert.ElementsMatch(t, expected, actual)\n\t\t})\n\n\t\tt.Run(\"requesting references of type One without additional { id }\", func(t 
*testing.T) {\n\t\t\tquery := fmt.Sprintf(`\n\t\t{\n\t\t\tGet {\n\t\t\t\t%s {\n\t\t\t\t\tname\n\t\t\t\t\ttoTargets {\n\t\t\t\t\t ... on %s {\n\t\t\t\t\t\t name\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\t`, className(\"Source\"), className(\"TargetOne\"))\n\t\t\tresult := graphqlhelper.AssertGraphQL(t, helper.RootAuth, query)\n\t\t\tactual := result.Get(\"Get\", className(\"Source\")).AsSlice()\n\t\t\texpected := []interface{}{\n\t\t\t\tmap[string]interface{}{\n\t\t\t\t\t\"name\": \"source with ref to One\",\n\t\t\t\t\t\"toTargets\": []interface{}{\n\t\t\t\t\t\tmap[string]interface{}{\n\t\t\t\t\t\t\t\"name\": \"target a\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tmap[string]interface{}{\n\t\t\t\t\t\"name\": \"source with ref to Two\",\n\t\t\t\t\t\"toTargets\": nil,\n\t\t\t\t},\n\t\t\t\tmap[string]interface{}{\n\t\t\t\t\t\"name\": \"source with ref to both\",\n\t\t\t\t\t\"toTargets\": []interface{}{\n\t\t\t\t\t\tmap[string]interface{}{\n\t\t\t\t\t\t\t\"name\": \"target a\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tmap[string]interface{}{\n\t\t\t\t\t\"name\": \"source without refs\",\n\t\t\t\t\t\"toTargets\": nil,\n\t\t\t\t},\n\t\t\t}\n\n\t\t\tassert.ElementsMatch(t, expected, actual)\n\t\t})\n\n\t\tt.Run(\"requesting references of type One with additional { id }\", func(t *testing.T) {\n\t\t\tquery := fmt.Sprintf(`\n\t\t{\n\t\t\tGet {\n\t\t\t\t%s {\n\t\t\t\t\tname\n\t\t\t\t\ttoTargets {\n\t\t\t\t\t ... on %s {\n\t\t\t\t\t\t name\n\t\t\t\t\t\t\t_additional { id }\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\t`, className(\"Source\"), className(\"TargetOne\"))\n\t\t\tresult := graphqlhelper.AssertGraphQL(t, helper.RootAuth, query)\n\t\t\tactual := result.Get(\"Get\", className(\"Source\")).AsSlice()\n\t\t\texpected := []interface{}{\n\t\t\t\tmap[string]interface{}{\n\t\t\t\t\t\"name\": \"source with ref to One\",\n\t\t\t\t\t\"toTargets\": []interface{}{\n\t\t\t\t\t\tmap[string]interface{}{\n\t\t\t\t\t\t\t\"name\": \"target a\",\n\t\t\t\t\t\t\t\"_additional\": map[string]interface{}{\n\t\t\t\t\t\t\t\t\"id\": targetOneID.String(),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tmap[string]interface{}{\n\t\t\t\t\t\"name\": \"source with ref to Two\",\n\t\t\t\t\t\"toTargets\": nil,\n\t\t\t\t},\n\t\t\t\tmap[string]interface{}{\n\t\t\t\t\t\"name\": \"source with ref to both\",\n\t\t\t\t\t\"toTargets\": []interface{}{\n\t\t\t\t\t\tmap[string]interface{}{\n\t\t\t\t\t\t\t\"name\": \"target a\",\n\t\t\t\t\t\t\t\"_additional\": map[string]interface{}{\n\t\t\t\t\t\t\t\t\"id\": targetOneID.String(),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tmap[string]interface{}{\n\t\t\t\t\t\"name\": \"source without refs\",\n\t\t\t\t\t\"toTargets\": nil,\n\t\t\t\t},\n\t\t\t}\n\n\t\t\tassert.ElementsMatch(t, expected, actual)\n\t\t})\n\n\t\tt.Run(\"requesting references of type Two without additional { id }\", func(t *testing.T) {\n\t\t\tquery := fmt.Sprintf(`\n\t\t{\n\t\t\tGet {\n\t\t\t\t%s {\n\t\t\t\t\tname\n\t\t\t\t\ttoTargets {\n\t\t\t\t\t ... 
on %s {\n\t\t\t\t\t\t name\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\t`, className(\"Source\"), className(\"TargetTwo\"))\n\t\t\tresult := graphqlhelper.AssertGraphQL(t, helper.RootAuth, query)\n\t\t\tactual := result.Get(\"Get\", className(\"Source\")).AsSlice()\n\t\t\texpected := []interface{}{\n\t\t\t\tmap[string]interface{}{\n\t\t\t\t\t\"name\": \"source with ref to Two\",\n\t\t\t\t\t\"toTargets\": []interface{}{\n\t\t\t\t\t\tmap[string]interface{}{\n\t\t\t\t\t\t\t\"name\": \"target b\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tmap[string]interface{}{\n\t\t\t\t\t\"name\": \"source with ref to One\",\n\t\t\t\t\t\"toTargets\": nil,\n\t\t\t\t},\n\t\t\t\tmap[string]interface{}{\n\t\t\t\t\t\"name\": \"source with ref to both\",\n\t\t\t\t\t\"toTargets\": []interface{}{\n\t\t\t\t\t\tmap[string]interface{}{\n\t\t\t\t\t\t\t\"name\": \"target b\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tmap[string]interface{}{\n\t\t\t\t\t\"name\": \"source without refs\",\n\t\t\t\t\t\"toTargets\": nil,\n\t\t\t\t},\n\t\t\t}\n\n\t\t\tassert.ElementsMatch(t, expected, actual)\n\t\t})\n\n\t\tt.Run(\"requesting references of type Two with additional { id }\", func(t *testing.T) {\n\t\t\tquery := fmt.Sprintf(`\n\t\t{\n\t\t\tGet {\n\t\t\t\t%s {\n\t\t\t\t\tname\n\t\t\t\t\ttoTargets {\n\t\t\t\t\t ... on %s {\n\t\t\t\t\t\t name\n\t\t\t\t\t\t\t_additional { id }\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\t`, className(\"Source\"), className(\"TargetTwo\"))\n\t\t\tresult := graphqlhelper.AssertGraphQL(t, helper.RootAuth, query)\n\t\t\tactual := result.Get(\"Get\", className(\"Source\")).AsSlice()\n\t\t\texpected := []interface{}{\n\t\t\t\tmap[string]interface{}{\n\t\t\t\t\t\"name\": \"source with ref to Two\",\n\t\t\t\t\t\"toTargets\": []interface{}{\n\t\t\t\t\t\tmap[string]interface{}{\n\t\t\t\t\t\t\t\"name\": \"target b\",\n\t\t\t\t\t\t\t\"_additional\": map[string]interface{}{\n\t\t\t\t\t\t\t\t\"id\": targetTwoID.String(),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tmap[string]interface{}{\n\t\t\t\t\t\"name\": \"source with ref to One\",\n\t\t\t\t\t\"toTargets\": nil,\n\t\t\t\t},\n\t\t\t\tmap[string]interface{}{\n\t\t\t\t\t\"name\": \"source with ref to both\",\n\t\t\t\t\t\"toTargets\": []interface{}{\n\t\t\t\t\t\tmap[string]interface{}{\n\t\t\t\t\t\t\t\"name\": \"target b\",\n\t\t\t\t\t\t\t\"_additional\": map[string]interface{}{\n\t\t\t\t\t\t\t\t\"id\": targetTwoID.String(),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tmap[string]interface{}{\n\t\t\t\t\t\"name\": \"source without refs\",\n\t\t\t\t\t\"toTargets\": nil,\n\t\t\t\t},\n\t\t\t}\n\n\t\t\tassert.ElementsMatch(t, expected, actual)\n\t\t})\n\n\t\tt.Run(\"requesting references of both types without additional { id }\",\n\t\t\tfunc(t *testing.T) {\n\t\t\t\tquery := fmt.Sprintf(`\n\t\t{\n\t\t\tGet {\n\t\t\t\t%s {\n\t\t\t\t\tname\n\t\t\t\t\ttoTargets {\n\t\t\t\t\t ... on %s {\n\t\t\t\t\t\t name\n\t\t\t\t\t\t}\n\t\t\t\t\t ... 
on %s {\n\t\t\t\t\t\t name\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\t`, className(\"Source\"), className(\"TargetOne\"), className(\"TargetTwo\"))\n\t\t\t\tresult := graphqlhelper.AssertGraphQL(t, helper.RootAuth, query)\n\t\t\t\tactual := result.Get(\"Get\", className(\"Source\")).AsSlice()\n\t\t\t\texpected := []interface{}{\n\t\t\t\t\tmap[string]interface{}{\n\t\t\t\t\t\t\"name\": \"source with ref to Two\",\n\t\t\t\t\t\t\"toTargets\": []interface{}{\n\t\t\t\t\t\t\tmap[string]interface{}{\n\t\t\t\t\t\t\t\t\"name\": \"target b\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tmap[string]interface{}{\n\t\t\t\t\t\t\"name\": \"source with ref to One\",\n\t\t\t\t\t\t\"toTargets\": []interface{}{\n\t\t\t\t\t\t\tmap[string]interface{}{\n\t\t\t\t\t\t\t\t\"name\": \"target a\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tmap[string]interface{}{\n\t\t\t\t\t\t\"name\": \"source with ref to both\",\n\t\t\t\t\t\t\"toTargets\": []interface{}{\n\t\t\t\t\t\t\tmap[string]interface{}{\n\t\t\t\t\t\t\t\t\"name\": \"target a\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tmap[string]interface{}{\n\t\t\t\t\t\t\t\t\"name\": \"target b\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tmap[string]interface{}{\n\t\t\t\t\t\t\"name\": \"source without refs\",\n\t\t\t\t\t\t\"toTargets\": nil,\n\t\t\t\t\t},\n\t\t\t\t}\n\n\t\t\t\tassert.ElementsMatch(t, expected, actual)\n\t\t\t})\n\n\t\tt.Run(\"requesting references of type Two with additional { id }\", func(t *testing.T) {\n\t\t\tquery := fmt.Sprintf(`\n\t\t{\n\t\t\tGet {\n\t\t\t\t%s {\n\t\t\t\t\tname\n\t\t\t\t\ttoTargets {\n\t\t\t\t\t ... on %s {\n\t\t\t\t\t\t name\n\t\t\t\t\t\t\t_additional { id }\n\t\t\t\t\t\t}\n\t\t\t\t\t ... on %s {\n\t\t\t\t\t\t name\n\t\t\t\t\t\t\t_additional { id }\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\t`, className(\"Source\"), className(\"TargetOne\"), className(\"TargetTwo\"))\n\t\t\tresult := graphqlhelper.AssertGraphQL(t, helper.RootAuth, query)\n\t\t\tactual := result.Get(\"Get\", className(\"Source\")).AsSlice()\n\t\t\texpected := []interface{}{\n\t\t\t\tmap[string]interface{}{\n\t\t\t\t\t\"name\": \"source with ref to Two\",\n\t\t\t\t\t\"toTargets\": []interface{}{\n\t\t\t\t\t\tmap[string]interface{}{\n\t\t\t\t\t\t\t\"name\": \"target b\",\n\t\t\t\t\t\t\t\"_additional\": map[string]interface{}{\n\t\t\t\t\t\t\t\t\"id\": targetTwoID.String(),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tmap[string]interface{}{\n\t\t\t\t\t\"name\": \"source with ref to One\",\n\t\t\t\t\t\"toTargets\": []interface{}{\n\t\t\t\t\t\tmap[string]interface{}{\n\t\t\t\t\t\t\t\"name\": \"target a\",\n\t\t\t\t\t\t\t\"_additional\": map[string]interface{}{\n\t\t\t\t\t\t\t\t\"id\": targetOneID.String(),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tmap[string]interface{}{\n\t\t\t\t\t\"name\": \"source with ref to both\",\n\t\t\t\t\t\"toTargets\": []interface{}{\n\t\t\t\t\t\tmap[string]interface{}{\n\t\t\t\t\t\t\t\"name\": \"target a\",\n\t\t\t\t\t\t\t\"_additional\": map[string]interface{}{\n\t\t\t\t\t\t\t\t\"id\": targetOneID.String(),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tmap[string]interface{}{\n\t\t\t\t\t\t\t\"name\": \"target b\",\n\t\t\t\t\t\t\t\"_additional\": map[string]interface{}{\n\t\t\t\t\t\t\t\t\"id\": targetTwoID.String(),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tmap[string]interface{}{\n\t\t\t\t\t\"name\": \"source without refs\",\n\t\t\t\t\t\"toTargets\": 
nil,\n\t\t\t\t},\n\t\t\t}\n\n\t\t\tassert.ElementsMatch(t, expected, actual)\n\t\t})\n\t})\n\n\tt.Run(\"cleanup\", func(t *testing.T) {\n\t\tdeleteObjectClass(t, className(\"Source\"))\n\t\tdeleteObjectClass(t, className(\"TargetOne\"))\n\t\tdeleteObjectClass(t, className(\"TargetTwo\"))\n\t})\n}", "func Merge(original interface{}, update interface{}) (merged interface{}, err error) {\n\n\tif original == nil {\n\t\treturn update, nil\n\t}\n\n\tif update == nil {\n\t\treturn original, nil\n\t}\n\n\t// call the recursive merge\n\treturn merge(original, update)\n}", "func (obj object) MergeWith(other Object, conflictResolver func(v1, v2 *Term) (*Term, bool)) (Object, bool) {\n\tresult := NewObject()\n\tstop := obj.Until(func(k, v *Term) bool {\n\t\tv2 := other.Get(k)\n\t\t// The key didn't exist in other, keep the original value\n\t\tif v2 == nil {\n\t\t\tresult.Insert(k, v)\n\t\t\treturn false\n\t\t}\n\n\t\t// The key exists in both, resolve the conflict if possible\n\t\tmerged, stop := conflictResolver(v, v2)\n\t\tif !stop {\n\t\t\tresult.Insert(k, merged)\n\t\t}\n\t\treturn stop\n\t})\n\n\tif stop {\n\t\treturn nil, false\n\t}\n\n\t// Copy in any values from other for keys that don't exist in obj\n\tother.Foreach(func(k, v *Term) {\n\t\tif v2 := obj.Get(k); v2 == nil {\n\t\t\tresult.Insert(k, v)\n\t\t}\n\t})\n\treturn result, true\n}", "func (me *TrieNode) insert(node *TrieNode, insert, update bool, prematchedBits uint) (newHead *TrieNode, err error) {\n\tdefer func() {\n\t\tif err == nil && newHead != nil {\n\t\t\tnode.size = 1\n\t\t\tnode.h = 1\n\t\t\tnode.isActive = true\n\t\t\tnewHead.setSize()\n\t\t}\n\t}()\n\n\tif me == nil {\n\t\tif !insert {\n\t\t\treturn me, fmt.Errorf(\"the key doesn't exist to update\")\n\t\t}\n\t\treturn node, nil\n\t}\n\n\t// Test containership both ways\n\ttrie_contains, node_contains, reversed, common, child := compare(&me.TrieKey, &node.TrieKey, prematchedBits)\n\tswitch {\n\tcase trie_contains && node_contains:\n\t\t// They have the same key\n\t\tif me.isActive && !update {\n\t\t\treturn me, fmt.Errorf(\"a node with that key already exists\")\n\t\t}\n\t\tif !me.isActive && !insert {\n\t\t\treturn me, fmt.Errorf(\"the key doesn't exist to update\")\n\t\t}\n\t\tnode.children = me.children\n\t\treturn node, nil\n\n\tcase trie_contains && !node_contains:\n\t\t// Trie node's key contains the new node's key. Insert it.\n\t\tnewChild, err := me.children[child].insert(node, insert, update, me.Length)\n\t\tif err == nil {\n\t\t\tme.children[child] = newChild\n\t\t}\n\t\treturn me, err\n\n\tcase !trie_contains && node_contains:\n\t\tif !insert {\n\t\t\treturn me, fmt.Errorf(\"the key doesn't exist to update\")\n\t\t}\n\t\t// New node's key contains the trie node's key. Insert new node as the parent of the trie.\n\t\tnode.children[child] = me\n\t\treturn node, nil\n\n\tdefault:\n\t\tif !insert {\n\t\t\treturn me, fmt.Errorf(\"the key doesn't exist to update\")\n\t\t}\n\t\t// Keys are disjoint. 
Create a new (inactive) parent node to join them side-by-side.\n\t\tvar children [2]*TrieNode\n\n\t\tif (child == 1) != reversed { // (child == 1) XOR reversed\n\t\t\tchildren[0], children[1] = me, node\n\t\t} else {\n\t\t\tchildren[0], children[1] = node, me\n\t\t}\n\n\t\tnumBytes := bitsToBytes(common)\n\t\tbits := make([]byte, numBytes)\n\t\tcopy(bits, me.Bits)\n\n\t\t// zero out the bits that are not in common in the last byte\n\t\tnumBits := common % 8\n\t\tif numBits != 0 {\n\t\t\tbits[numBytes-1] &= ^(byte(0xff) >> numBits)\n\t\t}\n\n\t\treturn &TrieNode{\n\t\t\tTrieKey: TrieKey{\n\t\t\t\tBits: bits,\n\t\t\t\tLength: common,\n\t\t\t},\n\t\t\tchildren: children,\n\t\t}, nil\n\t}\n}", "func getForkOldNewCommon(ctx context.Context, t *testing.T, chainStore *chain.Store, blockSource *th.TestFetcher, dstP *SyncerTestParams, a, b, c uint) (types.TipSet, types.TipSet, types.TipSet) {\n\t// Add a - 1 tipsets to the head of the chainStore.\n\trequireGrowChain(ctx, t, blockSource, chainStore, a, dstP)\n\tcommonAncestor := requireHeadTipset(t, chainStore)\n\n\tif c > 0 {\n\t\t// make the first fork tipset (need to do manually to set nonce)\n\t\tsigner, ki := types.NewMockSignersAndKeyInfo(1)\n\t\tminerWorker, err := ki[0].Address()\n\t\trequire.NoError(t, err)\n\t\tfakeChildParams := th.FakeChildParams{\n\t\t\tParent: commonAncestor,\n\t\t\tGenesisCid: dstP.genCid,\n\t\t\tSigner: signer,\n\t\t\tMinerWorker: minerWorker,\n\t\t\tStateRoot: dstP.genStateRoot,\n\t\t\tNonce: uint64(1),\n\t\t}\n\n\t\tfirstForkBlock := th.RequireMkFakeChild(t, fakeChildParams)\n\t\trequirePutBlocks(t, blockSource, firstForkBlock)\n\t\tfirstForkTs := th.RequireNewTipSet(t, firstForkBlock)\n\t\tfirstForkTsas := &chain.TipSetAndState{\n\t\t\tTipSet: firstForkTs,\n\t\t\tTipSetStateRoot: dstP.genStateRoot,\n\t\t}\n\t\trequire.NoError(t, chainStore.PutTipSetAndState(ctx, firstForkTsas))\n\t\terr = chainStore.SetHead(ctx, firstForkTs)\n\t\trequire.NoError(t, err)\n\n\t\t// grow the fork by (c - 1) blocks (c total)\n\t\trequireGrowChain(ctx, t, blockSource, chainStore, c-1, dstP)\n\t}\n\n\toldHead := requireHeadTipset(t, chainStore)\n\n\t// go back and complete the original chain\n\terr := chainStore.SetHead(ctx, commonAncestor)\n\trequire.NoError(t, err)\n\trequireGrowChain(ctx, t, blockSource, chainStore, b, dstP)\n\tnewHead := requireHeadTipset(t, chainStore)\n\n\treturn oldHead, newHead, commonAncestor\n}", "func Merge(base, overrides interface{}) error {\n\tbaseBytes, err := json.Marshal(base)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to convert current object to byte sequence\")\n\t}\n\n\toverrideBytes, err := json.Marshal(overrides)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to convert current object to byte sequence\")\n\t}\n\n\tpatchMeta, err := strategicpatch.NewPatchMetaFromStruct(base)\n\tif err != nil {\n\t\treturn errors.WrapIf(err, \"failed to produce patch meta from struct\")\n\t}\n\tpatch, err := strategicpatch.CreateThreeWayMergePatch(overrideBytes, overrideBytes, baseBytes, patchMeta, true)\n\tif err != nil {\n\t\treturn errors.WrapIf(err, \"failed to create three way merge patch\")\n\t}\n\n\tmerged, err := strategicpatch.StrategicMergePatchUsingLookupPatchMeta(baseBytes, patch, patchMeta)\n\tif err != nil {\n\t\treturn errors.WrapIf(err, \"failed to apply patch\")\n\t}\n\n\tvalueOfBase := reflect.Indirect(reflect.ValueOf(base))\n\tinto := reflect.New(valueOfBase.Type())\n\tif err := json.Unmarshal(merged, into.Interface()); err != nil {\n\t\treturn err\n\t}\n\tif 
!valueOfBase.CanSet() {\n\t\treturn errors.New(\"unable to set unmarshalled value into base object\")\n\t}\n\tvalueOfBase.Set(reflect.Indirect(into))\n\treturn nil\n}", "func TestMergePermanent(t *testing.T) {\n\ta := NewEvent(\"1970-01-01T00:00:00Z\", map[int64]interface{}{-1: int64(30), -2: \"foo\"})\n\tb := NewEvent(\"1970-01-01T00:00:00Z\", map[int64]interface{}{-1: 20, -10: \"bat\", 3: \"baz\"})\n\ta.MergePermanent(b)\n\tif a.Data[-1] != int64(30) || a.Data[-2] != \"foo\" || a.Data[-10] != nil || a.Data[3] != \"baz\" {\n\t\tt.Fatalf(\"Invalid permanent merge: %v\", a.Data)\n\t}\n}", "func merge(A map[string]interface{}, i interface{}) interface{} {\n\tswitch t := i.(type) {\n\tcase map[string]interface{}:\n\t\tfor k, v := range t {\n\t\t\tAk, ok := A[k]\n\t\t\tif !ok {\n\t\t\t\t// if A doesn't contain this key, initialise\n\t\t\t\tswitch v.(type) {\n\t\t\t\tcase map[string]interface{}:\n\t\t\t\t\t// if the value is a map, initialise a new map to be merged\n\t\t\t\t\tAk = make(map[string]interface{})\n\t\t\t\tdefault:\n\t\t\t\t\t// otherwise just merge into this A\n\t\t\t\t\tA[k] = v\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\t\t\tB, ok := Ak.(map[string]interface{})\n\t\t\tif !ok {\n\t\t\t\t// A[k] is not a map, so just overwrite\n\t\t\t\tA[k] = v\n\t\t\t}\n\t\t\t// if A contains the key, and the current value is also a map, recurse on that map\n\t\t\tA[k] = merge(B, v)\n\t\t}\n\t\treturn A\n\t}\n\t// if a leaf is reached, just return it\n\treturn i\n}", "func TestSwapNodesOnceDepth2(t *testing.T) {\n\n\tindexes := [][]int32{\n\t\t{2, 3},\n\t\t{4, 5},\n\t\t{-1, -1},\n\t\t{-1, -1},\n\t\t{-1, -1},\n\t}\n\n\tqueries := []int32{\n\t\t2,\n\t}\n\n\texpected := [][]int32{\n\t\t{5, 2, 4, 1, 3},\n\t}\n\n\tactual := swapNodes(indexes, queries)\n\tassert.Equal(t, expected, actual)\n}", "func (s *mergeBaseSuite) TestAncestorSame(c *C) {\n\trevs := []string{\"A\", \"A\"}\n\ts.AssertAncestor(c, revs, true)\n}", "func mergeNeo4jNdoes(node *nodeInfo, timeStamp string) string {\n\tvcsaNodesCmd := \"\"\n\tcypherNodesCmd := \"\"\n\n\tif node.VcsaURL != \"\" {\n\t\tvcsaNodesCmd = \", vcsa:'\" + node.VcsaURL + \"'\"\n\t}\n\n\tcypherNodesCmd = \"merge (\" + node.Label + \":\" + node.Label + \" {domainId:'\" + node.DomainID + \"', name:'\" + node.Name + \"'\" + vcsaNodesCmd + \"}) set \" + node.Label + \".time='\" + timeStamp + \"' \"\n\n\treturn cypherNodesCmd\n}", "func JSON(a, b []byte) (map[string]Type, error) {\n\tamap, err := pointerSet(a)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tbmap, err := pointerSet(b)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Compute a - b and a ∩ b\n\trv := map[string]Type{}\n\tvar common []string\n\tfor v := range amap {\n\t\tif bmap[v] {\n\t\t\tcommon = append(common, v)\n\t\t} else {\n\t\t\trv[v] = MissingB\n\t\t}\n\t}\n\n\t// Compute b - a\n\tfor v := range bmap {\n\t\tif !amap[v] {\n\t\t\trv[v] = MissingA\n\t\t}\n\t}\n\n\t// Find only the longest paths of a ∩ b and verify they are\n\t// the same. e.g. 
if /x/y/z is different between a and b,\n\t// then only consider /x/y/z, not /x/y or /x or / or \"\"\n\tupstream := map[string]bool{}\n\tsort.Slice(common, func(i, j int) bool { return len(common[j]) < len(common[i]) })\n\tfor _, v := range common {\n\t\tif upstream[v] {\n\t\t\tcontinue\n\t\t}\n\t\tfor _, u := range upstreamPaths(v) {\n\t\t\tupstream[u] = true\n\t\t}\n\n\t\tvar aval, bval interface{}\n\t\tmust(jsonpointer.FindDecode(a, v, &aval))\n\t\tmust(jsonpointer.FindDecode(b, v, &bval))\n\t\tif !reflect.DeepEqual(aval, bval) {\n\t\t\trv[v] = DifferentValue\n\t\t}\n\t}\n\n\treturn rv, nil\n}", "func TestSyncOverlap(t *testing.T) {\n\tctx := context.Background()\n\tr := fstest.NewRun(t)\n\n\tsubRemoteName := r.FremoteName + \"/rclone-sync-test\"\n\tFremoteSync, err := fs.NewFs(ctx, subRemoteName)\n\trequire.NoError(t, err)\n\n\tcheckErr := func(err error) {\n\t\trequire.Error(t, err)\n\t\tassert.True(t, fserrors.IsFatalError(err))\n\t\tassert.Equal(t, fs.ErrorOverlapping.Error(), err.Error())\n\t}\n\n\tcheckErr(Sync(ctx, FremoteSync, r.Fremote, false))\n\tcheckErr(Sync(ctx, r.Fremote, FremoteSync, false))\n\tcheckErr(Sync(ctx, r.Fremote, r.Fremote, false))\n\tcheckErr(Sync(ctx, FremoteSync, FremoteSync, false))\n}", "func TestFetchOrderedTwoParents(t *testing.T) {\n\ttester := util.CreateComponentTester(t, model.CreateEmptyParameters())\n\tdefer tester.Clean()\n\n\trepParent2 := tester.CreateDir(\"parent2\")\n\trepParent1 := tester.CreateDir(\"parent1\")\n\trepDesc := tester.CreateDir(\"descriptor\")\n\n\tc1Rep := tester.CreateDir(\"comp1\")\n\tcomp1Content := `\nvars:\n key1: val1_comp1\n key2: val2_comp1\n key3: val3_comp1\n key4: val4_comp1\n key5: val5_comp1\n key6: val6_comp1\n key7: val7_comp1\n key8: val8_comp1\n key9: val9_comp1\n`\n\tc1Rep.WriteCommit(\"ekara.yaml\", comp1Content)\n\n\tc2Rep := tester.CreateDir(\"comp2\")\n\tcomp2Content := `\nvars:\n key2: val2_comp2\n key3: val3_comp2\n key4: val4_comp2\n key5: val5_comp2\n key6: val6_comp2\n key7: val7_comp2\n key8: val8_comp2\n key9: val9_comp2\n`\n\tc2Rep.WriteCommit(\"ekara.yaml\", comp2Content)\n\n\tparent2Content := `\nekara:\n components:\n comp2:\n repository: comp2\n comp1:\n repository: comp1\n\nvars:\n key3: val3_ekara2\n key4: val4_ekara2\n key5: val5_ekara2\n key6: val6_ekara2\n key7: val7_ekara2\n key8: val8_ekara2\n key9: val9_ekara2\n`\n\trepParent2.WriteCommit(\"ekara.yaml\", parent2Content)\n\n\tc4Rep := tester.CreateDir(\"comp4\")\n\tcomp4Content := `\nvars:\n key4: val4_comp4\n key5: val5_comp4\n key6: val6_comp4\n key7: val7_comp4\n key8: val8_comp4\n key9: val9_comp4\n`\n\tc4Rep.WriteCommit(\"ekara.yaml\", comp4Content)\n\n\tc5Rep := tester.CreateDir(\"comp5\")\n\tcomp5Content := `\nvars:\n key5: val5_comp5\n key6: val6_comp5\n key7: val7_comp5\n key8: val8_comp5\n key9: val9_comp5\n`\n\tc5Rep.WriteCommit(\"ekara.yaml\", comp5Content)\n\n\tparent1Content := `\nekara:\n parent:\n repository: parent2\n components:\n comp5:\n repository: comp5\n comp4:\n repository: comp4\n\nvars:\n key6: val6_ekara1\n key7: val7_ekara1\n key8: val8_ekara1\n key9: val9_ekara1\n`\n\trepParent1.WriteCommit(\"ekara.yaml\", parent1Content)\n\n\tc3Rep := tester.CreateDir(\"comp3\")\n\tcomp3Content := `\nvars:\n key7: val7_comp3\n key8: val8_comp3\n key9: val9_comp3\n`\n\tc3Rep.WriteCommit(\"ekara.yaml\", comp3Content)\n\n\tc6Rep := tester.CreateDir(\"comp6\")\n\tcomp6Content := `\nvars:\n key8: val8_comp6\n key9: val9_comp6\n`\n\tc6Rep.WriteCommit(\"ekara.yaml\", comp6Content)\n\n\tdescContent := `\nname: 
ekaraDemoVar\nqualifier: dev\n\nekara:\n parent:\n repository: parent1\n components:\n comp6:\n repository: comp6\n comp3:\n repository: comp3\n\nvars:\n key9: val9_main\n \norchestrator:\n component: comp1\nproviders:\n p1:\n component: comp2\n p2:\n component: comp4\n p3:\n component: comp3\n p4:\n component: comp5\n p5:\n component: comp6\nnodes:\n node1:\n instances: 1\n provider:\n name: p1\n node2:\n instances: 1\n provider:\n name: p2\n node3:\n instances: 1\n provider:\n name: p3\n node4:\n instances: 1\n provider:\n name: p4\n node5:\n instances: 1\n provider:\n name: p5\n`\n\trepDesc.WriteCommit(\"ekara.yaml\", descContent)\n\n\ttester.Init(repDesc.AsRepository(\"master\"))\n\tenv := tester.Env()\n\tassert.NotNil(t, env)\n\n\ttester.AssertComponentsExactly(model.MainComponentId, model.MainComponentId+model.ParentComponentSuffix, model.MainComponentId+model.ParentComponentSuffix+model.ParentComponentSuffix, \"comp1\", \"comp2\", \"comp3\", \"comp4\", \"comp5\", \"comp6\")\n\n\t// We need to fetch:\n\t//- first the components referenced by parent2\n\t//- then parent2 itself\n\t//- later the components referenced by parent1\n\t//- then parent1 itself\n\t//- later the components referenced by the main descriptor\n\t//- then main descriptor itself\n\tassert.Exactly(t, tester.ComponentManager().ComponentOrder(), []string{\"comp1\", \"comp2\", model.MainComponentId + model.ParentComponentSuffix + model.ParentComponentSuffix, \"comp4\", \"comp5\", model.MainComponentId + model.ParentComponentSuffix, \"comp3\", \"comp6\", model.MainComponentId})\n\n\t// Check that all vars have been accumulated\n\tassert.Equal(t, len(tester.TemplateContext().(*model.TemplateContext).Vars), 9)\n\n\ttester.AssertParam(tester.TemplateContext().(*model.TemplateContext).Vars, \"key1\", \"val1_comp1\")\n\ttester.AssertParam(tester.TemplateContext().(*model.TemplateContext).Vars, \"key2\", \"val2_comp2\")\n\ttester.AssertParam(tester.TemplateContext().(*model.TemplateContext).Vars, \"key3\", \"val3_ekara2\")\n\ttester.AssertParam(tester.TemplateContext().(*model.TemplateContext).Vars, \"key4\", \"val4_comp4\")\n\ttester.AssertParam(tester.TemplateContext().(*model.TemplateContext).Vars, \"key5\", \"val5_comp5\")\n\ttester.AssertParam(tester.TemplateContext().(*model.TemplateContext).Vars, \"key6\", \"val6_ekara1\")\n\ttester.AssertParam(tester.TemplateContext().(*model.TemplateContext).Vars, \"key7\", \"val7_comp3\")\n\ttester.AssertParam(tester.TemplateContext().(*model.TemplateContext).Vars, \"key8\", \"val8_comp6\")\n\ttester.AssertParam(tester.TemplateContext().(*model.TemplateContext).Vars, \"key9\", \"val9_main\")\n}", "func (mp *TxPool) txConflicts(tx *btcutil.Tx) map[chainhash.Hash]*btcutil.Tx {\n\tconflicts := make(map[chainhash.Hash]*btcutil.Tx)\n\tfor _, txIn := range tx.MsgTx().TxIn {\n\t\tconflict, ok := mp.outpoints[txIn.PreviousOutPoint]\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\t\tconflicts[*conflict.Hash()] = conflict\n\t\tfor hash, descendant := range mp.txDescendants(conflict, nil) {\n\t\t\tconflicts[hash] = descendant\n\t\t}\n\t}\n\treturn conflicts\n}", "func resolveConflict(ctx context.Context, localObject, remoteObject *GenericObject, local, remote Storage) *Change {\n\t// local object is older than remote object. 
We keep whichever copy was modified later: if the local copy is newer, push it to the remote\n\tif localObject.Modified.After(remoteObject.Modified) {\n\t\tfmt.Printf(\"We should add local [%s] to remote\\n\", localObject.ID)\n\t\treturn &Change{\n\t\t\tType: ChangeTypeSet,\n\t\t\tObject: localObject,\n\t\t\tStore: remote,\n\t\t\tSyncStatus: &SyncStatus{\n\t\t\t\tID: localObject.ID,\n\t\t\t\tLocalHash: localObject.Hash,\n\t\t\t\tRemoteHash: localObject.Hash,\n\t\t\t}}\n\t}\n\t// otherwise the remote copy is at least as new as the local one; preserve the remote object.\n\tfmt.Printf(\"We should add remote [%s] to local\\n\", remoteObject.ID)\n\treturn &Change{\n\t\tType: ChangeTypeSet,\n\t\tObject: remoteObject,\n\t\tStore: local,\n\t\tSyncStatus: &SyncStatus{\n\t\t\tID: remoteObject.ID,\n\t\t\tLocalHash: remoteObject.Hash,\n\t\t\tRemoteHash: remoteObject.Hash,\n\t\t}}\n}", "func (r *Root) updateObjects(p Parent, obs *objects, wfs []web.Folder, wds []web.Document) error {\n\tlogger.Debug2(\"%v current children: %#v\", p.Name(), p.Children())\n\tobjects := new(objects)\n\tobjects.init(len(wfs) + len(wds))\n\tfor _, wf := range wfs {\n\t\tlfd := r.idCache[wf.FolderID]\n\t\tif lfd.Folder == nil {\n\t\t\tif lfd2, ok := r.missing[wf.FolderID]; ok {\n\t\t\t\tif lfd2.Folder != nil {\n\t\t\t\t\tlfd.Folder = lfd2.Folder\n\t\t\t\t\tlfd2.Folder = nil\n\t\t\t\t\tlfd2.updateMap(r.missing, wf.FolderID)\n\t\t\t\t}\n\t\t\t}\n\t\t\tlfd.Folder = newFolder(p, wf)\n\t\t}\n\t\toldEmbed := lfd.Folder.Folder.Embedded\n\t\tlfd.Folder.Folder = wf\n\t\tlfd.Folder.Folder.Embedded = oldEmbed\n\t\tobjects.add(lfd.Folder)\n\t\tr.idCache[wf.FolderID] = lfd\n\t}\n\tfor _, wd := range wds {\n\t\tlfd := r.idCache[wd.DocumentID]\n\t\tif lfd.Document == nil {\n\t\t\tif lfd2, ok := r.missing[wd.DocumentID]; ok {\n\t\t\t\tif lfd2.Document != nil {\n\t\t\t\t\tlfd.Document = lfd2.Document\n\t\t\t\t\tlfd2.Document = nil\n\t\t\t\t\tif lfd2.empty() {\n\t\t\t\t\t\tdelete(r.missing, wd.DocumentID)\n\t\t\t\t\t} else {\n\t\t\t\t\t\tr.missing[wd.DocumentID] = lfd2\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tlfd.Document = &Document{Folder: p.(*Folder)}\n\t\t\t}\n\t\t}\n\t\tlfd.Document.Document = wd\n\t\tobjects.add(lfd.Document)\n\t\tr.idCache[wd.DocumentID] = lfd\n\t}\n\tfor _, c := range obs.Children() {\n\t\tid := c.ID()\n\t\tif _, ok := objects.ids[id]; !ok {\n\t\t\tlfd := r.missing[id]\n\t\t\tswitch c := c.(type) {\n\t\t\tcase *Document:\n\t\t\t\tlfd.Document = c\n\t\t\tcase *Folder:\n\t\t\t\tlfd.Folder = c\n\t\t\tdefault:\n\t\t\t\tpanic(errors.Errorf(\n\t\t\t\t\t\"invalid object type: %T\", c))\n\t\t\t}\n\t\t\tr.missing[id] = lfd\n\t\t}\n\t}\n\t*obs = *objects\n\tlogger.Debug2(\"%v new children: %#v\", p.Name(), p.Children())\n\treturn nil\n}", "func TestActiveReplicatorPushAndPullConflict(t *testing.T) {\n\n\t// scenarios\n\tconflictResolutionTests := []struct {\n\t\tname string\n\t\tlocalRevisionBody []byte\n\t\tlocalRevID string\n\t\tremoteRevisionBody []byte\n\t\tremoteRevID string\n\t\tcommonAncestorRevID string\n\t\tconflictResolver string\n\t\texpectedBody []byte\n\t\texpectedRevID string\n\t\texpectedTombstonedRevID string\n\t}{\n\t\t{\n\t\t\tname: \"remoteWins\",\n\t\t\tlocalRevisionBody: []byte(`{\"source\": \"local\"}`),\n\t\t\tlocalRevID: \"1-a\",\n\t\t\tremoteRevisionBody: []byte(`{\"source\": \"remote\"}`),\n\t\t\tremoteRevID: \"1-b\",\n\t\t\tconflictResolver: `function(conflict) {return conflict.RemoteDocument;}`,\n\t\t\texpectedBody: []byte(`{\"source\": \"remote\"}`),\n\t\t\texpectedRevID: \"1-b\",\n\t\t},\n\t\t{\n\t\t\tname: \"merge\",\n\t\t\tlocalRevisionBody: []byte(`{\"source\": 
\"local\"}`),\n\t\t\tlocalRevID: \"1-a\",\n\t\t\tremoteRevisionBody: []byte(`{\"source\": \"remote\"}`),\n\t\t\tremoteRevID: \"1-b\",\n\t\t\tconflictResolver: `function(conflict) {\n\t\t\t\t\t\t\tvar mergedDoc = new Object();\n\t\t\t\t\t\t\tmergedDoc.source = \"merged\";\n\t\t\t\t\t\t\treturn mergedDoc;\n\t\t\t\t\t\t}`,\n\t\t\texpectedBody: []byte(`{\"source\": \"merged\"}`),\n\t\t\texpectedRevID: db.CreateRevIDWithBytes(2, \"1-b\", []byte(`{\"source\":\"merged\"}`)), // rev for merged body, with parent 1-b\n\t\t},\n\t\t{\n\t\t\tname: \"localWins\",\n\t\t\tlocalRevisionBody: []byte(`{\"source\": \"local\"}`),\n\t\t\tlocalRevID: \"1-a\",\n\t\t\tremoteRevisionBody: []byte(`{\"source\": \"remote\"}`),\n\t\t\tremoteRevID: \"1-b\",\n\t\t\tconflictResolver: `function(conflict) {return conflict.LocalDocument;}`,\n\t\t\texpectedBody: []byte(`{\"source\": \"local\"}`),\n\t\t\texpectedRevID: db.CreateRevIDWithBytes(2, \"1-b\", []byte(`{\"source\":\"local\"}`)), // rev for local body, transposed under parent 1-b\n\t\t},\n\t\t{\n\t\t\tname: \"localWinsRemoteTombstone\",\n\t\t\tlocalRevisionBody: []byte(`{\"source\": \"local\"}`),\n\t\t\tlocalRevID: \"2-a\",\n\t\t\tremoteRevisionBody: []byte(`{\"_deleted\": true}`),\n\t\t\tremoteRevID: \"2-b\",\n\t\t\tcommonAncestorRevID: \"1-a\",\n\t\t\tconflictResolver: `function(conflict) {return conflict.LocalDocument;}`,\n\t\t\texpectedBody: []byte(`{\"source\": \"local\"}`),\n\t\t\texpectedRevID: db.CreateRevIDWithBytes(3, \"2-b\", []byte(`{\"source\":\"local\"}`)), // rev for local body, transposed under parent 2-b\n\t\t},\n\t}\n\n\tfor _, test := range conflictResolutionTests {\n\t\tt.Run(test.name, func(t *testing.T) {\n\t\t\tbase.RequireNumTestBuckets(t, 2)\n\t\t\tbase.SetUpTestLogging(t, logger.LevelInfo, logger.KeyHTTP, logger.KeySync, logger.KeyChanges, logger.KeyCRUD)\n\n\t\t\t// Passive\n\t\t\trt2 := NewRestTester(t, &RestTesterConfig{\n\t\t\t\tTestBucket: base.GetTestBucket(t),\n\t\t\t\tDatabaseConfig: &DatabaseConfig{DbConfig: DbConfig{\n\t\t\t\t\tUsers: map[string]*db.PrincipalConfig{\n\t\t\t\t\t\t\"alice\": {\n\t\t\t\t\t\t\tPassword: base.StringPtr(\"pass\"),\n\t\t\t\t\t\t\tExplicitChannels: utils.SetOf(\"*\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t}},\n\t\t\t})\n\t\t\tdefer rt2.Close()\n\n\t\t\tvar localRevisionBody db.Body\n\t\t\tassert.NoError(t, json.Unmarshal(test.localRevisionBody, &localRevisionBody))\n\n\t\t\tvar remoteRevisionBody db.Body\n\t\t\tassert.NoError(t, json.Unmarshal(test.remoteRevisionBody, &remoteRevisionBody))\n\n\t\t\tvar expectedLocalBody db.Body\n\t\t\tassert.NoError(t, json.Unmarshal(test.expectedBody, &expectedLocalBody))\n\n\t\t\t// Create revision on rt2 (remote)\n\t\t\tdocID := test.name\n\n\t\t\tif test.commonAncestorRevID != \"\" {\n\t\t\t\tresp, err := rt2.PutDocumentWithRevID(docID, test.commonAncestorRevID, \"\", remoteRevisionBody)\n\t\t\t\tassert.NoError(t, err)\n\t\t\t\tassertStatus(t, resp, http.StatusCreated)\n\t\t\t\trt2revID := respRevID(t, resp)\n\t\t\t\tassert.Equal(t, test.commonAncestorRevID, rt2revID)\n\t\t\t}\n\n\t\t\tresp, err := rt2.PutDocumentWithRevID(docID, test.remoteRevID, test.commonAncestorRevID, remoteRevisionBody)\n\t\t\tassert.NoError(t, err)\n\t\t\tassertStatus(t, resp, http.StatusCreated)\n\t\t\trt2revID := respRevID(t, resp)\n\t\t\tassert.Equal(t, test.remoteRevID, rt2revID)\n\n\t\t\tremoteDoc, err := rt2.GetDatabase().GetDocument(logger.TestCtx(t), docID, db.DocUnmarshalSync)\n\t\t\trequire.NoError(t, err)\n\n\t\t\t// Make rt2 listen on an actual HTTP port, so it can receive the 
blipsync request from rt1.\n\t\t\tsrv := httptest.NewServer(rt2.TestPublicHandler())\n\t\t\tdefer srv.Close()\n\n\t\t\tpassiveDBURL, err := url.Parse(srv.URL + \"/db\")\n\t\t\trequire.NoError(t, err)\n\n\t\t\t// Add basic auth creds to target db URL\n\t\t\tpassiveDBURL.User = url.UserPassword(\"alice\", \"pass\")\n\n\t\t\t// Active\n\t\t\trt1 := NewRestTester(t, &RestTesterConfig{\n\t\t\t\tTestBucket: base.GetTestBucket(t),\n\t\t\t})\n\t\t\tdefer rt1.Close()\n\n\t\t\t// Create revision on rt1 (local)\n\t\t\tif test.commonAncestorRevID != \"\" {\n\t\t\t\tresp, err = rt1.PutDocumentWithRevID(docID, test.commonAncestorRevID, \"\", localRevisionBody)\n\t\t\t\tassert.NoError(t, err)\n\t\t\t\tassertStatus(t, resp, http.StatusCreated)\n\t\t\t\trt1revID := respRevID(t, resp)\n\t\t\t\tassert.Equal(t, test.commonAncestorRevID, rt1revID)\n\t\t\t}\n\n\t\t\tresp, err = rt1.PutDocumentWithRevID(docID, test.localRevID, test.commonAncestorRevID, localRevisionBody)\n\t\t\tassert.NoError(t, err)\n\t\t\tassertStatus(t, resp, http.StatusCreated)\n\t\t\trt1revID := respRevID(t, resp)\n\t\t\tassert.Equal(t, test.localRevID, rt1revID)\n\n\t\t\tlocalDoc, err := rt1.GetDatabase().GetDocument(logger.TestCtx(t), docID, db.DocUnmarshalSync)\n\t\t\trequire.NoError(t, err)\n\n\t\t\tcustomConflictResolver, err := db.NewCustomConflictResolver(test.conflictResolver)\n\t\t\trequire.NoError(t, err)\n\t\t\tar := db.NewActiveReplicator(&db.ActiveReplicatorConfig{\n\t\t\t\tID: t.Name(),\n\t\t\t\tDirection: db.ActiveReplicatorTypePushAndPull,\n\t\t\t\tRemoteDBURL: passiveDBURL,\n\t\t\t\tActiveDB: &db.Database{\n\t\t\t\t\tDatabaseContext: rt1.GetDatabase(),\n\t\t\t\t},\n\t\t\t\tChangesBatchSize: 200,\n\t\t\t\tConflictResolverFunc: customConflictResolver,\n\t\t\t\tContinuous: true,\n\t\t\t\tReplicationStatsMap: base.SyncGatewayStats.NewDBStats(t.Name(), false, false, false).DBReplicatorStats(t.Name()),\n\t\t\t})\n\t\t\tdefer func() { assert.NoError(t, ar.Stop()) }()\n\n\t\t\t// Start the replicator (implicit connect)\n\t\t\tassert.NoError(t, ar.Start())\n\t\t\t// wait for the document originally written to rt2 to arrive at rt1. 
Should end up as winner under default conflict resolution\n\t\t\tbase.WaitForStat(func() int64 {\n\t\t\t\treturn ar.GetStatus().DocsWritten\n\t\t\t}, 1)\n\t\t\tlog.Printf(\"========================Replication should be done, checking with changes\")\n\n\t\t\t// Validate results on the local (rt1)\n\t\t\tchangesResults, err := rt1.WaitForChanges(1, fmt.Sprintf(\"/db/_changes?since=%d\", localDoc.Sequence), \"\", true)\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.Len(t, changesResults.Results, 1)\n\t\t\tassert.Equal(t, docID, changesResults.Results[0].ID)\n\t\t\tassert.Equal(t, test.expectedRevID, changesResults.Results[0].Changes[0][\"rev\"])\n\t\t\tlog.Printf(\"Changes response is %+v\", changesResults)\n\n\t\t\trawDocResponse := rt1.SendAdminRequest(http.MethodGet, \"/db/_raw/\"+docID, \"\")\n\t\t\tlog.Printf(\"Raw response: %s\", rawDocResponse.Body.Bytes())\n\n\t\t\tdocResponse := rt1.SendAdminRequest(http.MethodGet, \"/db/\"+docID, \"\")\n\t\t\tlog.Printf(\"Non-raw response: %s\", docResponse.Body.Bytes())\n\n\t\t\tdoc, err := rt1.GetDatabase().GetDocument(logger.TestCtx(t), docID, db.DocUnmarshalAll)\n\t\t\trequire.NoError(t, err)\n\t\t\tassert.Equal(t, test.expectedRevID, doc.SyncData.CurrentRev)\n\t\t\tassert.Equal(t, expectedLocalBody, doc.Body())\n\t\t\tlog.Printf(\"Doc %s is %+v\", docID, doc)\n\t\t\tlog.Printf(\"Doc %s attachments are %+v\", docID, doc.Attachments)\n\t\t\tfor revID, revInfo := range doc.SyncData.History {\n\t\t\t\tlog.Printf(\"doc revision [%s]: %+v\", revID, revInfo)\n\t\t\t}\n\n\t\t\t// Validate only one active leaf node remains after conflict resolution, and that all parents\n\t\t\t// of leaves have empty bodies\n\t\t\tactiveCount := 0\n\t\t\tfor _, revID := range doc.SyncData.History.GetLeaves() {\n\t\t\t\trevInfo, ok := doc.SyncData.History[revID]\n\t\t\t\trequire.True(t, ok)\n\t\t\t\tif !revInfo.Deleted {\n\t\t\t\t\tactiveCount++\n\t\t\t\t}\n\t\t\t\tif revInfo.Parent != \"\" {\n\t\t\t\t\tparentRevInfo, ok := doc.SyncData.History[revInfo.Parent]\n\t\t\t\t\trequire.True(t, ok)\n\t\t\t\t\tassert.True(t, parentRevInfo.Body == nil)\n\t\t\t\t}\n\t\t\t}\n\t\t\tassert.Equal(t, 1, activeCount)\n\n\t\t\t// Validate results on the remote (rt2)\n\t\t\trt2Since := remoteDoc.Sequence\n\t\t\tif test.expectedRevID == test.remoteRevID {\n\t\t\t\t// no changes should have been pushed back up to rt2, because this rev won.\n\t\t\t\trt2Since = 0\n\t\t\t}\n\t\t\tchangesResults, err = rt2.WaitForChanges(1, fmt.Sprintf(\"/db/_changes?since=%d\", rt2Since), \"\", true)\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.Len(t, changesResults.Results, 1)\n\t\t\tassert.Equal(t, docID, changesResults.Results[0].ID)\n\t\t\tassert.Equal(t, test.expectedRevID, changesResults.Results[0].Changes[0][\"rev\"])\n\t\t\tlog.Printf(\"Changes response is %+v\", changesResults)\n\n\t\t\tdoc, err = rt2.GetDatabase().GetDocument(logger.TestCtx(t), docID, db.DocUnmarshalAll)\n\t\t\trequire.NoError(t, err)\n\t\t\tassert.Equal(t, test.expectedRevID, doc.SyncData.CurrentRev)\n\t\t\tassert.Equal(t, expectedLocalBody, doc.Body())\n\t\t\tlog.Printf(\"Remote Doc %s is %+v\", docID, doc)\n\t\t\tlog.Printf(\"Remote Doc %s attachments are %+v\", docID, doc.Attachments)\n\t\t\tfor revID, revInfo := range doc.SyncData.History {\n\t\t\t\tlog.Printf(\"doc revision [%s]: %+v\", revID, revInfo)\n\t\t\t}\n\n\t\t\t// Validate only one active leaf node remains after conflict resolution, and that all parents\n\t\t\t// of leaves have empty bodies\n\t\t\tactiveCount = 0\n\t\t\tfor _, revID := range 
doc.SyncData.History.GetLeaves() {\n\t\t\t\trevInfo, ok := doc.SyncData.History[revID]\n\t\t\t\trequire.True(t, ok)\n\t\t\t\tif !revInfo.Deleted {\n\t\t\t\t\tactiveCount++\n\t\t\t\t}\n\t\t\t\tif revInfo.Parent != \"\" {\n\t\t\t\t\tparentRevInfo, ok := doc.SyncData.History[revInfo.Parent]\n\t\t\t\t\trequire.True(t, ok)\n\t\t\t\t\tassert.True(t, parentRevInfo.Body == nil)\n\t\t\t\t}\n\t\t\t}\n\t\t\tassert.Equal(t, 1, activeCount)\n\t\t})\n\t}\n}", "func (uc *userConfig) IfConflict(maxReplicas int) bool {\n\tret := false\n\t// move_leaders\n\tfor i, l1 := range uc.cfg.Leaders.Leader {\n\t\tfor j, l2 := range uc.cfg.Leaders.Leader {\n\t\t\tif i < j {\n\t\t\t\tif (l1.KeyStart <= l2.KeyStart && l1.KeyEnd > l2.KeyStart) ||\n\t\t\t\t\t(l2.KeyStart <= l1.KeyStart && l2.KeyEnd > l1.KeyStart) {\n\t\t\t\t\tif ((l1.StartTime.Before(l2.StartTime) || l1.StartTime.Equal(l2.StartTime)) && \n\t\t\t\t\t\t\tl1.EndTime.After(l2.StartTime)) || \n\t\t\t\t\t\t((l2.StartTime.Before(l1.StartTime) || l2.StartTime.Equal(l1.StartTime)) && \n\t\t\t\t\t\t\tl2.EndTime.After(l1.StartTime)) {\n\t\t\t\t\t\tlog.Error(\"Key Range Conflict\", zap.Ints(\"Config Move-Leader Nums\", []int{i, j}))\n\t\t\t\t\t\tret = true\n\t\t\t\t\t}\n\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\t// move_regions\n\tfor i, r1 := range uc.cfg.Regions.Region {\n\t\tfor j, r2 := range uc.cfg.Regions.Region {\n\t\t\tif i < j {\n\t\t\t\tif (r1.KeyStart <= r2.KeyStart && r1.KeyEnd > r2.KeyStart) ||\n\t\t\t\t\t(r2.KeyStart <= r1.KeyStart && r2.KeyEnd > r1.KeyStart) {\n\t\t\t\t\tif ((r1.StartTime.Before(r2.StartTime) || r1.StartTime.Equal(r2.StartTime)) &&\n\t\t\t\t\t\t\tr1.EndTime.After(r2.StartTime)) ||\n\t\t\t\t\t\t((r2.StartTime.Before(r1.StartTime) || r2.StartTime.Equal(r1.StartTime)) &&\n\t\t\t\t\t\t\tr2.EndTime.After(r1.StartTime)) {\n\t\t\t\t\t\tlog.Error(\"Key Range Conflict\", zap.Ints(\"Config Move-Region Nums\", []int{i, j}))\n\t\t\t\t\t\tret = true\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\t// store nums > max replicas\n\tfor i, r := range uc.cfg.Regions.Region {\n\t\tif len(r.Stores) > maxReplicas {\n\t\t\tlog.Error(\"the number of stores is beyond the max replicas\", zap.Int(\"Config Move-Region Nums\", i))\n\t\t\tret = true\n\t\t}\n\t}\n\treturn ret\n}", "func (c *Client) handleConflictingHeaders(\n\tctx context.Context,\n\tprimaryTrace []*types.LightBlock,\n\tchallendingBlock *types.LightBlock,\n\twitnessIndex int,\n\tnow time.Time,\n) error {\n\tsupportingWitness := c.witnesses[witnessIndex]\n\twitnessTrace, primaryBlock, err := c.examineConflictingHeaderAgainstTrace(\n\t\tctx,\n\t\tprimaryTrace,\n\t\tchallendingBlock,\n\t\tsupportingWitness,\n\t\tnow,\n\t)\n\tif err != nil {\n\t\tc.logger.Info(\"error validating witness's divergent header\", \"witness\", supportingWitness, \"err\", err)\n\t\treturn nil\n\t}\n\n\t// We are suspecting that the primary is faulty, hence we hold the witness as the source of truth\n\t// and generate evidence against the primary that we can send to the witness\n\tcommonBlock, trustedBlock := witnessTrace[0], witnessTrace[len(witnessTrace)-1]\n\tevidenceAgainstPrimary := newLightClientAttackEvidence(primaryBlock, trustedBlock, commonBlock)\n\tc.logger.Error(\"ATTEMPTED ATTACK DETECTED. 
Sending evidence against primary by witness\", \"ev\", evidenceAgainstPrimary,\n\t\t\"primary\", c.primary, \"witness\", supportingWitness)\n\tc.sendEvidence(ctx, evidenceAgainstPrimary, supportingWitness)\n\n\tif primaryBlock.Commit.Round != witnessTrace[len(witnessTrace)-1].Commit.Round {\n\t\tc.logger.Info(\"The light client has detected, and prevented, an attempted amnesia attack.\" +\n\t\t\t\" We think this attack is pretty unlikely, so if you see it, that's interesting to us.\" +\n\t\t\t\" Can you let us know by opening an issue through https://github.com/tendermint/tendermint/issues/new?\")\n\t}\n\n\t// This may not be valid because the witness itself is at fault. So now we reverse it, examining the\n\t// trace provided by the witness and holding the primary as the source of truth. Note: primary may not\n\t// respond but this is okay as we will halt anyway.\n\tprimaryTrace, witnessBlock, err := c.examineConflictingHeaderAgainstTrace(\n\t\tctx,\n\t\twitnessTrace,\n\t\tprimaryBlock,\n\t\tc.primary,\n\t\tnow,\n\t)\n\tif err != nil {\n\t\tc.logger.Info(\"Error validating primary's divergent header\", \"primary\", c.primary, \"err\", err)\n\t\treturn ErrLightClientAttack\n\t}\n\n\t// We now use the primary trace to create evidence against the witness and send it to the primary\n\tcommonBlock, trustedBlock = primaryTrace[0], primaryTrace[len(primaryTrace)-1]\n\tevidenceAgainstWitness := newLightClientAttackEvidence(witnessBlock, trustedBlock, commonBlock)\n\tc.logger.Error(\"Sending evidence against witness by primary\", \"ev\", evidenceAgainstWitness,\n\t\t\"primary\", c.primary, \"witness\", supportingWitness)\n\tc.sendEvidence(ctx, evidenceAgainstWitness, c.primary)\n\t// We return the error and don't process any more witnesses\n\treturn ErrLightClientAttack\n}", "func mergeMaps(orig, fixed *goyaml.Node) *goyaml.Node {\n\tmerged := shallowCopyNode(orig)\n\torigContent := orig.Content\n\tfixedContent := fixed.Content\n\n\t// Drop items from original if they are not in fixed\n\tfor i := 0; i < len(origContent); i += 2 {\n\t\torigKey := origContent[i]\n\t\tif isKeyInMap(origKey, fixed) {\n\t\t\torigVal := origContent[i+1]\n\t\t\tmerged.Content = append(merged.Content, origKey, origVal)\n\t\t}\n\t}\n\n\t// Update or add items from the fixed yaml which are not in the original\n\tfor i := 0; i < len(fixedContent); i += 2 {\n\t\tfixedKey := fixedContent[i]\n\t\tfixedVal := fixedContent[i+1]\n\t\tif mergedKeyIndex := findKeyInMap(fixedKey, merged); mergedKeyIndex == -1 {\n\t\t\t// Add item\n\t\t\tmerged.Content = append(merged.Content, fixedKey, fixedVal)\n\t\t} else {\n\t\t\t// Update item\n\t\t\tmergedValIndex := mergedKeyIndex + 1\n\t\t\tmergedVal := merged.Content[mergedValIndex]\n\n\t\t\tif fixedVal.Kind != mergedVal.Kind {\n\t\t\t\tmerged.Content[mergedValIndex] = fixedVal\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tswitch fixedVal.Kind {\n\t\t\tcase goyaml.ScalarNode:\n\t\t\t\tmerged.Content[mergedValIndex].Value = fixedVal.Value\n\t\t\tcase goyaml.MappingNode:\n\t\t\t\tmerged.Content[mergedValIndex] = mergeMaps(mergedVal, fixedVal)\n\t\t\tcase goyaml.SequenceNode:\n\t\t\t\tmerged.Content[mergedValIndex] = mergeSequences(fixedKey.Value, mergedVal, fixedVal)\n\t\t\tdefault:\n\t\t\t\tlog.Error(\"Unexpected yaml node kind\", fixedVal.Kind)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn merged\n}", "func (rnode *RuleNode) HasConflict() bool {\n\treturn false // TODO\n}", "func checkMergeAndUpdate(c *Client, opt ReadTreeOptions, origidx map[IndexPath]*IndexEntry, newidx *Index, resetremovals []File) error 
{\n\tsparsePatterns := parseSparsePatterns(c, &opt)\n\tif !opt.NoSparseCheckout {\n\t\tleavesFile := false\n\t\tnewSparse := false\n\t\tfor _, entry := range newidx.Objects {\n\t\t\tif !checkSparseMatches(c, opt, entry.PathName, sparsePatterns) {\n\t\t\t\tif orig, ok := origidx[entry.PathName]; ok && !orig.SkipWorktree() {\n\t\t\t\t\tnewSparse = true\n\t\t\t\t}\n\t\t\t\tentry.SetSkipWorktree(true)\n\t\t\t\tif newidx.Version <= 2 {\n\t\t\t\t\tnewidx.Version = 3\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tleavesFile = true\n\t\t\t}\n\t\t}\n\t\tfor _, entry := range origidx {\n\t\t\tif checkSparseMatches(c, opt, entry.PathName, sparsePatterns) {\n\t\t\t\t// This isn't necessarily true, but if it is we don't error out in\n\t\t\t\t// order to let the git test t1011.19 pass.\n\t\t\t\t//\n\t\t\t\t// t1011-read-tree-sparse-checkout works in mysterious ways.\n\t\t\t\tleavesFile = true\n\t\t\t}\n\t\t}\n\t\tif !leavesFile && newSparse {\n\t\t\treturn fmt.Errorf(\"Sparse checkout would leave no file in work tree\")\n\t\t}\n\t}\n\n\t// Check for invalid path names which are disallowed by git\n\tdisallow := func(path, piece string) bool {\n\t\tcasesensitive := true\n\t\tif protectHFS(c) {\n\t\t\t// HFS is case insensitive and ignores zero width\n\t\t\t// non-joiners anywhere in the file path\n\t\t\tcasesensitive = false\n\t\t\tpath = strings.Replace(path, \"\\u200c\", \"\", -1)\n\t\t\tpath = strings.TrimSpace(path)\n\t\t}\n\t\tif protectNTFS(c) {\n\t\t\t// git treats \"protectNTFS\" as \"protect the filesystem for\n\t\t\t// windows\", which means it also inherits weird DOS filename\n\t\t\t// restrictions such as 8.3 length filenames and ~ and\n\t\t\t// no dots at the end of a path because that would just be\n\t\t\t// an empty file extension.\n\t\t\tcasesensitive = false\n\n\t\t\tre := regexp.MustCompile(`(?i)(^|\\\\|/)git~[0-9]+($|\\\\|/)`)\n\t\t\tif re.MatchString(path) {\n\t\t\t\t// Handle the case where the \".\" was removed and\n\t\t\t\t// we're left with nothing, or where the .git\n\t\t\t\t// directory is 8.3 encoded.\n\t\t\t\treturn true\n\t\t\t}\n\t\t\t// Handle space or dots at end of a filename and backslashes\n\t\t\tre = regexp.MustCompile(`(?i)(^|\\\\|/)\\.git( |[.])*($|\\\\|/)`)\n\t\t\tif re.MatchString(path) {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\n\t\tif !casesensitive {\n\t\t\tpath = strings.ToLower(path)\n\t\t}\n\n\t\tif path == piece {\n\t\t\treturn true\n\t\t}\n\t\tif strings.HasSuffix(path, \"/\"+piece) || strings.HasPrefix(path, piece+\"/\") {\n\t\t\treturn true\n\t\t}\n\t\tif strings.Index(path, \"/\"+piece+\"/\") != -1 {\n\t\t\treturn true\n\t\t}\n\t\treturn false\n\t}\n\tfor _, entry := range newidx.Objects {\n\t\tpath := entry.PathName.String()\n\n\t\tif disallow(path, \".\") || disallow(path, \"..\") || disallow(path, \".git\") {\n\t\t\treturn fmt.Errorf(\"Invalid path %v\", path)\n\t\t}\n\t}\n\t// Keep a list of index entries to be updated by CheckoutIndex.\n\tfiles := make([]File, 0, len(newidx.Objects))\n\n\tif opt.Merge || opt.Reset || opt.Update {\n\t\t// Verify that merge won't overwrite anything that's been modified locally.\n\t\tfor _, entry := range newidx.Objects {\n\t\t\tf, err := entry.PathName.FilePath(c)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tif opt.Update && f.IsDir() {\n\t\t\t\tuntracked, err := LsFiles(c, LsFilesOptions{Others: true, Modified: true}, []File{f})\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tif len(untracked) > 0 {\n\t\t\t\t\treturn fmt.Errorf(\"error: Updating '%s%s' would lose untracked files in 
it\", c.SuperPrefix, entry.PathName)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif entry.Stage() != Stage0 {\n\t\t\t\t// Don't check unmerged entries. One will always\n\t\t\t\t// conflict, which means that -u won't work\n\t\t\t\t// if we check them.\n\t\t\t\t// (We also don't add them to files, so they won't\n\t\t\t\t// make it to checkoutindex\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif entry.SkipWorktree() {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif opt.Update && !f.Exists() {\n\t\t\t\t// It doesn't exist on the filesystem, so it should be checked out.\n\t\t\t\tfiles = append(files, f)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\torig, ok := origidx[entry.PathName]\n\t\t\tif !ok {\n\t\t\t\t// If it wasn't in the original index, make sure\n\t\t\t\t// we check it out after verifying there's not\n\t\t\t\t// already something there.\n\t\t\t\tif opt.Update && f.Exists() {\n\t\t\t\t\tlsopts := LsFilesOptions{Others: true}\n\t\t\t\t\tif opt.ExcludePerDirectory != \"\" {\n\t\t\t\t\t\tlsopts.ExcludePerDirectory = []File{File(opt.ExcludePerDirectory)}\n\t\t\t\t\t}\n\t\t\t\t\tuntracked, err := LsFiles(c, lsopts, []File{f})\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t\tif len(untracked) > 0 {\n\t\t\t\t\t\tif !entry.PathName.IsClean(c, entry.Sha1) {\n\t\t\t\t\t\t\treturn fmt.Errorf(\"Untracked working tree file '%v' would be overwritten by merge\", f)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tfile, err := entry.PathName.FilePath(c)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tfiles = append(files, file)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif orig.Sha1 == entry.Sha1 {\n\t\t\t\t// Nothing was modified, so don't bother checking\n\t\t\t\t// anything out\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif entry.PathName.IsClean(c, orig.Sha1) {\n\t\t\t\t// it hasn't been modified locally, so we want to\n\t\t\t\t// make sure the newidx version is checked out.\n\t\t\t\tfile, err := entry.PathName.FilePath(c)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tfiles = append(files, file)\n\t\t\t\tcontinue\n\t\t\t} else {\n\t\t\t\t// There are local unmodified changes on the filesystem\n\t\t\t\t// from the original that would be lost by -u, so return\n\t\t\t\t// an error unless --reset is specified.\n\t\t\t\tif !opt.Reset {\n\t\t\t\t\treturn fmt.Errorf(\"%s has local changes. 
Can not merge.\", entry.PathName)\n\t\t\t\t} else {\n\t\t\t\t\t// with --reset, checkout the file anyways.\n\t\t\t\t\tfile, err := entry.PathName.FilePath(c)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t\tfiles = append(files, file)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tif !opt.DryRun && (opt.Update || opt.Reset) {\n\t\tif err := CheckoutIndexUncommited(c, newidx, CheckoutIndexOptions{Quiet: true, Force: true, Prefix: opt.Prefix}, files); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// Convert to a map for constant time lookup in our loop..\n\t\tnewidxMap := newidx.GetMap()\n\n\t\t// Before returning, delete anything that was in the old index, removed\n\t\t// from the new index, and hasn't been changed on the filesystem.\n\t\tfor path, entry := range origidx {\n\t\t\tif _, ok := newidxMap[path]; ok {\n\t\t\t\t// It was already handled by checkout-index\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tfile, err := path.FilePath(c)\n\t\t\tif err != nil {\n\t\t\t\t// Don't error out since we've already\n\t\t\t\t// mucked up other stuff, just carry\n\t\t\t\t// on to the next file.\n\t\t\t\tfmt.Fprintf(os.Stderr, \"%v\\n\", err)\n\t\t\t\tcontinue\n\n\t\t\t}\n\n\t\t\t// It was deleted from the new index, but was in the\n\t\t\t// original index, so delete it if it hasn't been\n\t\t\t// changed on the filesystem.\n\t\t\tif path.IsClean(c, entry.Sha1) {\n\t\t\t\tif err := removeFileClean(file); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t} else if !opt.NoSparseCheckout {\n\t\t\t\tif !checkSparseMatches(c, opt, path, sparsePatterns) {\n\t\t\t\t\tif file.Exists() {\n\t\t\t\t\t\tif err := removeFileClean(file); err != nil {\n\t\t\t\t\t\t\treturn err\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t// Update stat information for things changed by CheckoutIndex, and remove anything\n\t\t// with the SkipWorktree bit set.\n\t\tfor _, entry := range newidx.Objects {\n\t\t\tf, err := entry.PathName.FilePath(c)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif f.Exists() {\n\t\t\t\tif entry.SkipWorktree() {\n\t\t\t\t\tif entry.PathName.IsClean(c, entry.Sha1) {\n\t\t\t\t\t\tif err := removeFileClean(f); err != nil {\n\t\t\t\t\t\t\treturn err\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tif err := entry.RefreshStat(c); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tif opt.Reset {\n\t\t\tfor _, file := range resetremovals {\n\t\t\t\t// It may have been removed by the removal loop above\n\t\t\t\tif file.Exists() {\n\t\t\t\t\tif err := file.Remove(); err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}", "func rebalance(p program, m map[string]program) {\n\t// seach for children, adopt them and remove them from map\n\tfor i, c := range p.children {\n\t\tv, ok := m[c.name]\n\t\tif ok {\n\t\t\tp.children[i] = v\n\t\t\tdelete(m, c.name)\n\t\t}\n\t}\n\t// search for parent & get adopted\n\tadopted := false\n\tfor _, v := range m {\n\t\tif getAdopted(p, v) {\n\t\t\tadopted = true\n\t\t\tbreak\n\t\t}\n\t}\n\t// no parent found, insert into map\n\tif !adopted {\n\t\tm[p.name] = p\n\t}\n}", "func (self *PhysicsP2) CreatePrismaticConstraint3O(bodyA interface{}, bodyB interface{}, lockRotation bool, anchorA []interface{}, anchorB []interface{}) *PhysicsP2PrismaticConstraint{\n return &PhysicsP2PrismaticConstraint{self.Object.Call(\"createPrismaticConstraint\", bodyA, bodyB, lockRotation, anchorA, anchorB)}\n}", "func TestMerge_Duplicate(t *testing.T) {\n\tpncounter1 := PNCounter{\n\t\tAdd: 
gcounter.GCounter{map[string]int{\"node1\": 3, \"node2\": 5, \"node3\": 7}},\n\t\tDelete: gcounter.GCounter{map[string]int{\"node1\": 3, \"node2\": 5, \"node3\": 7}},\n\t}\n\tpncounter2 := PNCounter{\n\t\tAdd: gcounter.GCounter{map[string]int{\"node1\": 3, \"node2\": 5, \"node3\": 7}},\n\t\tDelete: gcounter.GCounter{map[string]int{\"node1\": 3, \"node2\": 5, \"node3\": 7}},\n\t}\n\tpncounter3 := PNCounter{\n\t\tAdd: gcounter.GCounter{map[string]int{\"node1\": 3, \"node2\": 5, \"node3\": 7}},\n\t\tDelete: gcounter.GCounter{map[string]int{\"node1\": 3, \"node2\": 5, \"node3\": 7}},\n\t}\n\n\tpncounterExpected := PNCounter{\n\t\tAdd: gcounter.GCounter{map[string]int{\"node1\": 3, \"node2\": 5, \"node3\": 7}},\n\t\tDelete: gcounter.GCounter{map[string]int{\"node1\": 3, \"node2\": 5, \"node3\": 7}},\n\t}\n\n\tpncounterActual := Merge(pncounter1, pncounter2, pncounter3)\n\n\tcountExpected := 0\n\tcountActual := pncounterActual.GetTotal()\n\n\tassert.Equal(t, pncounterExpected, pncounterActual)\n\tassert.Equal(t, countExpected, countActual)\n\n\tpncounter = pncounter.Clear(testNode)\n}", "func (p Placement) Merge(with Placement) Placement {\n\tret := p\n\tif with.NodeAffinity != nil {\n\t\tret.NodeAffinity = with.NodeAffinity\n\t}\n\tif with.PodAffinity != nil {\n\t\tret.PodAffinity = with.PodAffinity\n\t}\n\tif with.PodAntiAffinity != nil {\n\t\tret.PodAntiAffinity = with.PodAntiAffinity\n\t}\n\tif with.Tolerations != nil {\n\t\tret.Tolerations = ret.mergeTolerations(with.Tolerations)\n\t}\n\tif with.TopologySpreadConstraints != nil {\n\t\tret.TopologySpreadConstraints = with.TopologySpreadConstraints\n\t}\n\treturn ret\n}", "func TestRaftSynchronization(t *testing.T) {\n\tID1 := \"1\"\n\tID2 := \"2\"\n\t// Configuration entry, used by the cluster to find its configuration.\n\tconfEntry := newConfEntry(1, 1, []string{ID1, ID2}, 54321)\n\n\ttests := []struct {\n\t\t// Entries in n1's log.\n\t\tn1Log []Entry\n\t\t// Entries in n2's log.\n\t\tn2Log []Entry\n\t\t// term of n1\n\t\tn1term uint64\n\t\t// term of n2\n\t\tn2term uint64\n\t\t// number of entries will be applied\n\t\tnApplied int\n\t}{\n\n\t\t// NOTE: entry (1, 1) will be the configuration.\n\n\t\t// n1: (1, 1), (1, 2)\n\t\t// n2: (1, 1), (1, 2)\n\t\t// applied: (1, 2)\n\t\t{\n\t\t\t[]Entry{confEntry, newEntry(1, 2)},\n\t\t\t[]Entry{confEntry, newEntry(1, 2)},\n\t\t\t1,\n\t\t\t1,\n\t\t\t1,\n\t\t},\n\n\t\t// n1: (1, 1), (1, 2), (1, 3)\n\t\t// n2: (1, 1), (1, 2)\n\t\t// applied: (1, 2), (1, 3)\n\t\t{\n\t\t\t[]Entry{confEntry, newEntry(1, 2), newEntry(1, 3)},\n\t\t\t[]Entry{confEntry, newEntry(1, 2)},\n\t\t\t1,\n\t\t\t1,\n\t\t\t2,\n\t\t},\n\n\t\t// n1: (1, 1),\n\t\t// n2: (1, 1), (1, 2)\n\t\t// applied: (1, 2)\n\t\t{\n\t\t\t[]Entry{confEntry},\n\t\t\t[]Entry{confEntry, newEntry(1, 2)},\n\t\t\t1,\n\t\t\t1,\n\t\t\t1,\n\t\t},\n\n\t\t// n1: (1, 1), (1, 2), (1, 3)\n\t\t// n2: (1, 1), (2, 2)\n\t\t// applied: (2, 2)\n\t\t{\n\t\t\t[]Entry{confEntry, newEntry(1, 2), newEntry(1, 3)},\n\t\t\t[]Entry{confEntry, newEntry(2, 2)},\n\t\t\t1,\n\t\t\t2,\n\t\t\t1,\n\t\t},\n\t}\n\n\tfor i, test := range tests {\n\t\tt.Log(\"running test:\", test)\n\t\tclusterPrefix := fmt.Sprintf(\"TestRaftSynchronization_%d\", i)\n\n\t\t// Create n1 node.\n\t\tstorage := NewStorage(NewMemSnapshotMgr(), initLog(test.n1Log...), NewMemState(test.n1term))\n\t\tfsm1 := newTestFSM(ID1)\n\t\tn1 := testCreateRaftNode(getTestConfig(ID1, clusterPrefix+ID1), storage)\n\t\t// Create n2 node.\n\t\tstorage = NewStorage(NewMemSnapshotMgr(), initLog(test.n2Log...), 
NewMemState(test.n2term))\n\t\tfsm2 := newTestFSM(ID2)\n\t\tn2 := testCreateRaftNode(getTestConfig(ID2, clusterPrefix+ID2), storage)\n\t\tconnectAllNodes(n1, n2)\n\t\tn1.Start(fsm1)\n\t\tn2.Start(fsm2)\n\n\t\t// Two FSMs should have applied same sequence of commands.\n\t\tif !testEntriesEqual(fsm1.appliedCh, fsm2.appliedCh, test.nApplied) {\n\t\t\tt.Fatal(\"two FSMs in same group applied different sequence of commands.\")\n\t\t}\n\t}\n}", "func ResolveConflicts(root *rbxapidump.Root) {\n\tfoundPages := false\n\tVisitClasses(root, func(c rbxapi.Class) {\n\t\tclass := c.(*rbxapidump.Class)\n\t\t// Second instance of Pages class. Was immediately renamed to\n\t\t// StandardPages in the next version.\n\t\tswitch class.Name {\n\t\tcase \"Pages\":\n\t\t\tif foundPages {\n\t\t\t\tclass.Name = \"StandardPages\"\n\t\t\t} else {\n\t\t\t\tfoundPages = true\n\t\t\t}\n\t\tcase \"DataModel\":\n\t\t\t// Many versions saw a DataModel.Loaded function, which conflicted\n\t\t\t// with the Loaded event. Apparently it went unused, and so was\n\t\t\t// ultimately removed. The same is done here. Although it might be\n\t\t\t// worth renaming it instead, there isn't any specific name that can\n\t\t\t// be used.\n\t\t\tmembers := class.Members[:0]\n\t\t\tfor _, member := range class.Members {\n\t\t\t\tif member.GetName() == \"Loaded\" &&\n\t\t\t\t\tmember.GetMemberType() == \"Function\" {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tmembers = append(members, member)\n\t\t\t}\n\t\t\tfor i := len(members); i < len(class.Members); i++ {\n\t\t\t\tclass.Members[i] = nil\n\t\t\t}\n\t\t\tclass.Members = members\n\t\t}\n\t})\n\tfoundCameraMode := false\n\tVisitEnums(root, func(e rbxapi.Enum) {\n\t\tenum := e.(*rbxapidump.Enum)\n\t\t// Second instance of CameraMode enum. Was renamed to CustomCameraMode\n\t\t// after several versions.\n\t\tswitch enum.Name {\n\t\tcase \"CameraMode\":\n\t\t\tif foundCameraMode {\n\t\t\t\tenum.Name = \"CustomCameraMode\"\n\t\t\t} else {\n\t\t\t\tfoundCameraMode = true\n\t\t\t}\n\t\tcase \"KeyCode\":\n\t\t\t// Many versions saw a number of redundant KeyCode.KeypadEquals enum\n\t\t\t// items. All the extras were eventually removed, so they're not\n\t\t\t// very interesting for keeping around.\n\t\t\tfoundKeypadEquals := false\n\t\t\titems := enum.Items[:0]\n\t\t\tfor _, item := range enum.Items {\n\t\t\t\tif item.Name == \"KeypadEquals\" {\n\t\t\t\t\tif foundKeypadEquals {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t} else {\n\t\t\t\t\t\tfoundKeypadEquals = true\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\titems = append(items, item)\n\t\t\t}\n\t\t\tfor i := len(items); i < len(enum.Items); i++ {\n\t\t\t\tenum.Items[i] = nil\n\t\t\t}\n\t\t\tenum.Items = items\n\t\t}\n\t})\n\tfoundRunning := false\n\tVisitEnumItems(root, func(e rbxapi.Enum, i rbxapi.EnumItem) {\n\t\tenum := e.(*rbxapidump.Enum)\n\t\titem := i.(*rbxapidump.EnumItem)\n\t\t// Second instance of Running enum item. 
Was renamed to RunningNoPhysics\n\t\t// after many versions.\n\t\tif enum.Name == \"HumanoidStateType\" && item.Name == \"Running\" {\n\t\t\tif foundRunning {\n\t\t\t\titem.Name = \"RunningNoPhysics\"\n\t\t\t} else {\n\t\t\t\tfoundRunning = true\n\t\t\t}\n\t\t}\n\t})\n}", "func mergeRecursive(dataA *interface{}, dataB *interface{}, arrayAppend bool, previousData *interface{}, previousKey string) {\n\taArray, aIsArray := (*dataA).([]interface{})\n\tbArray, bIsArray := (*dataB).([]interface{})\n\taMap, aIsMap := (*dataA).(map[string]interface{})\n\tbMap, bIsMap := (*dataB).(map[string]interface{})\n\n\tif aIsArray && bIsArray {\n\t\tif arrayAppend {\n\t\t\tSetArrayInDataTo(dataB, \"[+]\", previousData, previousKey, append(aArray, bArray...))\n\t\t} else {\n\t\t\tSetArrayInDataTo(dataB, \"[+]\", previousData, previousKey, aArray)\n\t\t}\n\t\treturn\n\t} else if aIsMap && bIsMap {\n\t\tfor aKey, aValue := range aMap {\n\t\t\tif _, exists := bMap[aKey]; exists {\n\t\t\t\ttmp := bMap[aKey]\n\t\t\t\tmergeRecursive(&aValue, &tmp, arrayAppend, dataB, aKey)\n\t\t\t} else {\n\t\t\t\tbMap[aKey] = aValue\n\t\t\t}\n\t\t}\n\t} else {\n\t\tif previousData != nil {\n\t\t\tpMap, pIsMap := (*previousData).(map[string]interface{})\n\n\t\t\tif pIsMap {\n\t\t\t\tpMap[previousKey] = *dataA\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\t*dataB = *dataA\n\t}\n}", "func (m Meta) Merge(other Change) (otherx, cx Change) {\n\tif m.Change != nil {\n\t\tother, m.Change = m.Change.Merge(other)\n\t}\n\treturn other, m\n}", "func UnionB3B3(b0, b1 Bounds3) Bounds3 {\n\tp1 := Point3{X: math.Min(b0.pMin.X, b1.pMin.X), Y: math.Min(b0.pMin.Y, b1.pMin.Y), Z: math.Min(b0.pMin.Z, b1.pMin.Z)}\n\tp2 := Point3{X: math.Max(b0.pMax.X, b1.pMax.X), Y: math.Max(b0.pMax.Y, b1.pMax.Y), Z: math.Max(b0.pMax.Z, b1.pMax.Z)}\n\treturn Bounds3{p1, p2}\n}", "func (md *mergeDelegate) NotifyMerge(peers []*memberlist.Node) error {\n\tcID := uuid.NewV4().String()\n\tmd.peer.registerOutgoingChallenge(cID)\n\n\tchallengeMap := make(map[string]bool)\n\n\t// TODO: Create some timeouts etc. for stopping large join requests\n\n\t// Go through all nodes in the cluster requesting to join\n\tfor _, peer := range peers {\n\t\t// Make a token\n\t\tb := make([]byte, 8)\n\t\trand.Read(b)\n\t\tquestionString := fmt.Sprintf(\"%x\", b)\n\n\t\t// Add it to the map so we can do later lookup to prevent repeat challenges\n\t\tchallengeMap[questionString] = false\n\n\t\t// Create a challenge from the token\n\t\tc := challenge{Question: questionString, ChallengeID: cID}\n\t\tchallengeBytes, err := json.Marshal(c)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\t// Create an action for the remote peer to process\n\t\taction := &update{Action: \"challenge_question\", Data: challengeBytes, From: *md.peer.member.LocalNode()}\n\t\tactionBytes, _ := json.Marshal(action)\n\n\t\t// Send the message to the remote peer\n\t\tmd.peer.member.SendReliable(peer, actionBytes)\n\t}\n\n\ttimeout := make(chan bool, 1)\n\tgo func() {\n\t\ttime.Sleep(2 * time.Second)\n\t\ttimeout <- true\n\t}()\n\n\tincomingResponses, err := md.peer.getChallengeResponseChannel(cID)\n\tif err != nil {\n\t\treturn errors.New(\"Node responded with unknown challenge ID\")\n\t}\n\tsuccessfulCount := 0\n\n\t// Wait until timeout has completed or a new response has come in\n\tfor {\n\t\tselect {\n\t\tcase sm := <-incomingResponses:\n\t\t\tif !sm.IsInPoolAndVerified() {\n\t\t\t\treturn errors.New(\"Challenge message from peer is not verified or not in pool\")\n\t\t\t}\n\t\t\t// Get the challenge response 
message\n\t\t\tmbytes, err := sm.Message.MarshalJSON()\n\t\t\tif err != nil {\n\t\t\t\treturn errors.New(\"Can't parse challenge from signed message\")\n\t\t\t}\n\n\t\t\tm := &message.Message{}\n\t\t\terr = json.Unmarshal(mbytes, m)\n\t\t\tif err != nil {\n\t\t\t\treturn errors.New(\"Challenge sent back is corrupted\")\n\t\t\t}\n\n\t\t\tcbytes, err := m.Content.MarshalJSON()\n\t\t\tif err != nil {\n\t\t\t\treturn errors.New(\"Challenge sent back is corrupted\")\n\t\t\t}\n\n\t\t\tc := &challenge{}\n\t\t\terr = json.Unmarshal(cbytes, c)\n\t\t\tif err != nil {\n\t\t\t\treturn errors.New(\"Challenge sent back is corrupted\")\n\t\t\t}\n\n\t\t\t// If the value exists\n\t\t\tif val, ok := challengeMap[c.Question]; ok {\n\t\t\t\tif val {\n\t\t\t\t\treturn errors.New(\"Challenge has already been used\")\n\t\t\t\t}\n\t\t\t\tchallengeMap[c.Question] = true\n\t\t\t\tsuccessfulCount++\n\t\t\t}\n\n\t\t\t// All challenges have successfully been received\n\t\t\tif successfulCount == len(challengeMap) {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\t// Value must not be in the issued challenges\n\t\t\treturn errors.New(\"Peer returned an unused challenge\")\n\n\t\tcase <-timeout:\n\t\t\treturn errors.New(\"Timeout reached before all nodes in joining cluster could respond\")\n\t\t}\n\t}\n\n}", "func mergeBranch(c *cli.Context) error {\n\tif !dit.CheckDitFolderPresent() {\n\t\treturn ErrNotInitialized\n\t}\n\tif c.NArg() == 0 {\n\t\treturn ErrIncorrectOperands\n\t}\n\theadHash := dit.ReadHeadBranch(c.Args().First())\n\tmergedCommit, _ := dit.GetCommit(headHash)\n\tcurrentCommit := dit.GetHeadCommit()\n\tsplitCommit := dit.GetLCA(currentCommit.CommitID, mergedCommit.CommitID)\n\tif splitCommit.CommitID == mergedCommit.CommitID {\n\t\tfmt.Println(\"Given branch is an ancestor of the current branch.\")\n\t\treturn nil\n\t}\n\tif splitCommit.CommitID == currentCommit.CommitID {\n\t\tif err := dit.ApplyCommitToWD(currentCommit, mergedCommit); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdit.ResetIndex()\n\t\trefsHead := dit.GetRefsHead()\n\t\tif err := ioutil.WriteFile(path.Join(config.RootPath, refsHead), []byte(mergedCommit.CommitID), config.DefaultFilePerm); err != nil {\n\t\t\treturn errors.Wrap(err, \"Failed to set HEAD to the new commit\")\n\t\t}\n\t\tfmt.Println(\"Current branch fast-forwarded to \", mergedCommit.CommitID)\n\t\treturn nil\n\t}\n\n\tcurrentIndex, err := dit.ReadIndex()\n\tif err != nil {\n\t\tcurrentIndex = &dit.Index{\n\t\t\tStagedFiles: make(map[string]string),\n\t\t\tRemovedFiles: make(map[string]bool),\n\t\t}\n\t}\n\tfileMap := dit.JoinMapString(currentCommit.Blob, mergedCommit.Blob)\n\tisConflict := false\n\tconflictedFiles := []string{}\n\t// first rule\n\tfor fileName := range fileMap {\n\t\tcurrentFileHash, isCurrentTracked := currentCommit.Blob[fileName]\n\t\tsplitFileHash, isSplitTracked := splitCommit.Blob[fileName]\n\t\tmergeFileHash, isMergedTracked := mergedCommit.Blob[fileName]\n\t\tswitch {\n\t\t// File tracked at split point and in current head\n\t\tcase isSplitTracked && isCurrentTracked && isMergedTracked:\n\t\t\tif (splitFileHash != mergeFileHash) && (splitFileHash == currentFileHash) {\n\t\t\t\t// Any files that have been modified in the given branch since the split point,\n\t\t\t\t// but not modified in the current branch since the split point should be changed\n\t\t\t\t// to their versions in the given branch (checked out from the commit at the front of the given branch).\n\t\t\t\t// These files should then all be automatically staged.\n\t\t\t\tblob, _ := 
dit.GetBlob(mergeFileHash)\n\t\t\t\tif err := dit.OverwriteFileWithBlob(blob, fileName); err != nil {\n\t\t\t\t\treturn errors.Wrapf(err, \"Failed to overwrite %s \\n\", fileName)\n\t\t\t\t}\n\t\t\t\tcurrentIndex.StagedFiles[fileName] = mergeFileHash\n\t\t\t} else if (splitFileHash != mergeFileHash) && (splitFileHash != currentFileHash) && (currentFileHash != mergeFileHash) {\n\t\t\t\t// \"Modified in different ways\" can mean that the contents of both are changed and different from each other\n\t\t\t\tisConflict = true\n\t\t\t\tconflictedFiles = append(conflictedFiles, fileName)\n\t\t\t\tblobHead, _ := dit.GetBlob(currentFileHash)\n\t\t\t\tblobMerge, _ := dit.GetBlob(mergeFileHash)\n\t\t\t\tif err := dit.OverwriteFileWithConflictedBlob(blobHead, blobMerge, fileName); err != nil {\n\t\t\t\t\treturn errors.Wrapf(err, \"Failed to overwrite %s \\n\", fileName)\n\t\t\t\t}\n\t\t\t}\n\t\tcase isSplitTracked && isCurrentTracked && !isMergedTracked:\n\t\t\t// Any files present at the split point, unmodified in the current branch, and\n\t\t\t// absent in the given branch should be removed (and untracked).\n\t\t\tif splitFileHash == currentFileHash {\n\t\t\t\tcurrentIndex.RemovedFiles[fileName] = true\n\t\t\t\tos.Remove(fileName)\n\t\t\t} else if splitFileHash != currentFileHash {\n\t\t\t\tfmt.Println(fileName, \" Conflict\")\n\t\t\t\tisConflict = true\n\t\t\t\tconflictedFiles = append(conflictedFiles, fileName)\n\t\t\t\tblobHead, _ := dit.GetBlob(currentFileHash)\n\t\t\t\tblobMerge, _ := dit.GetBlob(mergeFileHash)\n\t\t\t\tif err := dit.OverwriteFileWithConflictedBlob(blobHead, blobMerge, fileName); err != nil {\n\t\t\t\t\treturn errors.Wrapf(err, \"Failed to overwrite %s \\n\", fileName)\n\t\t\t\t}\n\t\t\t}\n\t\tcase isSplitTracked && !isCurrentTracked && isMergedTracked:\n\t\t\tif splitFileHash != mergeFileHash {\n\t\t\t\tfmt.Println(fileName, \" Conflict\")\n\t\t\t\tisConflict = true\n\t\t\t\tconflictedFiles = append(conflictedFiles, fileName)\n\t\t\t\tblobHead, _ := dit.GetBlob(currentFileHash)\n\t\t\t\tblobMerge, _ := dit.GetBlob(mergeFileHash)\n\t\t\t\tif err := dit.OverwriteFileWithConflictedBlob(blobHead, blobMerge, fileName); err != nil {\n\t\t\t\t\treturn errors.Wrapf(err, \"Failed to overwrite %s \\n\", fileName)\n\t\t\t\t}\n\t\t\t}\n\t\tcase isSplitTracked && !isCurrentTracked && !isMergedTracked:\n\t\tcase !isSplitTracked && isCurrentTracked && isMergedTracked:\n\t\t\tif currentFileHash != mergeFileHash {\n\t\t\t\tfmt.Println(fileName, \" Conflict\")\n\t\t\t\tisConflict = true\n\t\t\t\tconflictedFiles = append(conflictedFiles, fileName)\n\t\t\t\tblobHead, _ := dit.GetBlob(currentFileHash)\n\t\t\t\tblobMerge, _ := dit.GetBlob(mergeFileHash)\n\t\t\t\tif err := dit.OverwriteFileWithConflictedBlob(blobHead, blobMerge, fileName); err != nil {\n\t\t\t\t\treturn errors.Wrapf(err, \"Failed to overwrite %s \\n\", fileName)\n\t\t\t\t}\n\t\t\t}\n\t\tcase !isSplitTracked && isCurrentTracked && !isMergedTracked:\n\t\tcase !isSplitTracked && !isCurrentTracked && isMergedTracked:\n\t\t\t// Any files that were not present at the split point and are present only\n\t\t\t// in the given branch should be checked out and staged.\n\t\t\tblob, _ := dit.GetBlob(mergeFileHash)\n\t\t\tif err := dit.OverwriteFileWithBlob(blob, fileName); err != nil {\n\t\t\t\treturn errors.Wrapf(err, \"Failed to overwrite %s \\n\", fileName)\n\t\t\t}\n\t\t\tcurrentIndex.StagedFiles[fileName] = mergeFileHash\n\t\tcase !isSplitTracked && !isCurrentTracked && !isMergedTracked:\n\t\t}\n\t}\n\tif isConflict {\n\t\tfmt.Println(\"Encountered a merge conflict.\")\n\t\tfmt.Println(\"Conflicted Files : \", conflictedFiles)\n\t\treturn nil\n\t}\n\t// Create a commit\n\treturn nil\n}
[ "0.5305594", "0.530267", "0.5156637", "0.5114457", "0.49938747", "0.49833733", "0.4961236", "0.4697486", "0.46694422", "0.46552038", "0.4631997", "0.46033674", "0.45770454", "0.4563981", "0.4560225", "0.45419317", "0.4523825", "0.45117086", "0.4492587", "0.4451917", "0.44307408", "0.44247085", "0.44164267", "0.44027027", "0.43989334", "0.43514228", "0.4343416", "0.43405536", "0.43321285", "0.43129468", "0.43093553", "0.4296663", "0.4294829", "0.42943257", "0.4290026", "0.4288832", "0.4286941", "0.42735964", "0.4272149", "0.42716876", "0.42682168", "0.42594984", "0.42381704", "0.4232495", "0.42268214", "0.4225567", "0.42237929", "0.42151356", "0.4213962", "0.420905", "0.4207762", "0.42071047", "0.42013732", "0.4195572", "0.41930714", "0.41922304", "0.41902503", "0.41887316", "0.4171957", "0.41682586", "0.4146485", "0.41441825", "0.4143662", "0.41286272", "0.41201413", "0.41134", "0.41117686", "0.41077408", "0.41070336", "0.41048387", "0.4104709", "0.41027328", "0.41020882", "0.40981603", "0.40936378", "0.4091522", "0.40906322", "0.40884626", "0.4075044", "0.4071799", "0.4066326", "0.40615886", "0.40613505", "0.4059789", "0.40566415", "0.40557683", "0.40540266", "0.4053535", "0.404799", "0.40452617", "0.40434897", "0.40391588", "0.403749", "0.4028585", "0.40283683", "0.40233657", "0.40220696", "0.4020912", "0.40180856", "0.40091693" ]
0.6773516
0
a and b cannot be merged if they are of different NomsKind, or if at least one of the two is nil, or if either is a Noms primitive.
func unmergeable(a, b types.Value) bool { if a != nil && b != nil { aKind, bKind := a.Type().Kind(), b.Type().Kind() return aKind != bKind || types.IsPrimitiveKind(aKind) || types.IsPrimitiveKind(bKind) } return true }
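A minimal usage sketch of how unmergeable classifies its inputs, assuming the attic-labs noms go/types package; the NewStruct/StructData constructors below are illustrative and their exact signatures vary across noms versions:

	str := types.String("hello")                                           // StringKind, a Noms primitive
	s1 := types.NewStruct("Point", types.StructData{"x": types.Number(1)}) // StructKind
	s2 := types.NewStruct("Point", types.StructData{"x": types.Number(2)}) // StructKind
	unmergeable(nil, s1)  // true: at least one side is nil
	unmergeable(str, s2)  // true: different NomsKind (String vs Struct)
	unmergeable(str, str) // true: same kind, but primitives are never merged
	unmergeable(s1, s2)   // false: two non-nil, non-primitive values of the same kind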
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func MergedKnownPrimitiveTypes(a Expr, b Expr) PrimitiveType {\n\tx := KnownPrimitiveType(a)\n\ty := KnownPrimitiveType(b)\n\tif x == PrimitiveUnknown || y == PrimitiveUnknown {\n\t\treturn PrimitiveUnknown\n\t}\n\tif x == y {\n\t\treturn x\n\t}\n\treturn PrimitiveMixed // Definitely some kind of primitive\n}", "func canMerge(a, b *jointRequest) bool {\n\tif !reflect.DeepEqual(a.tileConfig, b.tileConfig) {\n\t\treturn false\n\t}\n\tif !reflect.DeepEqual(a.query, b.query) {\n\t\treturn false\n\t}\n\treturn a.dataset == b.dataset\n}", "func mergeAllowedMentions(a, b discordgo.AllowedMentions) discordgo.AllowedMentions {\n\t// merge mention types\nOUTER:\n\tfor _, v := range b.Parse {\n\t\tfor _, av := range a.Parse {\n\t\t\tif v == av {\n\t\t\t\tcontinue OUTER\n\t\t\t}\n\t\t}\n\n\t\ta.Parse = append(a.Parse, v)\n\t\tswitch v {\n\t\tcase discordgo.AllowedMentionTypeUsers:\n\t\t\ta.Users = nil\n\t\t\tb.Users = nil\n\t\tcase discordgo.AllowedMentionTypeRoles:\n\t\t\ta.Roles = nil\n\t\t\tb.Roles = nil\n\t\t}\n\t}\n\n\thasParseRoles := false\n\thasParseUsers := false\n\tfor _, p := range a.Parse {\n\t\tswitch p {\n\t\tcase discordgo.AllowedMentionTypeRoles:\n\t\t\thasParseRoles = true\n\t\tcase discordgo.AllowedMentionTypeUsers:\n\t\t\thasParseUsers = true\n\t\t}\n\t}\n\n\t// merge mentioned roles\n\tif !hasParseRoles {\n\tOUTER2:\n\t\tfor _, v := range b.Roles {\n\t\t\tfor _, av := range a.Roles {\n\t\t\t\tif v == av {\n\t\t\t\t\tcontinue OUTER2\n\t\t\t\t}\n\t\t\t}\n\n\t\t\ta.Roles = append(a.Roles, v)\n\t\t}\n\t}\n\n\t// merge mentioned users\n\tif !hasParseUsers {\n\tOUTER3:\n\t\tfor _, v := range b.Users {\n\t\t\tfor _, av := range a.Users {\n\t\t\t\tif v == av {\n\t\t\t\t\tcontinue OUTER3\n\t\t\t\t}\n\t\t\t}\n\n\t\t\ta.Users = append(a.Users, v)\n\t\t}\n\t}\n\n\treturn a\n}", "func (a Possibility) Union(b Possibility) Possibility {\n\tif a == Impossible || b == Impossible {\n\t\treturn Impossible\n\t}\n\tif a.Equals(b) == True {\n\t\treturn a\n\t}\n\treturn Maybe\n}", "func mergeIfMergable(obj reflect.Value, src reflect.Value) (reflect.Value, bool) {\n\tvar out reflect.Value\n\n\t// Look for the .Merge method.\n\tmeth, ok := obj.Type().MethodByName(\"Merge\")\n\tif !ok {\n\t\treturn out, false\n\t}\n\n\t// Verify the signature matches our Mergable pseudointerface:\n\t// - two inputs (the receiver plus one argument), and one output\n\t// - input types match output type exactly (disallow the usual pointer receiver semantics)\n\tif meth.Type.NumIn() != 2 || meth.Type.NumOut() != 1 {\n\t\treturn out, false\n\t}\n\tif meth.Type.In(0) != meth.Type.In(1) || meth.Type.In(0) != meth.Type.Out(0) {\n\t\treturn out, false\n\t}\n\n\t// Pseudointerface matches, call the .Merge method.\n\tout = meth.Func.Call([]reflect.Value{obj, src})[0]\n\n\treturn out, true\n}", "func compat(a, b *Type, seenA, seenB map[*Type]bool) bool { //nolint:gocyclo\n\t// Normalize and break cycles from recursive types.\n\ta, b = a.NonOptional(), b.NonOptional()\n\tif a == b || seenA[a] || seenB[b] {\n\t\treturn true\n\t}\n\tseenA[a], seenB[b] = true, true\n\t// Handle Any\n\tif a.Kind() == Any || b.Kind() == Any {\n\t\treturn true\n\t}\n\t// Handle simple scalars\n\tif ax, bx := a.Kind() == Bool, b.Kind() == Bool; ax || bx {\n\t\treturn ax && bx\n\t}\n\tif ax, bx := ttIsStringEnum(a), ttIsStringEnum(b); ax || bx {\n\t\treturn ax && bx\n\t}\n\tif ax, bx := a.Kind().IsNumber(), b.Kind().IsNumber(); ax || bx {\n\t\treturn ax && bx\n\t}\n\tif ax, bx := a.Kind() == TypeObject, b.Kind() == TypeObject; ax || bx {\n\t\treturn ax && bx\n\t}\n\t// 
Handle composites\n\tswitch a.Kind() {\n\tcase Array, List:\n\t\tswitch b.Kind() {\n\t\tcase Array, List:\n\t\t\treturn compat(a.Elem(), b.Elem(), seenA, seenB)\n\t\t}\n\t\treturn false\n\tcase Set:\n\t\tif b.Kind() == Set {\n\t\t\treturn compat(a.Key(), b.Key(), seenA, seenB)\n\t\t}\n\t\treturn false\n\tcase Map:\n\t\tif b.Kind() == Map {\n\t\t\treturn compat(a.Key(), b.Key(), seenA, seenB) && compat(a.Elem(), b.Elem(), seenA, seenB)\n\t\t}\n\t\treturn false\n\tcase Struct:\n\t\tif b.Kind() == Struct {\n\t\t\tif ttIsEmptyStruct(a) || ttIsEmptyStruct(b) {\n\t\t\t\treturn true // empty struct is compatible with all other structs\n\t\t\t}\n\t\t\treturn compatFields(a, b, seenA, seenB)\n\t\t}\n\t\treturn false\n\tcase Union:\n\t\tif b.Kind() == Union {\n\t\t\treturn compatFields(a, b, seenA, seenB)\n\t\t}\n\t\treturn false\n\tdefault:\n\t\tpanic(fmt.Errorf(\"vdl: Compatible unhandled types %q %q\", a, b))\n\t}\n}", "func nonBinaryMergeFn(ctx context.Context, a, b int) (int, error) {\n\treturn a + b, nil\n}", "func TestMerge(t *testing.T) {\n\ta := NewEvent(\"1970-01-01T00:00:00Z\", map[int64]interface{}{-1: int64(30), -2: \"foo\"})\n\tb := NewEvent(\"1970-01-01T00:00:00Z\", map[int64]interface{}{-1: 20, 3: \"baz\"})\n\ta.Merge(b)\n\tif a.Data[-1] != 20 || a.Data[-2] != \"foo\" || a.Data[3] != \"baz\" {\n\t\tt.Fatalf(\"Invalid merge: %v\", a.Data)\n\t}\n}", "func (a *API) Merge(other API) {\n\tif a.Short == \"\" {\n\t\ta.Short = other.Short\n\t}\n\n\tif a.Long == \"\" {\n\t\ta.Long = other.Long\n\t}\n\n\ta.Operations = append(a.Operations, other.Operations...)\n}", "func mergeTypeFlag(a, b uint) uint {\n\treturn a & (b&mysql.NotNullFlag | ^mysql.NotNullFlag) & (b&mysql.UnsignedFlag | ^mysql.UnsignedFlag)\n}", "func (p *ErrorHandlingPolicy) Merge(src ErrorHandlingPolicy) {\n\tif p.IgnoreFileErrors == nil && src.IgnoreFileErrors != nil {\n\t\tp.IgnoreFileErrors = newBool(*src.IgnoreFileErrors)\n\t}\n\n\tif p.IgnoreDirectoryErrors == nil && src.IgnoreDirectoryErrors != nil {\n\t\tp.IgnoreDirectoryErrors = newBool(*src.IgnoreDirectoryErrors)\n\t}\n}", "func merge(dst, src any, opts ...func(*mergeConfig)) error {\n\tif dst == nil || src == nil {\n\t\t// Nothing available to merge if dst or src are nil.\n\t\t// This can occur early on in reconciliation when the\n\t\t// status subresource has not been set yet.\n\t\treturn nil\n\t}\n\n\tconfig := &mergeConfig{}\n\n\tfor _, opt := range opts {\n\t\topt(config)\n\t}\n\n\tdstMap, ok := dst.(map[string]any)\n\tif !ok {\n\t\treturn errors.New(errUnsupportedDstObject)\n\t}\n\n\tsrcMap, ok := src.(map[string]any)\n\tif !ok {\n\t\treturn errors.New(errUnsupportedSrcObject)\n\t}\n\n\treturn mergo.Merge(&dstMap, filter(srcMap, config.srcfilter...), config.mergeOptions...)\n}", "func checkIncompatibleStructsCast(src types.Type, dst types.Type) bool {\n\t// check if the source type is a struct\n\tsrcStruct, ok := src.(*types.Struct)\n\tif !ok {\n\t\treturn false\n\t}\n\n\t// check if the destination type is a struct\n\tdstStruct, ok := dst.(*types.Struct)\n\tif !ok {\n\t\treturn false\n\t}\n\n\tsrcPlatformDependentCount := 0\n\tdstPlatformDependentCount := 0\n\n\t// count platform dependent types in the source type\n\tfor i := 0; i < srcStruct.NumFields(); i++ {\n\t\tif isPlatformDependent(srcStruct.Field(i)) {\n\t\t\tsrcPlatformDependentCount += 1\n\t\t}\n\t}\n\n\t// count platform dependent types in the destination type\n\tfor i := 0; i < dstStruct.NumFields(); i++ {\n\t\tif isPlatformDependent(dstStruct.Field(i)) {\n\t\t\tdstPlatformDependentCount += 
1\n\t\t}\n\t}\n\n\t// check whether the amounts match\n\treturn srcPlatformDependentCount != dstPlatformDependentCount\n}", "func ThreeWay(a, b, parent types.Value, vwr types.ValueReadWriter) (merged types.Value, err error) {\n\tif a == nil && b == nil {\n\t\treturn parent, nil\n\t} else if a == nil {\n\t\treturn parent, newMergeConflict(\"Cannot merge nil Value with %s.\", b.Type().Describe())\n\t} else if b == nil {\n\t\treturn parent, newMergeConflict(\"Cannot merge %s with nil value.\", a.Type().Describe())\n\t} else if unmergeable(a, b) {\n\t\treturn parent, newMergeConflict(\"Cannot merge %s with %s.\", a.Type().Describe(), b.Type().Describe())\n\t}\n\n\treturn threeWayMerge(a, b, parent, vwr)\n}", "func merge(dst, src *unstructured.Unstructured) bool {\n\tdstNS := dst.GetLabels()[resourceLabelNamespace]\n\tsrcNS := src.GetLabels()[resourceLabelNamespace]\n\tif dstNS != srcNS {\n\t\treturn false\n\t}\n\n\tif dstResults, ok, _ := unstructured.NestedSlice(dst.UnstructuredContent(), \"results\"); ok {\n\t\tif srcResults, ok, _ := unstructured.NestedSlice(src.UnstructuredContent(), \"results\"); ok {\n\t\t\tdstResults = append(dstResults, srcResults...)\n\n\t\t\tif err := unstructured.SetNestedSlice(dst.UnstructuredContent(), dstResults, \"results\"); err == nil {\n\t\t\t\taddSummary(dst, src)\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\treturn false\n}", "func (m *valueMerger) tryMerge(left, right, base val.Tuple) (val.Tuple, bool) {\n\t// If we're merging a keyless table and the keys match, but the values are different,\n\t// that means that the row data is the same, but the cardinality has changed, and if the\n\t// cardinality has changed in different ways on each merge side, we can't auto resolve.\n\tif m.keyless {\n\t\treturn nil, false\n\t}\n\n\tif base != nil && (left == nil) != (right == nil) {\n\t\t// One row deleted, the other modified\n\t\treturn nil, false\n\t}\n\n\t// Because we have non-identical diffs, left and right are guaranteed to be\n\t// non-nil at this point.\n\tif left == nil || right == nil {\n\t\tpanic(\"found nil left / right which should never occur\")\n\t}\n\n\tmergedValues := make([][]byte, m.numCols)\n\tfor i := 0; i < m.numCols; i++ {\n\t\tv, isConflict := m.processColumn(i, left, right, base)\n\t\tif isConflict {\n\t\t\treturn nil, false\n\t\t}\n\t\tmergedValues[i] = v\n\t}\n\n\treturn val.NewTuple(m.syncPool, mergedValues...), true\n}", "func TestMergeTwoLists_Example2(t *testing.T) {\n\toutput := mergeTwoLists_v2(nil, nil)\n\n\tif output != nil {\n\t\tt.Errorf(\"Expected: nil. 
Actual: %v\", output)\n\t}\n}", "func (bm tsidbitmap) Merge(other tsidbitmap) (tsidbitmap, bool) {\n\tfor len(bm) < len(other) {\n\t\tbm = append(bm, 0)\n\t}\n\tdirty := false\n\tfor i, v := range other {\n\t\tdirty = dirty || (bm[i]|v != bm[i])\n\t\tbm[i] |= v\n\t}\n\treturn bm, dirty\n}", "func (a *AttributeDefinition) Merge(other *AttributeDefinition) *AttributeDefinition {\n\tif other == nil {\n\t\treturn a\n\t}\n\tif a == nil {\n\t\treturn other\n\t}\n\tleft := a.Type.(Object)\n\tright := other.Type.(Object)\n\tif left == nil || right == nil {\n\t\tpanic(\"cannot merge non object attributes\") // bug\n\t}\n\tfor n, v := range right {\n\t\tleft[n] = v\n\t}\n\treturn a\n}", "func CompObjects(a, b interface{}) bool {\n\tif reflect.TypeOf(a) != reflect.TypeOf(b) {\n\t\treturn false\n\t}\n\n\tswitch a.(type) {\n\tcase map[string]interface{}:\n\t\tx := a.(map[string]interface{})\n\t\ty := b.(map[string]interface{})\n\n\t\tif len(x) != len(y) {\n\t\t\treturn false\n\t\t}\n\n\t\teq := true\n\t\tfor k, v := range x {\n\t\t\tv2, found := y[k]\n\t\t\teq = eq && found && CompObjects(v, v2)\n\t\t}\n\t\treturn eq\n\tcase []interface{}:\n\t\tx := a.([]interface{})\n\t\ty := b.([]interface{})\n\n\t\tif len(x) != len(y) {\n\t\t\treturn false\n\t\t}\n\n\t\teq := true\n\t\tfor i, v := range x {\n\t\t\teq = eq && CompObjects(v, y[i])\n\t\t}\n\t\treturn eq\n\tcase string, float64, bool, nil:\n\t\treturn a == b\n\tdefault:\n\t\t// Don't know how to compare these types\n\t\treturn false\n\t}\n}", "func (c *ConfigureOpener) Merge(other ConfigureOpener) {\n\tif c.ErrorThresholdPercentage == 0 {\n\t\tc.ErrorThresholdPercentage = other.ErrorThresholdPercentage\n\t}\n\tif c.RequestVolumeThreshold == 0 {\n\t\tc.RequestVolumeThreshold = other.RequestVolumeThreshold\n\t}\n\tif c.Now == nil {\n\t\tc.Now = other.Now\n\t}\n\tif c.RollingDuration == 0 {\n\t\tc.RollingDuration = other.RollingDuration\n\t}\n\tif c.NumBuckets == 0 {\n\t\tc.NumBuckets = other.NumBuckets\n\t}\n}", "func merge(mapA, mapB map[string]interface{}) map[string]interface{} {\n\tresult := make(map[string]interface{})\n\tfor k, v := range mapA {\n\t\tresult[k] = v\n\t}\n\tfor k, v := range mapB {\n\t\tif _, ok := result[k]; !ok || result[k] == nil || result[k] == false {\n\t\t\tresult[k] = v\n\t\t}\n\t}\n\treturn result\n}", "func (p *Properties) Merge(other *Properties) error {\n\tif other != nil {\n\t\tcomposite := errors.NewCompositeError()\n\t\tfor k, v := range other.m {\n\t\t\tcomposite.Append(p.Set(k, v))\n\t\t}\n\n\t\treturn composite.OrNilIfEmpty()\n\t}\n\n\treturn nil\n}", "func (a *AudioMeta) Merge(b *AudioMeta) {\n\tif len(a.Title) == 0 {\n\t\ta.Title = b.Title\n\t}\n\n\tif len(a.Artist) == 0 {\n\t\ta.Artist = b.Artist\n\t}\n\n\tif len(a.Album) == 0 {\n\t\ta.Album = b.Album\n\t}\n\n\tif a.Track != b.Track && a.Track == 0 {\n\t\ta.Track = b.Track\n\t}\n\n\tif a.TotalTracks != b.TotalTracks && a.TotalTracks == 0 {\n\t\ta.TotalTracks = b.TotalTracks\n\t}\n\n\tif a.Year != b.Year && a.Year == 0 {\n\t\ta.Year = b.Year\n\t}\n\n\tif len(a.AlbumArtist) == 0 {\n\t\ta.AlbumArtist = b.AlbumArtist\n\t}\n\n\tif len(a.Genre) == 0 {\n\t\ta.Genre = b.Genre\n\t}\n\n\tif a.Duration != b.Duration && a.Duration == 0 {\n\t\ta.Duration = b.Duration\n\t}\n\n\tif a.CoverBufer == nil {\n\t\ta.CoverBufer = b.CoverBufer\n\t}\n}", "func MergeErr(err1, err2 error) error {\n\tif err1 == nil {\n\t\treturn err2\n\t} else if err2 == nil {\n\t\treturn err1\n\t}\n\tranError1, ok1 := err1.(RanError)\n\tif !ok1 {\n\t\tranError1 = NewOutError(err1)\n\t}\n\tranError2, ok2 := 
err2.(RanError)\n\tif !ok2 {\n\t\tranError2 = NewOutError(err2)\n\t}\n\treturn Merge(ranError1, ranError2)\n}", "func (e *dataUsageEntry) merge(other dataUsageEntry) {\n\te.Objects += other.Objects\n\te.Versions += other.Versions\n\te.Size += other.Size\n\tors := other.ReplicationStats\n\tempty := replicationStats{}\n\tif ors != nil && *ors != empty {\n\t\tif e.ReplicationStats == nil {\n\t\t\te.ReplicationStats = &replicationStats{}\n\t\t}\n\t\te.ReplicationStats.PendingSize += other.ReplicationStats.PendingSize\n\t\te.ReplicationStats.FailedSize += other.ReplicationStats.FailedSize\n\t\te.ReplicationStats.ReplicatedSize += other.ReplicationStats.ReplicatedSize\n\t\te.ReplicationStats.ReplicaSize += other.ReplicationStats.ReplicaSize\n\t\te.ReplicationStats.PendingCount += other.ReplicationStats.PendingCount\n\t\te.ReplicationStats.FailedCount += other.ReplicationStats.FailedCount\n\n\t}\n\n\tfor i, v := range other.ObjSizes[:] {\n\t\te.ObjSizes[i] += v\n\t}\n}", "func merge(a, b map[string]interface{}) map[string]interface{} {\n\tresult := make(map[string]interface{})\n\tfor key, bValue := range b {\n\t\taValue := a[key]\n\t\tswitch bMap := bValue.(type) {\n\t\tcase map[string]interface{}:\n\t\t\taMap, ok := aValue.(map[string]interface{})\n\t\t\tif ok {\n\t\t\t\tresult[key] = merge(aMap, bMap)\n\t\t\t} else {\n\t\t\t\tresult[key] = bValue\n\t\t\t}\n\t\tdefault:\n\t\t\tresult[key] = bValue\n\t\t}\n\t}\n\tfor key, aValue := range a {\n\t\tif _, ok := b[key]; !ok {\n\t\t\tresult[key] = aValue\n\t\t}\n\t}\n\treturn result\n}", "func MetaMerge(a, b map[string]interface{}) map[string]interface{} {\n\tfor k, v := range b {\n\t\t// don't over-write\n\t\tif _, ok := a[k]; !ok {\n\t\t\ta[k] = v\n\t\t}\n\t}\n\treturn a\n}", "func (m merger) Merge(a, b <-chan int) <-chan int {\n\tmerged := make(chan int)\n\n\tgo func() {\n\t\tdefer close(merged)\n\n\t\tfor b != nil || a != nil {\n\t\t\tselect {\n\t\t\tcase val, received := <-a:\n\t\t\t\tif !received {\n\t\t\t\t\t// assigning nil value to chan disable the related case in the select statement\n\t\t\t\t\t// and avoid loosing CPU resources without reasons when chan.\n\t\t\t\t\ta = nil\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tmerged <- val\n\n\t\t\tcase val, received := <-b:\n\t\t\t\tif !received {\n\t\t\t\t\tb = nil\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tmerged <- val\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn merged\n}", "func (m *Empty) Merge(overrides *Empty) {\n\tif m == nil || overrides == nil {\n\t\treturn\n\t}\n\n}", "func mergeConfigs(first *config, second *config) (*config, error) {\n\tmerged := &config{}\n\tr1 := reflect.ValueOf(*first)\n\tr2 := reflect.ValueOf(*second)\n\tfor i := 0; i < r1.NumField(); i++ {\n\t\tf1 := r1.Field(i)\n\t\tf2 := r2.Field(i)\n\t\tdst := reflect.ValueOf(merged).Elem().Field(i)\n\t\tswitch f1.Kind() {\n\t\tcase reflect.String:\n\t\t\tif f1.String() == \"\" {\n\t\t\t\tdst.SetString(f2.String())\n\t\t\t} else {\n\t\t\t\tdst.SetString(f1.String())\n\t\t\t}\n\t\tcase reflect.Bool:\n\t\t\tif f1.Bool() || f2.Bool() {\n\t\t\t\tdst.SetBool(true)\n\t\t\t}\n\t\t}\n\t}\n\treturn merged, nil\n}", "func Noconv(t1 *Type, t2 *Type) bool", "func (a Annotation) Merge(other schema.Annotation) schema.Annotation {\n\tvar ant Annotation\n\tswitch other := other.(type) {\n\tcase Annotation:\n\t\tant = other\n\tcase *Annotation:\n\t\tif other != nil {\n\t\t\tant = *other\n\t\t}\n\tdefault:\n\t\treturn a\n\t}\n\tif tag := ant.StructTag; tag != \"\" {\n\t\ta.StructTag = tag\n\t}\n\treturn a\n}", "func (s *mergeBaseSuite) 
TestNoAncestorsWhenNoCommonHistory(c *C) {\n\trevs := []string{\"M\", \"N\"}\n\tnothing := []string{}\n\ts.AssertMergeBase(c, revs, nothing)\n}", "func TestMergeTwoLists_Example3(t *testing.T) {\n\toutput := mergeTwoLists_v2(nil, &ListNode{\n\t\tVal: 0,\n\t\tNext: nil,\n\t})\n\n\texpectedOutput := &ListNode{\n\t\tVal: 0,\n\t\tNext: nil,\n\t}\n\n\tif !reflect.DeepEqual(expectedOutput, output) {\n\t\tt.Errorf(\"Expected: &{0 <nil>}. Actual: %v\", output)\n\t}\n}", "func (sr *StyleBox) Combine(b StyleBox) {\n\n\tvalOfA := reflect.ValueOf(sr)\n\tvalOfA = valOfA.Elem() // dereference\n\n\tvalOfB := reflect.ValueOf(b)\n\n\ttypeOfB := valOfB.Type()\n\tif valOfB.Kind().String() != \"struct\" {\n\t\t//\n\t}\n\n\tfor i := 0; i < valOfB.NumField(); i++ {\n\n\t\tfnB := typeOfB.Field(i).Name\n\t\t// log.Printf(\"Fieldname %v\", fnB)\n\n\t\tvlB := valOfB.Field(i).Interface()\n\n\t\t// string\n\t\tif vlBStr, ok := vlB.(string); ok {\n\t\t\tif vlBStr != \"\" {\n\t\t\t\tvlA := valOfA.Field(i).Interface()\n\t\t\t\tif vlAStr, ok := vlA.(string); ok {\n\t\t\t\t\tif vlAStr == \"\" { // base is empty\n\t\t\t\t\t\tif valOfA.FieldByName(fnB).CanSet() {\n\t\t\t\t\t\t\tvalOfA.FieldByName(fnB).Set(valOfB.Field(i))\n\t\t\t\t\t\t\t// log.Printf(\"Set string %v\", fnB)\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tlog.Printf(\"Cannot set string %v\", fnB)\n\t\t\t\t\t\t}\n\t\t\t\t\t} else {\n\t\t\t\t\t\t// log.Printf(\"Base for string %v not empty %v\", fnB, vlAStr)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t// int\n\t\tif vlBInt, ok := vlB.(int); ok {\n\t\t\tif vlBInt != 0 {\n\t\t\t\tvlA := valOfA.Field(i).Interface()\n\t\t\t\tif vlAInt, ok := vlA.(int); ok {\n\t\t\t\t\tif vlAInt == 0 { // base is empty\n\t\t\t\t\t\tif valOfA.FieldByName(fnB).CanSet() {\n\t\t\t\t\t\t\tvalOfA.FieldByName(fnB).Set(valOfB.Field(i))\n\t\t\t\t\t\t\t// log.Printf(\"Set int %v\", fnB)\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tlog.Printf(\"Cannot set int %v\", fnB)\n\t\t\t\t\t\t}\n\t\t\t\t\t} else {\n\t\t\t\t\t\t// log.Printf(\"Base for int %v not empty %v\", fnB, vlAInt)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t}\n}", "func Merge(left, right []string) []string {\n\treturn CollectVariety(left, right, GetAll, GetAll, GetAll)\n}", "func (obj object) Merge(other Object) (Object, bool) {\n\treturn obj.MergeWith(other, func(v1, v2 *Term) (*Term, bool) {\n\t\tobj1, ok1 := v1.Value.(Object)\n\t\tobj2, ok2 := v2.Value.(Object)\n\t\tif !ok1 || !ok2 {\n\t\t\treturn nil, true\n\t\t}\n\t\tobj3, ok := obj1.Merge(obj2)\n\t\tif !ok {\n\t\t\treturn nil, true\n\t\t}\n\t\treturn NewTerm(obj3), false\n\t})\n}", "func (a *BooleanArray) Merge(b *BooleanArray) {\n\tif a.Len() == 0 {\n\t\t*a = *b\n\t\treturn\n\t}\n\n\tif b.Len() == 0 {\n\t\treturn\n\t}\n\n\t// Normally, both a and b should not contain duplicates. Due to a bug in older versions, it's\n\t// possible stored blocks might contain duplicate values. 
Remove them if they exists before\n\t// merging.\n\t// a = a.Deduplicate()\n\t// b = b.Deduplicate()\n\n\tif a.MaxTime() < b.MinTime() {\n\t\ta.Timestamps = append(a.Timestamps, b.Timestamps...)\n\t\ta.Values = append(a.Values, b.Values...)\n\t\treturn\n\t}\n\n\tif b.MaxTime() < a.MinTime() {\n\t\tvar tmp BooleanArray\n\t\ttmp.Timestamps = append(b.Timestamps, a.Timestamps...)\n\t\ttmp.Values = append(b.Values, a.Values...)\n\t\t*a = tmp\n\t\treturn\n\t}\n\n\tout := NewBooleanArrayLen(a.Len() + b.Len())\n\ti, j, k := 0, 0, 0\n\tfor i < len(a.Timestamps) && j < len(b.Timestamps) {\n\t\tif a.Timestamps[i] < b.Timestamps[j] {\n\t\t\tout.Timestamps[k] = a.Timestamps[i]\n\t\t\tout.Values[k] = a.Values[i]\n\t\t\ti++\n\t\t} else if a.Timestamps[i] == b.Timestamps[j] {\n\t\t\tout.Timestamps[k] = b.Timestamps[j]\n\t\t\tout.Values[k] = b.Values[j]\n\t\t\ti++\n\t\t\tj++\n\t\t} else {\n\t\t\tout.Timestamps[k] = b.Timestamps[j]\n\t\t\tout.Values[k] = b.Values[j]\n\t\t\tj++\n\t\t}\n\t\tk++\n\t}\n\n\tif i < len(a.Timestamps) {\n\t\tn := copy(out.Timestamps[k:], a.Timestamps[i:])\n\t\tcopy(out.Values[k:], a.Values[i:])\n\t\tk += n\n\t} else if j < len(b.Timestamps) {\n\t\tn := copy(out.Timestamps[k:], b.Timestamps[j:])\n\t\tcopy(out.Values[k:], b.Values[j:])\n\t\tk += n\n\t}\n\n\ta.Timestamps = out.Timestamps[:k]\n\ta.Values = out.Values[:k]\n}", "func Min(a interface{}, b interface{}) interface{} {\n\tif a == nil {\n\t\treturn b\n\t}\n\tif b == nil {\n\t\treturn a\n\t}\n\tif Cmp(a, b) > 0 {\n\t\treturn b\n\t}\n\treturn a\n}", "func (s *mergeBaseSuite) TestCommonAncestorInMergedOrphans(c *C) {\n\trevs := []string{\"A\", \"B\"}\n\texpectedRevs := []string{\"AB\"}\n\ts.AssertMergeBase(c, revs, expectedRevs)\n}", "func (a Possibility) Intersect(b Possibility) Possibility {\n\tif a == Impossible || b == Impossible {\n\t\treturn Impossible\n\t}\n\tif a == Maybe {\n\t\tif b == Maybe {\n\t\t\treturn Maybe\n\t\t}\n\t\ta, b = b, a\n\t}\n\t// a is True or False\n\t// b is True, False or Maybe\n\tif b == Maybe || a == b {\n\t\treturn a\n\t}\n\treturn Impossible\n}", "func merge(a, b <-chan int) <-chan int {\n\tc := make(chan int) // create channel\n\t// anom function\n\tgo func() {\n\t\tdefer close(c)\n\t\tfor a != nil || b != nil {\n\t\t\tselect {\n\t\t\tcase v, ok := <-a:\n\t\t\t\tif !ok {\n\t\t\t\t\tfmt.Println(\"a is done\")\n\t\t\t\t\ta = nil // good for ther CPU\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tc <- v\n\t\t\tcase v, ok := <-b:\n\t\t\t\tif !ok {\n\t\t\t\t\tfmt.Println(\"b is done\")\n\t\t\t\t\tb = nil // good for the CPU\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tc <- v\n\t\t\t}\n\t\t}\n\t}()\n\treturn c\n}", "func merge(c chan<- int, a, b <-chan int) { // 此处的 a b 与 main 里的 a b 不是同一个变量,只是它们的值里都含有相同的指针,指向同一块内存;当 main 里的 a b 被置为 nil 之后,这里的 a b 并没有改变\n\tfor a != nil || b != nil {\n\t\tfmt.Println(\"receive and send\", a, b)\n\t\tselect {\n\t\tcase v, ok := <-a:\n\t\t\tif ok {\n\t\t\t\tc <- v\n\t\t\t}\n\t\tcase v, ok := <-b:\n\t\t\tif ok {\n\t\t\t\tc <- v\n\t\t\t}\n\t\t}\n\t}\n\tfmt.Println(\"merge finish\")\n}", "func (m Meta) Merge(other Change) (otherx, cx Change) {\n\tif m.Change != nil {\n\t\tother, m.Change = m.Change.Merge(other)\n\t}\n\treturn other, m\n}", "func mergeNanocubeResponse(dest, src *NanocubeResponse) {\n\tif dest.Layers == nil {\n\t\tdest.Layers = []string{}\n\t}\n\n\tif dest.Root.Val != nil {\n\t\tif src.Root.Val != nil {\n\t\t\t*(dest.Root.Val) += *(src.Root.Val)\n\t\t}\n\t\treturn\n\t} else if src.Root.Val != nil {\n\t\tdest.Root.Val = src.Root.Val\n\t}\n\n\tdest.Root.Children = 
mergeChildren(dest.Root.Children, src.Root.Children)\n}", "func (c *networkConfiguration) Conflicts(o *networkConfiguration) error {\n\tif o == nil {\n\t\treturn errors.New(\"same configuration\")\n\t}\n\n\t// Also empty, because only one network with empty name is allowed\n\tif c.BridgeName == o.BridgeName {\n\t\treturn errors.New(\"networks have same bridge name\")\n\t}\n\n\t// They must be in different subnets\n\tif (c.AddressIPv4 != nil && o.AddressIPv4 != nil) &&\n\t\t(c.AddressIPv4.Contains(o.AddressIPv4.IP) || o.AddressIPv4.Contains(c.AddressIPv4.IP)) {\n\t\treturn errors.New(\"networks have overlapping IPv4\")\n\t}\n\n\t// They must be in different v6 subnets\n\tif (c.AddressIPv6 != nil && o.AddressIPv6 != nil) &&\n\t\t(c.AddressIPv6.Contains(o.AddressIPv6.IP) || o.AddressIPv6.Contains(c.AddressIPv6.IP)) {\n\t\treturn errors.New(\"networks have overlapping IPv6\")\n\t}\n\n\treturn nil\n}", "func Merge(orig, v interface{}, defaults bool) {\n\tvv := reflect.ValueOf(v).Elem()\n\tif !vv.IsValid() {\n\t\treturn\n\t}\n\tvorig := reflect.ValueOf(orig).Elem()\n\tfor i := 0; i < vorig.NumField(); i++ {\n\t\tfield := vv.Field(i)\n\t\tif defaults {\n\t\t\tfield = vorig.Field(i)\n\t\t}\n\t\tvar empty bool\n\t\tswitch field.Type().Kind() {\n\t\tcase reflect.Struct:\n\t\t\tempty = true // TODO: merge structs recursively\n\t\tcase reflect.Chan, reflect.Func, reflect.Slice, reflect.Map:\n\t\t\tempty = field.IsNil()\n\t\tdefault:\n\t\t\tempty = field.Interface() == reflect.Zero(field.Type()).Interface()\n\t\t}\n\t\tif !empty {\n\t\t\tvorig.Field(i).Set(vv.Field(i))\n\t\t}\n\t}\n}", "func UnionMerge[T any](obj T, src T) T {\n\tname := fmt.Sprintf(\"%T\", obj)\n\n\tvObj := reflect.ValueOf(obj)\n\tvSrc := reflect.ValueOf(src)\n\n\t// obj must have the same type as src.\n\tassertTypeMatch(vObj, vSrc)\n\n\tif vObj.Kind() != reflect.Struct {\n\t\tpanic(\"UnionMerge must only be called on struct types\")\n\t}\n\n\treturn unionMerge(vObj, vSrc, name).Interface().(T)\n}", "func TestMerge3_Merge_fail(t *testing.T) {\n\t// TODO: make this test pass on windows -- currently failing due to comment whitespace changes\n\ttestutil.SkipWindows(t)\n\n\t_, datadir, _, ok := runtime.Caller(0)\n\tif !assert.True(t, ok) {\n\t\tt.FailNow()\n\t}\n\tdatadir = filepath.Join(filepath.Dir(datadir), \"testdata2\")\n\n\t// setup the local directory\n\tdir := t.TempDir()\n\n\tif !assert.NoError(t, copyutil.CopyDir(\n\t\tfilesys.MakeFsOnDisk(),\n\t\tfilepath.Join(datadir, \"dataset1-localupdates\"),\n\t\tfilepath.Join(dir, \"dataset1\"))) {\n\t\tt.FailNow()\n\t}\n\n\terr := filters.Merge3{\n\t\tOriginalPath: filepath.Join(datadir, \"dataset1\"),\n\t\tUpdatedPath: filepath.Join(datadir, \"dataset1-remoteupdates\"),\n\t\tDestPath: filepath.Join(dir, \"dataset1\"),\n\t\tMatcher: &filters.DefaultGVKNNMatcher{MergeOnPath: false},\n\t}.Merge()\n\tif !assert.Error(t, err) {\n\t\tt.FailNow()\n\t}\n}", "func coalesce(x, y *declNode) {\n\t// x becomes y's canonical representative.\n\ty.rep = x\n\n\t// x accumulates y's internal references.\n\tfor z := range y.intRefs {\n\t\tx.intRefs[z] = true\n\t}\n\ty.intRefs = nil\n\n\t// x accumulates y's external references.\n\tfor z := range y.extRefs {\n\t\tif x.extRefs == nil {\n\t\t\tx.extRefs = make(symbolSet)\n\t\t}\n\t\tx.extRefs[z] = true\n\t}\n\ty.extRefs = nil\n}", "func union(a, b map[string]bool) {\n\tfor k, v := range b {\n\t\tif v {\n\t\t\ta[k] = v\n\t\t}\n\t}\n}", "func merge(original interface{}, update interface{}) (merged interface{}, err error) {\n\n\tlogger.Info(\"Merging\", 
\"original\", original, \"update\", update)\n\n\tswitch O := original.(type) {\n\n\tcase map[string]interface{}:\n\t\tU, ok := update.(map[string]interface{})\n\t\tif !ok {\n\t\t\treturn nil, errors.New(\"update is not mS like original\")\n\t\t}\n\t\tlogger.Info(\"mS entering\")\n\t\tfor key, val := range U {\n\t\t\tlogger.Debug(\"in merge mS-U\", \"key\", key, \"val\", val, \"curr\", O[key])\n\t\t\tif curr, exists := O[key]; exists {\n\t\t\t\ttmp, err := merge(curr, val)\n\t\t\t\tlogger.Debug(\"after merge mS\", \"tmp\", tmp, \"err\", err)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, errors.Wrap(err, \"in merge mS\")\n\t\t\t\t}\n\t\t\t\tO[key] = tmp\n\t\t\t} else {\n\t\t\t\tO[key] = val\n\t\t\t}\n\t\t}\n\t\tlogger.Info(\"mS returning\", \"O\", O)\n\t\treturn O, nil\n\n\tcase []interface{}:\n\t\tU, ok := update.([]interface{})\n\t\tif !ok {\n\t\t\treturn nil, errors.New(\"update is not aI like original\")\n\t\t}\n\t\t// logger.Warn(\"O\", \"data\", O)\n\t\t// logger.Warn(\"U\", \"data\", U)\n\n\t\tlogger.Info(\"aI entering\")\n\t\t// turn update into map\n\t\tUM := map[string]interface{}{}\n\t\tfor i, elem := range U {\n\t\t\tswitch E := elem.(type) {\n\n\t\t\tcase map[string]interface{}:\n\t\t\t\tname, ok := E[\"name\"]\n\t\t\t\tif !ok {\n\t\t\t\t\treturn nil, errors.New(\"original array objects must have names to be merged\")\n\t\t\t\t}\n\t\t\t\tUM[name.(string)] = E\n\n\t\t\tcase string:\n\t\t\t\tUM[E] = E\n\n\t\t\tdefault:\n\t\t\t\tlogger.Error(\"original unknown elem type in aI\", \"i\", i, \"elem\", elem)\n\t\t\t\treturn nil, errors.New(\"original unknown elem type in aI\")\n\t\t\t}\n\t\t}\n\n\t\tfor i, elem := range O {\n\t\t\t// logger.Crit(\"O-loop\", \"i\", i, \"elem\", elem)\n\t\t\tswitch E := elem.(type) {\n\n\t\t\tcase map[string]interface{}:\n\t\t\t\tiname, ok := E[\"name\"]\n\t\t\t\tif !ok {\n\t\t\t\t\treturn nil, errors.New(\"original array objects must have names to be merged\")\n\t\t\t\t}\n\n\t\t\t\tname := iname.(string)\n\t\t\t\t// logger.Error(\"Name\", \"name\", name)\n\n\t\t\t\tcurr, exists := UM[name]\n\t\t\t\tif exists {\n\t\t\t\t\ttmp, err := merge(elem, curr)\n\t\t\t\t\t// this is correct, the var names curr and elem are backwards...\n\t\t\t\t\t// busy fixing a bug\n\t\t\t\t\t// logger.Crit(\"merging with existing element\", \"key\", name, \"val\", curr, \"curr\", elem)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn nil, errors.Wrap(err, \"in merge MS\")\n\t\t\t\t\t}\n\t\t\t\t\tO[i] = tmp\n\t\t\t\t\tdelete(UM, name)\n\t\t\t\t}\n\t\t\tcase string:\n\t\t\t\t_, exists := UM[E]\n\t\t\t\tif exists {\n\t\t\t\t\tdelete(UM, E)\n\t\t\t\t}\n\n\t\t\tdefault:\n\t\t\t\tlogger.Error(\"original unknown elem type in aI\", \"i\", i, \"elem\", elem)\n\t\t\t\treturn nil, errors.New(\"original unknown elem type in aI\")\n\t\t\t}\n\t\t}\n\t\t// merge\n\t\tlogger.Info(\"aI\")\n\n\t\t// turn back into array\n\t\tOA := []interface{}{}\n\t\tfor _, val := range O {\n\t\t\tOA = append(OA, val)\n\t\t}\n\t\tfor _, elem := range U {\n\t\t\tswitch E := elem.(type) {\n\n\t\t\tcase map[string]interface{}:\n\t\t\t\tname, ok := E[\"name\"]\n\t\t\t\tif !ok {\n\t\t\t\t\treturn nil, errors.New(\"original array objects must have names to be merged\")\n\t\t\t\t}\n\t\t\t\t_, exists := UM[name.(string)]\n\t\t\t\tif exists {\n\t\t\t\t\tOA = append(OA, elem)\n\t\t\t\t}\n\n\t\t\tcase string:\n\t\t\t\t_, exists := UM[E]\n\t\t\t\tif exists {\n\t\t\t\t\tOA = append(OA, elem)\n\t\t\t\t}\n\n\t\t\t}\n\t\t}\n\n\t\t// logger.Error(\"OA\", \"data\", OA)\n\n\t\tlogger.Info(\"aI returning\", \"OA\", OA)\n\t\treturn 
OA, nil\n\n\tcase string:\n\t\treturn update, nil\n\n\tdefault:\n\t\treturn nil, errors.New(\"unmergable original\" + fmt.Sprintf(\"%t, %+v\", original, original))\n\n\t}\n\n\tlogger.Crit(\"Shouldn't get here (end of merge function)\")\n\treturn nil, errors.New(\"PANIC, should not get here\")\n}", "func merge(new, dst *Range) bool {\n\tif new.End() < dst.Pos {\n\t\treturn false\n\t}\n\tif new.End() > dst.End() {\n\t\tdst.Size = new.Size\n\t} else {\n\t\tdst.Size += dst.Pos - new.Pos\n\t}\n\tdst.Pos = new.Pos\n\treturn true\n}", "func (sr *StyleText) Combine(b StyleText) {\n\n\tvalOfA := reflect.ValueOf(sr)\n\tvalOfA = valOfA.Elem() // dereference\n\n\tvalOfB := reflect.ValueOf(b)\n\n\ttypeOfB := valOfB.Type()\n\tif valOfB.Kind().String() != \"struct\" {\n\t\t//\n\t}\n\n\tfor i := 0; i < valOfB.NumField(); i++ {\n\n\t\tfnB := typeOfB.Field(i).Name\n\t\t// log.Printf(\"Fieldname %v\", fnB)\n\n\t\tvlB := valOfB.Field(i).Interface()\n\n\t\t// string\n\t\tif vlBStr, ok := vlB.(string); ok {\n\t\t\tif vlBStr != \"\" {\n\t\t\t\tvlA := valOfA.Field(i).Interface()\n\t\t\t\tif vlAStr, ok := vlA.(string); ok {\n\t\t\t\t\tif vlAStr == \"\" { // base is empty\n\t\t\t\t\t\tif valOfA.FieldByName(fnB).CanSet() {\n\t\t\t\t\t\t\tvalOfA.FieldByName(fnB).Set(valOfB.Field(i))\n\t\t\t\t\t\t\t// log.Printf(\"Set string %v\", fnB)\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tlog.Printf(\"Cannot set string %v\", fnB)\n\t\t\t\t\t\t}\n\t\t\t\t\t} else {\n\t\t\t\t\t\t// log.Printf(\"Base for string %v not empty %v\", fnB, vlAStr)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t// int\n\t\tif vlBInt, ok := vlB.(int); ok {\n\t\t\tif vlBInt != 0 {\n\t\t\t\tvlA := valOfA.Field(i).Interface()\n\t\t\t\tif vlAInt, ok := vlA.(int); ok {\n\t\t\t\t\tif vlAInt == 0 { // base is empty\n\t\t\t\t\t\tif valOfA.FieldByName(fnB).CanSet() {\n\t\t\t\t\t\t\tvalOfA.FieldByName(fnB).Set(valOfB.Field(i))\n\t\t\t\t\t\t\t// log.Printf(\"Set int %v\", fnB)\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tlog.Printf(\"Cannot set int %v\", fnB)\n\t\t\t\t\t\t}\n\t\t\t\t\t} else {\n\t\t\t\t\t\t// log.Printf(\"Base for int %v not empty %v\", fnB, vlAInt)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t}\n}", "func Or(a, b ObjectLinterFn) ObjectLinterFn {\n\treturn func(o runtime.Object) error {\n\t\tif a == nil || b == nil {\n\t\t\treturn errors.New(errNilLinterFn)\n\t\t}\n\t\taErr := a(o)\n\t\tbErr := b(o)\n\t\tif aErr == nil || bErr == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn errors.Errorf(errOrFmt, aErr, bErr)\n\t}\n}", "func equalOwnerRefs(a, b []metav1.OwnerReference) bool {\n\tif len(a) != len(b) {\n\t\treturn false\n\t}\n\tfor i, v := range a {\n\t\tif v != b[i] {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func equalBubbledMetadata(md1, md2 siadir.Metadata) error {\n\t// Check AggregateHealth\n\tif md1.AggregateHealth != md2.AggregateHealth {\n\t\treturn fmt.Errorf(\"AggregateHealth not equal, %v and %v\", md1.AggregateHealth, md2.AggregateHealth)\n\t}\n\t// Check AggregateNumFiles\n\tif md1.AggregateNumFiles != md2.AggregateNumFiles {\n\t\treturn fmt.Errorf(\"AggregateNumFiles not equal, %v and %v\", md1.AggregateNumFiles, md2.AggregateNumFiles)\n\t}\n\t// Check Size\n\tif md1.AggregateSize != md2.AggregateSize {\n\t\treturn fmt.Errorf(\"aggregate sizes not equal, %v and %v\", md1.AggregateSize, md2.AggregateSize)\n\t}\n\t// Check Health\n\tif md1.Health != md2.Health {\n\t\treturn fmt.Errorf(\"healths not equal, %v and %v\", md1.Health, md2.Health)\n\t}\n\t// Check LastHealthCheckTimes\n\tif md2.LastHealthCheckTime != md1.LastHealthCheckTime 
{\n\t\treturn fmt.Errorf(\"LastHealthCheckTimes not equal %v and %v\", md2.LastHealthCheckTime, md1.LastHealthCheckTime)\n\t}\n\t// Check MinRedundancy\n\tif md1.MinRedundancy != md2.MinRedundancy {\n\t\treturn fmt.Errorf(\"MinRedundancy not equal, %v and %v\", md1.MinRedundancy, md2.MinRedundancy)\n\t}\n\t// Check Mod Times\n\tif md2.ModTime != md1.ModTime {\n\t\treturn fmt.Errorf(\"ModTimes not equal %v and %v\", md2.ModTime, md1.ModTime)\n\t}\n\t// Check NumFiles\n\tif md1.NumFiles != md2.NumFiles {\n\t\treturn fmt.Errorf(\"NumFiles not equal, %v and %v\", md1.NumFiles, md2.NumFiles)\n\t}\n\t// Check NumStuckChunks\n\tif md1.NumStuckChunks != md2.NumStuckChunks {\n\t\treturn fmt.Errorf(\"NumStuckChunks not equal, %v and %v\", md1.NumStuckChunks, md2.NumStuckChunks)\n\t}\n\t// Check NumSubDirs\n\tif md1.NumSubDirs != md2.NumSubDirs {\n\t\treturn fmt.Errorf(\"NumSubDirs not equal, %v and %v\", md1.NumSubDirs, md2.NumSubDirs)\n\t}\n\t// Check StuckHealth\n\tif md1.StuckHealth != md2.StuckHealth {\n\t\treturn fmt.Errorf(\"stuck healths not equal, %v and %v\", md1.StuckHealth, md2.StuckHealth)\n\t}\n\treturn nil\n}", "func (bm *BoundedMean) Merge(bm2 *BoundedMean) error {\n\tif err := checkMergeBoundedMean(bm, bm2); err != nil {\n\t\treturn err\n\t}\n\tbm.NormalizedSum.Merge(&bm2.NormalizedSum)\n\tbm.Count.Merge(&bm2.Count)\n\tbm2.state = merged\n\treturn nil\n}", "func Merge(l error, r error) error {\n\tif l == nil {\n\t\treturn r\n\t}\n\tif r == nil {\n\t\treturn l\n\t}\n\n\tif c, ok := l.(*collection); ok {\n\t\tc.append(r)\n\t\treturn c\n\t}\n\tif c, ok := r.(*collection); ok {\n\t\tc.insertFront(l)\n\t\treturn c\n\t}\n\tc := newCollection()\n\tc.append(l)\n\tc.append(r)\n\treturn c\n}", "func Merge(a, b AABB) AABB {\n\treturn AABB{\n\t\tf.Min(a.L, b.L), f.Min(a.B, b.B),\n\t\tf.Max(a.R, b.R), f.Max(a.T, b.T),\n\t}\n}", "func CheckMergeCompliance(\n\trepo plumbing2.LocalRepo,\n\tchange *plumbing2.ItemChange,\n\tmergeProposalID,\n\tpushKeyID string,\n\tkeepers core.Logic) error {\n\n\tref := plumbing.ReferenceName(change.Item.GetName())\n\tif !ref.IsBranch() {\n\t\treturn fmt.Errorf(\"merge error: pushed reference must be a branch\")\n\t}\n\n\tpropID := mergerequest.MakeMergeRequestProposalID(mergeProposalID)\n\tprop := repo.GetState().Proposals.Get(propID)\n\tif prop == nil {\n\t\treturn fmt.Errorf(\"merge error: target merge proposal was not found\")\n\t}\n\n\t// Ensure the signer is the creator of the proposal\n\tpushKey := keepers.PushKeyKeeper().Get(pushKeyID)\n\tif pushKey.Address.String() != prop.Creator {\n\t\treturn fmt.Errorf(\"merge error: push key owner did not create the proposal\")\n\t}\n\n\t// Check if the merge proposal has been closed\n\tclosed, err := keepers.RepoKeeper().IsProposalClosed(repo.GetName(), propID)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"merge error: %s\", err)\n\t} else if closed {\n\t\treturn fmt.Errorf(\"merge error: target merge proposal is already closed\")\n\t}\n\n\t// Ensure the proposal's base branch matches the pushed branch\n\tvar propBaseBranch = string(prop.ActionData[constants.ActionDataKeyBaseBranch])\n\tif ref.Short() != propBaseBranch {\n\t\treturn fmt.Errorf(\"merge error: pushed branch name and proposal base branch name must match\")\n\t}\n\n\t// Check whether the merge proposal has been accepted\n\tif !prop.IsAccepted() {\n\t\tif prop.Outcome == 0 {\n\t\t\treturn fmt.Errorf(\"merge error: target merge proposal is undecided\")\n\t\t} else {\n\t\t\treturn fmt.Errorf(\"merge error: target merge proposal was not 
accepted\")\n\t\t}\n\t}\n\n\tvar propTargetHash = string(prop.ActionData[constants.ActionDataKeyTargetHash])\n\tif change.Item.GetData() != propTargetHash {\n\t\treturn fmt.Errorf(\"merge error: pushed commit did not match merge proposal target hash\")\n\t}\n\n\treturn nil\n}", "func (query *Query) Merge(q2 Query) Query {\n\t// dereference to make a copy of this query\n\tq := *query\n\n\tfor _, tag := range q2.Tags {\n\t\tif !StrSliceContains(q.Tags, tag) {\n\t\t\tq.Tags = append(q.Tags, tag)\n\t\t}\n\t}\n\n\tfor _, tag := range q2.AntiTags {\n\t\tif !StrSliceContains(q.AntiTags, tag) {\n\t\t\tq.AntiTags = append(q.AntiTags, tag)\n\t\t}\n\t}\n\n\tif q2.Project != \"\" {\n\t\tif q.Project != \"\" && q.Project != q2.Project {\n\t\t\tExitFail(\"Could not apply q2, project conflict\")\n\t\t} else {\n\t\t\tq.Project = q2.Project\n\t\t}\n\t}\n\n\tif q2.Priority != \"\" {\n\t\tif q.Priority != \"\" {\n\t\t\tExitFail(\"Could not apply q2, priority conflict\")\n\t\t} else {\n\t\t\tq.Priority = q2.Priority\n\t\t}\n\t}\n\n\treturn q\n}", "func Merge(a, b map[string]interface{}) map[string]interface{} {\n\tresult := make(map[string]interface{})\n\tfor k, av := range a {\n\t\tar := reflect.ValueOf(av)\n\t\tif avm, ok := av.(map[string]interface{}); ok {\n\t\t\tif bv, ok := b[k]; ok {\n\t\t\t\tif bvm, ok := bv.(map[string]interface{}); ok {\n\t\t\t\t\tresult[k] = Merge(avm, bvm)\n\t\t\t\t} else {\n\t\t\t\t\tresult[k] = bv\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tresult[k] = av\n\t\t\t}\n\t\t} else if ar.Kind() == reflect.Slice {\n\t\t\tif bv, ok := b[k]; ok {\n\t\t\t\tbr := reflect.ValueOf(bv)\n\t\t\t\tif br.Kind() == reflect.Slice {\n\t\t\t\t\tcombined := reflect.MakeSlice(ar.Type(), 0, ar.Len())\n\t\t\t\t\tfor i := 0; i < ar.Len(); i++ {\n\t\t\t\t\t\tcombined = reflect.Append(combined, ar.Index(i))\n\t\t\t\t\t}\n\t\t\t\t\tfor i := 0; i < br.Len(); i++ {\n\t\t\t\t\t\tcombined = reflect.Append(combined, br.Index(i))\n\t\t\t\t\t}\n\t\t\t\t\tresult[k] = combined.Interface()\n\t\t\t\t} else {\n\t\t\t\t\tresult[k] = bv\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tif bv, ok := b[k]; ok {\n\t\t\t\tresult[k] = bv\n\t\t\t} else {\n\t\t\t\tresult[k] = av\n\t\t\t}\n\t\t}\n\t}\n\tfor k, bv := range b {\n\t\tif _, ok := a[k]; !ok {\n\t\t\tresult[k] = bv\n\t\t}\n\t}\n\treturn result\n}", "func TestMerge_Empty(t *testing.T) {\n\tpncounter1 := PNCounter{}\n\tpncounter2 := PNCounter{}\n\tpncounter3 := PNCounter{}\n\n\tpncounterExpected := PNCounter{}\n\tpncounterActual := Merge(pncounter1, pncounter2, pncounter3)\n\n\tcountExpected := 0\n\tcountActual := pncounterActual.GetTotal()\n\n\tassert.Equal(t, pncounterExpected, pncounterActual)\n\tassert.Equal(t, countExpected, countActual)\n\n\tpncounter = pncounter.Clear(testNode)\n}", "func (r *tickResult) merge(other tickResult) tickResult {\n\treturn tickResult{\n\t\tstandard: r.standard.merge(other.standard),\n\t\tforwarded: r.forwarded.merge(other.forwarded),\n\t\ttimed: r.timed.merge(other.timed),\n\t}\n}", "func (sr *StyleGridItem) Combine(b StyleGridItem) {\n\n\tvalOfA := reflect.ValueOf(sr)\n\tvalOfA = valOfA.Elem() // dereference\n\n\tvalOfB := reflect.ValueOf(b)\n\n\ttypeOfB := valOfB.Type()\n\tif valOfB.Kind().String() != \"struct\" {\n\t\t//\n\t}\n\n\tfor i := 0; i < valOfB.NumField(); i++ {\n\n\t\tfnB := typeOfB.Field(i).Name\n\t\t// log.Printf(\"Fieldname %v\", fnB)\n\n\t\tvlB := valOfB.Field(i).Interface()\n\n\t\t// string\n\t\tif vlBStr, ok := vlB.(string); ok {\n\t\t\tif vlBStr != \"\" {\n\t\t\t\tvlA := valOfA.Field(i).Interface()\n\t\t\t\tif vlAStr, ok := 
vlA.(string); ok {\n\t\t\t\t\tif vlAStr == \"\" { // base is empty\n\t\t\t\t\t\tif valOfA.FieldByName(fnB).CanSet() {\n\t\t\t\t\t\t\tvalOfA.FieldByName(fnB).Set(valOfB.Field(i))\n\t\t\t\t\t\t\t// log.Printf(\"Set string %v\", fnB)\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tlog.Printf(\"Cannot set string %v\", fnB)\n\t\t\t\t\t\t}\n\t\t\t\t\t} else {\n\t\t\t\t\t\t// log.Printf(\"Base for string %v not empty %v\", fnB, vlAStr)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t// int\n\t\tif vlBInt, ok := vlB.(int); ok {\n\t\t\tif vlBInt != 0 {\n\t\t\t\tvlA := valOfA.Field(i).Interface()\n\t\t\t\tif vlAInt, ok := vlA.(int); ok {\n\t\t\t\t\tif vlAInt == 0 { // base is empty\n\t\t\t\t\t\tif valOfA.FieldByName(fnB).CanSet() {\n\t\t\t\t\t\t\tvalOfA.FieldByName(fnB).Set(valOfB.Field(i))\n\t\t\t\t\t\t\t// log.Printf(\"Set int %v\", fnB)\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tlog.Printf(\"Cannot set int %v\", fnB)\n\t\t\t\t\t\t}\n\t\t\t\t\t} else {\n\t\t\t\t\t\t// log.Printf(\"Base for int %v not empty %v\", fnB, vlAInt)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t}\n}", "func (b Binding) Merge(b2 Binding) Binding {\n\n\tresult := Binding{}\n\n\tfor k, v := range b {\n\t\tresult[k] = v\n\t}\n\n\tfor k, v := range b2 {\n\t\tresult[k] = v\n\t}\n\n\treturn result\n}", "func compatible(a uint32, b uint32) int8 {\n // first grab only the bits that differ\n var diff uint32 = a ^ b;\n // then check that they all differ in the same way\n return all_on(a, diff);\n}", "func (h *HyperLogLog32) Union(a, b *HyperLogLog32) error {\n\tif a.p != b.p {\n\t\treturn errors.New(\"card: mismatched precision\")\n\t}\n\tta := reflect.TypeOf(b.hash)\n\tif reflect.TypeOf(b.hash) != ta {\n\t\treturn errors.New(\"card: mismatched hash function\")\n\t}\n\tif h.hash != nil && reflect.TypeOf(h.hash) != ta {\n\t\treturn errors.New(\"card: mismatched hash function\")\n\t}\n\n\tif h != a && h != b {\n\t\t*h = HyperLogLog32{p: a.p, m: a.m, hash: h.hash, register: make([]uint8, a.m)}\n\t}\n\tfor i, r := range a.register {\n\t\th.register[i] = max(r, b.register[i])\n\t}\n\treturn nil\n}", "func (a *Addresses) Merge(b *Addresses) *Addresses {\n\tresult := *a\n\n\tif b.HTTP != \"\" {\n\t\tresult.HTTP = b.HTTP\n\t}\n\treturn &result\n}", "func (rs Reasons) Union(other Reasons) Reasons {\n\tmerged := make(Reasons)\n\tfor r := range rs {\n\t\tmerged.Add(r)\n\t}\n\tfor r := range other {\n\t\tmerged.Add(r)\n\t}\n\treturn merged\n}", "func (mb *MutableBag) Merge(bags ...*MutableBag) error {\n\t// first step is to make sure there are no redundant definitions of the same attribute\n\tkeys := make(map[string]bool)\n\tfor _, bag := range bags {\n\t\tif bag == nil {\n\t\t\tcontinue\n\t\t}\n\t\tfor k := range bag.values {\n\t\t\tif keys[k] {\n\t\t\t\treturn fmt.Errorf(\"conflicting value for attribute %s\", k)\n\t\t\t}\n\t\t\tkeys[k] = true\n\t\t}\n\t}\n\n\t// now that we know there are no conflicting definitions, do the actual merging...\n\tfor _, bag := range bags {\n\t\tif bag == nil {\n\t\t\tcontinue\n\t\t}\n\t\tfor k, v := range bag.values {\n\t\t\tmb.values[k] = copyValue(v)\n\t\t}\n\t}\n\n\treturn nil\n}", "func main() {\n\tnums1 := []int{1,4,7,0,0,0}\n\tnums2 := []int{2,3,6}\n\tmerge(nums1, len(nums1)-len(nums2), nums2, len(nums2))\n\tfmt.Println(nums1)\n}", "func mergeAny(less func(i, j Any) bool, i1, i2 <-chan Any) (out <-chan Any) {\n\tcha := make(chan Any)\n\tgo func(out chan<- Any, i1, i2 <-chan Any) {\n\t\tdefer close(out)\n\t\tvar (\n\t\t\tclos1, clos2 bool // we found the chan closed\n\t\t\tbuff1, buff2 bool // we've read 'from', but not sent 
(yet)\n\t\t\tok bool // did we read successfully?\n\t\t\tfrom1, from2 Any // what we've read\n\t\t)\n\n\t\tfor !clos1 || !clos2 {\n\n\t\t\tif !clos1 && !buff1 {\n\t\t\t\tif from1, ok = <-i1; ok {\n\t\t\t\t\tbuff1 = true\n\t\t\t\t} else {\n\t\t\t\t\tclos1 = true\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif !clos2 && !buff2 {\n\t\t\t\tif from2, ok = <-i2; ok {\n\t\t\t\t\tbuff2 = true\n\t\t\t\t} else {\n\t\t\t\t\tclos2 = true\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif clos1 && !buff1 {\n\t\t\t\tfrom1 = from2\n\t\t\t}\n\t\t\tif clos2 && !buff2 {\n\t\t\t\tfrom2 = from1\n\t\t\t}\n\n\t\t\tif less(from1, from2) {\n\t\t\t\tout <- from1\n\t\t\t\tbuff1 = false\n\t\t\t} else if less(from2, from1) {\n\t\t\t\tout <- from2\n\t\t\t\tbuff2 = false\n\t\t\t} else {\n\t\t\t\tout <- from1 // == from2\n\t\t\t\tbuff1 = false\n\t\t\t\tbuff2 = false\n\t\t\t}\n\t\t}\n\t}(cha, i1, i2)\n\treturn cha\n}", "func (a *ServerConfig) Merge(b *ServerConfig) *ServerConfig {\n\tresult := *a\n\n\tif b.Enabled {\n\t\tresult.Enabled = true\n\t}\n\n\treturn &result\n}", "func MergeMappings(a, b map[string]interface{}) error {\n\tfor k, bv := range b {\n\t\tav, ok := a[k]\n\t\tif !ok {\n\t\t\ta[k] = bv\n\t\t\tcontinue\n\t\t}\n\t\tif !reflect.DeepEqual(av, bv) {\n\t\t\treturn fmt.Errorf(\"merge conflict for key '%v': old value=%v, new value=%v\", k, av, bv)\n\t\t}\n\t}\n\treturn nil\n}", "func TestMergeLengthThree(t *testing.T) {\n\tA := []int{6, 2, 3}\n\tR := []int{2, 3, 6}\n\n\tif r := merge(A, 0, 0, 2); !reflect.DeepEqual(r, R) {\n\t\tt.Error(\"Expected\", R, \"got\", r)\n\t}\n}", "func (a *Ports) Merge(b *Ports) *Ports {\n\tresult := *a\n\n\tif b.HTTP != 0 {\n\t\tresult.HTTP = b.HTTP\n\t}\n\treturn &result\n}", "func Merge(target, source interface{}, opt *Options) error {\n\tvT := reflect.ValueOf(target)\n\tvS := reflect.ValueOf(source)\n\n\tif target != nil && vT.Type() == valType {\n\t\tvT = vT.Interface().(reflect.Value)\n\t}\n\tif source != nil && vS.Type() == valType {\n\t\tvS = vS.Interface().(reflect.Value)\n\t}\n\n\tif vT.Kind() != reflect.Ptr {\n\t\treturn errors.New(\"target must be a pointer\")\n\t}\n\n\tif !reflect.Indirect(vT).IsValid() {\n\t\treturn errors.New(\"target can not be zero value\")\n\t}\n\n\t// use defaults if none are provided\n\tif opt == nil {\n\t\topt = NewOptions()\n\t}\n\n\tif opt.mergeFuncs == nil {\n\t\treturn errors.New(\"invalid options, use NewOptions() to generate and then modify as needed\")\n\t}\n\n\t//make a copy here so if there is an error mid way, the target stays in tact\n\tcp := vT.Elem()\n\n\tmerged, err := merge(cp, reflect.Indirect(vS), opt)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif !isSettable(vT.Elem(), merged) {\n\t\treturn fmt.Errorf(\"Merge failed: expected merged result to be %v but got %v\",\n\t\t\tvT.Elem().Type(), merged.Type())\n\t}\n\n\tvT.Elem().Set(merged)\n\treturn nil\n}", "func Union(a, b RegInvImage) RegInvImage {\n\tfor imageName, digestTags := range b {\n\t\t// If a does not have this image at all, then it's a simple\n\t\t// injection.\n\t\tif a[imageName] == nil {\n\t\t\ta[imageName] = digestTags\n\t\t\tcontinue\n\t\t}\n\t\tfor digest, tags := range digestTags {\n\t\t\t// If a has the image but not this digest, inject just this digest\n\t\t\t// and all associated tags.\n\t\t\tif a[imageName][digest] == nil {\n\t\t\t\ta[imageName][digest] = tags\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t// If c has the digest already, try to inject those tags in b that\n\t\t\t// are not already in a.\n\t\t\ttagSlice := TagSlice{}\n\t\t\tfor tag := range tags.Union(a[imageName][digest]) {\n\t\t\t\tif tag 
== \"latest\" {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\ttagSlice = append(tagSlice, tag)\n\t\t\t}\n\t\t\ta[imageName][digest] = tagSlice\n\t\t}\n\t}\n\n\treturn a\n}", "func (ctx *RequestContext) safeMerge(src, dst reflect.Value) (err error) {\n\tty := dst.Type()\n\tfor i := 0; i < dst.NumField(); i++ {\n\t\tw := ty.Field(i).Tag.Get(\"writeRight\")\n\t\tif w == \"\" || ctx.HasRight(w) {\n\t\t\tsrcField := src.Field(i)\n\t\t\tdstField := dst.Field(i)\n\t\t\tif isRecursibleType(srcField) {\n\t\t\t\terr = ctx.safeMerge(srcField, dstField)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tdstField.Set(srcField)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}", "func mergeInt(a, b int) int {\n\tif a != 0 {\n\t\treturn a\n\t}\n\n\treturn b\n}", "func (a *UnsignedArray) Merge(b *UnsignedArray) {\n\tif a.Len() == 0 {\n\t\t*a = *b\n\t\treturn\n\t}\n\n\tif b.Len() == 0 {\n\t\treturn\n\t}\n\n\t// Normally, both a and b should not contain duplicates. Due to a bug in older versions, it's\n\t// possible stored blocks might contain duplicate values. Remove them if they exists before\n\t// merging.\n\t// a = a.Deduplicate()\n\t// b = b.Deduplicate()\n\n\tif a.MaxTime() < b.MinTime() {\n\t\ta.Timestamps = append(a.Timestamps, b.Timestamps...)\n\t\ta.Values = append(a.Values, b.Values...)\n\t\treturn\n\t}\n\n\tif b.MaxTime() < a.MinTime() {\n\t\tvar tmp UnsignedArray\n\t\ttmp.Timestamps = append(b.Timestamps, a.Timestamps...)\n\t\ttmp.Values = append(b.Values, a.Values...)\n\t\t*a = tmp\n\t\treturn\n\t}\n\n\tout := NewUnsignedArrayLen(a.Len() + b.Len())\n\ti, j, k := 0, 0, 0\n\tfor i < len(a.Timestamps) && j < len(b.Timestamps) {\n\t\tif a.Timestamps[i] < b.Timestamps[j] {\n\t\t\tout.Timestamps[k] = a.Timestamps[i]\n\t\t\tout.Values[k] = a.Values[i]\n\t\t\ti++\n\t\t} else if a.Timestamps[i] == b.Timestamps[j] {\n\t\t\tout.Timestamps[k] = b.Timestamps[j]\n\t\t\tout.Values[k] = b.Values[j]\n\t\t\ti++\n\t\t\tj++\n\t\t} else {\n\t\t\tout.Timestamps[k] = b.Timestamps[j]\n\t\t\tout.Values[k] = b.Values[j]\n\t\t\tj++\n\t\t}\n\t\tk++\n\t}\n\n\tif i < len(a.Timestamps) {\n\t\tn := copy(out.Timestamps[k:], a.Timestamps[i:])\n\t\tcopy(out.Values[k:], a.Values[i:])\n\t\tk += n\n\t} else if j < len(b.Timestamps) {\n\t\tn := copy(out.Timestamps[k:], b.Timestamps[j:])\n\t\tcopy(out.Values[k:], b.Values[j:])\n\t\tk += n\n\t}\n\n\ta.Timestamps = out.Timestamps[:k]\n\ta.Values = out.Values[:k]\n}", "func Missing(a, b interface{}) interface{} {\n\tarrA := reflect.ValueOf(a)\n\tarrB := reflect.ValueOf(b)\n\n\tif arrA.Kind() != reflect.Slice {\n\t\tpanic(\"invalid data-type, occurred in sxutil package\")\n\t}\n\tif arrB.Kind() != reflect.Slice {\n\t\tpanic(\"invalid data-type, occurred in sxutil package\")\n\t}\n\n\tvar arrC []interface{}\n\tm := make(map[interface{}]bool)\n\n\tfor i := 0; i < arrA.Len(); i++ {\n\t\tm[arrA.Index(i).Interface()] = true\n\t}\n\n\tfor i := 0; i < arrB.Len(); i++ {\n\t\tif _, ok := m[arrB.Index(i).Interface()]; !ok {\n\t\t\tarrC = append(arrC, arrB.Index(i).Interface())\n\t\t}\n\t}\n\n\treturn arrC\n}", "func (s *mergeBaseSuite) TestAncestorUnrelated(c *C) {\n\trevs := []string{\"M\", \"N\"}\n\ts.AssertAncestor(c, revs, false)\n\n\trevs = []string{\"N\", \"M\"}\n\ts.AssertAncestor(c, revs, false)\n}", "func (c *Catalog) Merge(other *Catalog) {\n\tif other == nil {\n\t\treturn\n\t}\n\tif other.Paths != nil {\n\t\tc.Paths = append(c.Paths, other.Paths...)\n\t}\n\tif other.Stats != nil {\n\t\tstats, _ := c.GetStats()\n\t\tstats.Merge(other.Stats)\n\t}\n\tif other.Cameras != nil 
{\n\t\tc.Cameras = c.Cameras.Merge(other.Cameras)\n\t}\n\tif other.Lenses != nil {\n\t\tc.Lenses = c.Lenses.Merge(other.Lenses)\n\t}\n\tif other.Photos != nil {\n\t\tc.Photos = append(c.Photos, other.Photos...)\n\t}\n\tif other.Collections != nil {\n\t\tc.Collections = append(c.Collections, other.Collections...)\n\t}\n\tif other.CollectionTree != nil && c.CollectionTree == nil {\n\t\tc.CollectionTree = other.CollectionTree\n\t}\n}", "func (a *StringArray) Merge(b *StringArray) {\n\tif a.Len() == 0 {\n\t\t*a = *b\n\t\treturn\n\t}\n\n\tif b.Len() == 0 {\n\t\treturn\n\t}\n\n\t// Normally, both a and b should not contain duplicates. Due to a bug in older versions, it's\n\t// possible stored blocks might contain duplicate values. Remove them if they exists before\n\t// merging.\n\t// a = a.Deduplicate()\n\t// b = b.Deduplicate()\n\n\tif a.MaxTime() < b.MinTime() {\n\t\ta.Timestamps = append(a.Timestamps, b.Timestamps...)\n\t\ta.Values = append(a.Values, b.Values...)\n\t\treturn\n\t}\n\n\tif b.MaxTime() < a.MinTime() {\n\t\tvar tmp StringArray\n\t\ttmp.Timestamps = append(b.Timestamps, a.Timestamps...)\n\t\ttmp.Values = append(b.Values, a.Values...)\n\t\t*a = tmp\n\t\treturn\n\t}\n\n\tout := NewStringArrayLen(a.Len() + b.Len())\n\ti, j, k := 0, 0, 0\n\tfor i < len(a.Timestamps) && j < len(b.Timestamps) {\n\t\tif a.Timestamps[i] < b.Timestamps[j] {\n\t\t\tout.Timestamps[k] = a.Timestamps[i]\n\t\t\tout.Values[k] = a.Values[i]\n\t\t\ti++\n\t\t} else if a.Timestamps[i] == b.Timestamps[j] {\n\t\t\tout.Timestamps[k] = b.Timestamps[j]\n\t\t\tout.Values[k] = b.Values[j]\n\t\t\ti++\n\t\t\tj++\n\t\t} else {\n\t\t\tout.Timestamps[k] = b.Timestamps[j]\n\t\t\tout.Values[k] = b.Values[j]\n\t\t\tj++\n\t\t}\n\t\tk++\n\t}\n\n\tif i < len(a.Timestamps) {\n\t\tn := copy(out.Timestamps[k:], a.Timestamps[i:])\n\t\tcopy(out.Values[k:], a.Values[i:])\n\t\tk += n\n\t} else if j < len(b.Timestamps) {\n\t\tn := copy(out.Timestamps[k:], b.Timestamps[j:])\n\t\tcopy(out.Values[k:], b.Values[j:])\n\t\tk += n\n\t}\n\n\ta.Timestamps = out.Timestamps[:k]\n\ta.Values = out.Values[:k]\n}", "func Merge(a, b IntSet, reuse []int) IntSet {\n\tswitch {\n\tcase a.Empty():\n\t\treturn b\n\tcase b.Empty():\n\t\treturn a\n\tcase a.Inverse:\n\t\tif b.Inverse {\n\t\t\treturn IntSet{Inverse: true, Set: intersect(a.Set, b.Set, reuse)}\n\t\t}\n\t\treturn IntSet{Inverse: true, Set: subtract(a.Set, b.Set, reuse)}\n\tcase b.Inverse:\n\t\treturn IntSet{Inverse: true, Set: subtract(b.Set, a.Set, reuse)}\n\t}\n\treturn IntSet{Set: combine(a.Set, b.Set, reuse)}\n}", "func (c ConversionFuncs) Merge(other ConversionFuncs) ConversionFuncs {\n\tmerged := NewConversionFuncs()\n\tfor k, v := range c.untyped {\n\t\tmerged.untyped[k] = v\n\t}\n\tfor k, v := range other.untyped {\n\t\tmerged.untyped[k] = v\n\t}\n\treturn merged\n}", "func MergeTwoStatistics(s1 *Statistic, s2 *Statistic) *Statistic {\n\tvar tmpPeerId string\n\tif s1.PeerId == s2.PeerId {\n\t\ttmpPeerId = s1.PeerId\n\t} else if s1.PeerId == \"\" {\n\t\ttmpPeerId = s2.PeerId\n\t} else if s2.PeerId == \"\"{\n\t\ttmpPeerId = s1.PeerId\n\t} else {\n\t\ttmpPeerId = \"ALL\"\n\t}\n\treturn &Statistic{\n\t\tPeerId:\t tmpPeerId,\n\t\tNumBlockSend: s1.NumBlockSend + s2.NumBlockSend,\n\t\tNumBlockRecv: s1.NumBlockRecv + s2.NumBlockRecv,\n\t\tNumDupBlock: s1.NumDupBlock + s2.NumDupBlock,\n\t}\n}", "func checkOverlap(a, b scene.AABB) bool {\n const EPS = 0.0001\n e := vecmath.Vec3d{EPS, EPS, EPS}\n minA := vecmath.ToVec3d(&a.Min).Added(e)\n minB := vecmath.ToVec3d(&b.Min).Added(e)\n maxA := 
vecmath.ToVec3d(&a.Max).Subtracted(e)\n maxB := vecmath.ToVec3d(&b.Max).Subtracted(e)\n\n adjustedA := scene.MakeAABBV(minA, maxA)\n adjustedB := scene.MakeAABBV(minB, maxB)\n\n return adjustedA.Overlaps(&adjustedB)\n}", "func (rc *ResourceCollection) Merge(other *ResourceCollection) {\n\tfor resourceDomain, resourceKinds := range rc.collection { // e.g. for AWS\n\t\tif _, exists := other.collection[resourceDomain]; !exists { // only A has AWS\n\t\t\trc.collection[resourceDomain] = resourceKinds\n\t\t} else { // both have AWS\n\t\t\trc.ensureResourcePathExists(resourceDomain, \"\")\n\n\t\t\tfor resourceKind, resources := range rc.collection[resourceDomain] { // e.g. for EC2 instances\n\t\t\t\tif _, exists := other.collection[resourceDomain][resourceKind]; !exists { // only A has any EC2 instances\n\t\t\t\t\trc.collection[resourceDomain][resourceKind] = resources\n\t\t\t\t} else { // both have some EC2 instances\n\t\t\t\t\trc.ensureResourcePathExists(resourceDomain, resourceKind)\n\n\t\t\t\t\tfor id, resource := range rc.collection[resourceDomain][resourceKind] { // e.g. for EC2 instance with ID i-abc123def456\n\t\t\t\t\t\trc.collection[resourceDomain][resourceKind][id] = resource\n\t\t\t\t\t}\n\n\t\t\t\t\tfor id, resource := range other.collection[resourceDomain][resourceKind] {\n\t\t\t\t\t\trc.collection[resourceDomain][resourceKind][id] = resource\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tfor resourceKind, resources := range other.collection[resourceDomain] { // e.g. for security groups\n\t\t\t\tif _, exists := rc.collection[resourceDomain][resourceKind]; !exists { // only B has any security groups\n\t\t\t\t\trc.collection[resourceDomain][resourceKind] = resources\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tfor resourceDomain, resourceKinds := range other.collection { // e.g. for GCP\n\t\tif _, exists := rc.collection[resourceDomain]; !exists { // only B has GCP\n\t\t\trc.collection[resourceDomain] = resourceKinds\n\t\t}\n\t}\n}", "func merge(existing, kind *Config) error {\n\t// verify assumptions about kubeadm / kind kubeconfigs\n\tif err := checkKubeadmExpectations(kind); err != nil {\n\t\treturn err\n\t}\n\n\t// insert or append cluster entry\n\tshouldAppend := true\n\tfor i := range existing.Clusters {\n\t\tif existing.Clusters[i].Name == kind.Clusters[0].Name {\n\t\t\texisting.Clusters[i] = kind.Clusters[0]\n\t\t\tshouldAppend = false\n\t\t}\n\t}\n\tif shouldAppend {\n\t\texisting.Clusters = append(existing.Clusters, kind.Clusters[0])\n\t}\n\n\t// insert or append user entry\n\tshouldAppend = true\n\tfor i := range existing.Users {\n\t\tif existing.Users[i].Name == kind.Users[0].Name {\n\t\t\texisting.Users[i] = kind.Users[0]\n\t\t\tshouldAppend = false\n\t\t}\n\t}\n\tif shouldAppend {\n\t\texisting.Users = append(existing.Users, kind.Users[0])\n\t}\n\n\t// insert or append context entry\n\tshouldAppend = true\n\tfor i := range existing.Contexts {\n\t\tif existing.Contexts[i].Name == kind.Contexts[0].Name {\n\t\t\texisting.Contexts[i] = kind.Contexts[0]\n\t\t\tshouldAppend = false\n\t\t}\n\t}\n\tif shouldAppend {\n\t\texisting.Contexts = append(existing.Contexts, kind.Contexts[0])\n\t}\n\n\t// set the current context\n\texisting.CurrentContext = kind.CurrentContext\n\n\t// TODO: We should not need this, but it allows broken clients that depend\n\t// on apiVersion and kind to work. Notably the upstream javascript client.\n\t// See: https://github.com/kubernetes-sigs/kind/issues/1242\n\tif len(existing.OtherFields) == 0 {\n\t\t// TODO: Should we be deep-copying? 
for now we don't need to\n\t\t// and doing so would be a pain (re and de-serialize maybe?) :shrug:\n\t\texisting.OtherFields = kind.OtherFields\n\t}\n\n\treturn nil\n}", "func (cl *ConfigLocks) Merge(c2 *ConfigLocks) {\n\tcl.Bits |= c2.Bits\n\tfor t := range c2.Tags {\n\t\tcl.Tags[t] = struct{}{}\n\t}\n}", "func (a Value) Add(b Value) (*Value, error) {\n\tswitch {\n\tcase a.IsNative() != b.IsNative():\n\t\treturn nil, fmt.Errorf(\"Cannot add native and non-native values\")\n\tcase a.IsZero():\n\t\treturn b.Clone(), nil\n\tcase b.IsZero():\n\t\treturn a.Clone(), nil\n\tdefault:\n\t\tav, bv, ao := a.factor(b)\n\t\tv := newValue(a.native, (av+bv) < 0, abs(av+bv), ao)\n\t\treturn v, v.canonicalise()\n\t}\n}", "func mergeConditional(newVal, originalVal interface{}, merge bool) (interface{}, error) {\n\tif !merge || util.IsValueNilOrDefault(originalVal) {\n\t\treturn newVal, nil\n\t}\n\tnewS, err := yaml.Marshal(newVal)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif util.IsYAMLEmpty(string(newS)) {\n\t\treturn originalVal, nil\n\t}\n\toriginalS, err := yaml.Marshal(originalVal)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif util.IsYAMLEmpty(string(originalS)) {\n\t\treturn newVal, nil\n\t}\n\n\tmergedS, err := util.OverlayYAML(string(originalS), string(newS))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif util.IsMap(originalVal) {\n\t\t// For JSON compatibility\n\t\tout := make(map[string]interface{})\n\t\tif err := yaml.Unmarshal([]byte(mergedS), &out); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn out, nil\n\t}\n\t// For scalars and slices, copy the type\n\tout := originalVal\n\tif err := yaml.Unmarshal([]byte(mergedS), &out); err != nil {\n\t\treturn nil, err\n\t}\n\treturn out, nil\n}", "func TestMerge(t *testing.T) {\n\tpncounter1 := PNCounter{\n\t\tAdd: gcounter.GCounter{map[string]int{\"node1\": 3, \"node2\": 5, \"node3\": 7}},\n\t\tDelete: gcounter.GCounter{map[string]int{\"node1\": 3, \"node2\": 5, \"node3\": 7}},\n\t}\n\tpncounter2 := PNCounter{\n\t\tAdd: gcounter.GCounter{map[string]int{\"node1\": 4, \"node2\": 6, \"node3\": 8}},\n\t\tDelete: gcounter.GCounter{map[string]int{\"node1\": 3, \"node2\": 5, \"node3\": 7}},\n\t}\n\tpncounter3 := PNCounter{\n\t\tAdd: gcounter.GCounter{map[string]int{\"node1\": 2, \"node2\": 4, \"node3\": 9}},\n\t\tDelete: gcounter.GCounter{map[string]int{\"node1\": 2, \"node2\": 4, \"node3\": 9}},\n\t}\n\n\tpncounterExpected := PNCounter{\n\t\tAdd: gcounter.GCounter{map[string]int{\"node1\": 4, \"node2\": 6, \"node3\": 9}},\n\t\tDelete: gcounter.GCounter{map[string]int{\"node1\": 3, \"node2\": 5, \"node3\": 9}},\n\t}\n\n\tpncounterActual := Merge(pncounter1, pncounter2, pncounter3)\n\n\tcountExpected := 2\n\tcountActual := pncounterActual.GetTotal()\n\n\tassert.Equal(t, pncounterExpected, pncounterActual)\n\tassert.Equal(t, countExpected, countActual)\n\n\tpncounter = pncounter.Clear(testNode)\n}", "func (s *mergeBaseSuite) TestAncestorBeyondMerges(c *C) {\n\trevs := []string{\"M\", \"G\"}\n\ts.AssertAncestor(c, revs, true)\n\n\trevs = []string{\"G\", \"M\"}\n\ts.AssertAncestor(c, revs, false)\n}", "func NotOr(dst, a, b []byte) int {\n\tn := len(a)\n\tif len(b) < n {\n\t\tn = len(b)\n\t}\n\tif len(dst) < n {\n\t\tn = len(dst)\n\t}\n\n\tif n == 0 {\n\t\treturn 0\n\t}\n\n\tnorASM(&dst[0], &a[0], &b[0], uint64(n))\n\treturn n\n}" ]
[ "0.5755972", "0.5697133", "0.5668373", "0.55909467", "0.5590817", "0.5558355", "0.5500499", "0.54455835", "0.54032934", "0.5354523", "0.51682526", "0.5137761", "0.511377", "0.51049376", "0.50986505", "0.5072738", "0.50718033", "0.506198", "0.5021708", "0.49960023", "0.49937177", "0.499266", "0.49824223", "0.49646968", "0.49350667", "0.49315953", "0.49284342", "0.49052373", "0.4895", "0.48897436", "0.48878267", "0.48713738", "0.4869994", "0.48651367", "0.48415905", "0.48337632", "0.48045087", "0.48038265", "0.48037928", "0.47959653", "0.47859424", "0.47790408", "0.475516", "0.4752513", "0.47478047", "0.47465065", "0.47429466", "0.4731791", "0.4728116", "0.47222808", "0.4708718", "0.46983996", "0.46964332", "0.46864444", "0.46855077", "0.46848562", "0.46842054", "0.46821168", "0.46806338", "0.4677058", "0.46759012", "0.46678868", "0.46614817", "0.46525842", "0.4651477", "0.46493042", "0.46461132", "0.4642749", "0.46422997", "0.46399054", "0.4637981", "0.4635613", "0.46353355", "0.46318227", "0.46274504", "0.46260706", "0.46233383", "0.46184745", "0.46183696", "0.46163005", "0.4608867", "0.46082914", "0.46021798", "0.4598698", "0.45982704", "0.45963788", "0.45934328", "0.45910415", "0.45908958", "0.45863876", "0.45843834", "0.45779756", "0.45751387", "0.45726863", "0.45679623", "0.45672926", "0.45640063", "0.45548713", "0.45522517", "0.4550547" ]
0.69784236
0